From 39b5f608ba1ca2e53e46baa726f7ebd1fbd06181 Mon Sep 17 00:00:00 2001 From: itamarEarly Date: Mon, 3 Feb 2025 18:15:15 +0200 Subject: [PATCH 1/2] feat(tests): add support for running EarlyAI-generated tests - Introduced EarlyAI-generated tests to enhance test coverage and quality. - Added support for running these tests by making the following updates: - **nx.json**: Added a `test:early` target to enable running EarlyAI-specific tests for the relevant projects. - **package.json**: Updated scripts to include `test:early` for simplified test execution. - **project.json** (for the updated projects): Configured `test:early` targets to handle EarlyAI test execution. - **pytest.ini**: Added global configuration for custom markers (`happy_path`, `edge_case`) to better organize tests. --- EarlyAI_README.md | 125 ++++++++++++++++++ nx.json | 16 ++- package.json | 4 +- .../test_early__handle_exception.py | 71 ++++++++++ ...early_acount_prompt_tokens_from_request.py | 55 ++++++++ .../test_early_async_wrapper.py | 73 ++++++++++ ..._early_count_prompt_tokens_from_request.py | 124 +++++++++++++++++ .../test_early_error_metrics_attributes.py | 82 ++++++++++++ .../test_early_utils/test_early_run_async.py | 50 +++++++ .../test_early_set_span_attribute.py | 79 +++++++++++ .../test_early_shared_metrics_attributes.py | 85 ++++++++++++ .../test_early_should_send_prompts.py | 65 +++++++++ .../test_early_sync_wrapper.py | 52 ++++++++ .../project.json | 11 ++ .../test_early_utils/test_early_dont_throw.py | 80 +++++++++++ .../test_early_error_metrics_attributes.py | 68 ++++++++++ .../test_early_model_as_dict.py | 71 ++++++++++ .../test_early_set_span_attribute.py | 74 +++++++++++ .../test_early_shared_metrics_attributes.py | 96 ++++++++++++++ .../test_early_should_send_prompts.py | 48 +++++++ .../project.json | 11 ++ .../haystack/test_early_utils/__init__.py | 0 .../test_early_process_request.py | 96 ++++++++++++++ .../test_early_process_response.py | 98 ++++++++++++++ .../test_early_set_span_attribute.py | 101 ++++++++++++++ .../test_early_should_send_prompts.py | 39 ++++++ .../project.json | 11 ++ .../pinecone/test_early_utils/__init__.py | 0 .../test_early_utils/test_early_dont_throw.py | 48 +++++++ .../test_early_is_metrics_enabled.py | 78 +++++++++++ .../test_early_set_span_attribute.py | 73 ++++++++++ .../project.json | 11 ++ pytest.ini | 5 + 33 files changed, 1898 insertions(+), 2 deletions(-) create mode 100644 EarlyAI_README.md create mode 100644 packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early__handle_exception.py create mode 100644 packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_acount_prompt_tokens_from_request.py create mode 100644 packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_async_wrapper.py create mode 100644 packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_count_prompt_tokens_from_request.py create mode 100644 packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_error_metrics_attributes.py create mode 100644 packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_run_async.py create mode 100644 
packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_set_span_attribute.py
 create mode 100644 packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_shared_metrics_attributes.py
 create mode 100644 packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_should_send_prompts.py
 create mode 100644 packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_sync_wrapper.py
 create mode 100644 packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_dont_throw.py
 create mode 100644 packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_error_metrics_attributes.py
 create mode 100644 packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_model_as_dict.py
 create mode 100644 packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_set_span_attribute.py
 create mode 100644 packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_shared_metrics_attributes.py
 create mode 100644 packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_should_send_prompts.py
 create mode 100644 packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/__init__.py
 create mode 100644 packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_request.py
 create mode 100644 packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_response.py
 create mode 100644 packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_set_span_attribute.py
 create mode 100644 packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_should_send_prompts.py
 create mode 100644 packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/__init__.py
 create mode 100644 packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_dont_throw.py
 create mode 100644 packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_is_metrics_enabled.py
 create mode 100644 packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_set_span_attribute.py
 create mode 100644 pytest.ini

diff --git a/EarlyAI_README.md b/EarlyAI_README.md
new file mode 100644
index 000000000..a42b84678
--- /dev/null
+++ b/EarlyAI_README.md
@@ -0,0 +1,125 @@
+# EarlyAI Test Integration in OpenLLMetry
+
+## Executive Summary
+
+This document outlines the integration of **EarlyAI-generated tests** into the OpenLLMetry monorepo. These tests improve test coverage and ensure instrumentation correctness while keeping the existing test flow intact.
+
+Here is a summary of the tests generated for the `utils` folder in the following projects:
+
+| Project                                      | Total Tests | Passed  | Failed |
+| -------------------------------------------- | ----------- | ------- | ------ |
+| **opentelemetry-instrumentation-anthropic**  | 48          | 47      | 1      |
+| **opentelemetry-instrumentation-haystack**   | 20          | 20      | 0      |
+| **opentelemetry-instrumentation-pinecone**   | 18          | 17      | 1      |
+| **opentelemetry-instrumentation-groq**       | 29          | 29      | 0      |
+| **Total**                                    | **115**     | **113** | **2**  |
+
+## Failure Details
+
+### opentelemetry-instrumentation-pinecone
+
+**TestSetSpanAttribute.test_set_attribute_with_none_name_and_valid_value** failed.
+
+- **Assertion failed:** Expected `set_attribute` to not be called, but it was called once.
+
+### opentelemetry-instrumentation-anthropic
+
+**TestSharedMetricsAttributes.test_shared_metrics_attributes_with_none_response** failed.
+
+- **Assertion failed:** Expected a structured response, but `None` was returned.
+
+## Key Additions
+
+### 1. Test Configuration
+
+- Updated **nx.json** to define `test:early` as a target for running EarlyAI tests across projects.
+- Updated **package.json** to include scripts for running EarlyAI tests.
+- Added a global **pytest.ini** file to manage test markers and configuration centrally.
+
+### 2. Test Execution Support
+
+- Tests can be executed across the **entire monorepo** or **per project**.
+- EarlyAI tests are displayed in the **Early** VS Code extension.
+
+## How to Run EarlyAI Tests
+
+### Run All EarlyAI Tests Across All Projects
+
+```bash
+npm run test:early
+```
+
+This command runs all EarlyAI tests across the monorepo.
+
+### Run EarlyAI Tests for a Specific Project
+
+```bash
+nx run <project-name>:test:early
+```
+
+Replace `<project-name>` with the relevant project (e.g., `opentelemetry-instrumentation-anthropic`).
+
+---
+
+## Technical Changes
+
+### 1. Updated `nx.json`
+
+We added a **global target** for EarlyAI test execution:
+
+```json
+"test:early": {
+  "executor": "@nxlv/python:run-commands",
+  "options": {
+    "command": ". .venv/Scripts/activate && poetry run pytest source/test_early_utils/",
+    "cwd": "{projectRoot}"
+  }
+}
+```
+
+(The `. .venv/Scripts/activate` prefix assumes a Windows-style virtualenv layout; on Linux/macOS the activation script lives at `.venv/bin/activate`.)
+
+### 2. Updated `package.json`
+
+Added a global script for running EarlyAI tests:
+
+```json
+"scripts": {
+  "test:early": "nx run-many --target=test:early"
+}
+```
+
+### 3. Added a Global `pytest.ini`
+
+Instead of managing individual `pytest.ini` files per project, we added a **global `pytest.ini`**:
+
+```ini
+[pytest]
+markers =
+    describe: Custom marker for test groups
+    happy_path: Tests the 'happy path' of a function
+    edge_case: Tests edge cases of a function
+```
+
+### 4. Added `test:early` Target in Each Project
+
+Each project where EarlyAI tests were added includes the following target in its `project.json`:
+
+```json
+"test:early": {
+  "executor": "@nxlv/python:run-commands",
+  "outputs": [
+    "{workspaceRoot}/reports/packages/opentelemetry-instrumentation-anthropic/unittests/early",
+    "{workspaceRoot}/coverage/packages/opentelemetry-instrumentation-anthropic/early"
+  ],
+  "options": {
+    "command": "poetry run pytest opentelemetry/instrumentation/anthropic/test_early_utils/",
+    "cwd": "packages/opentelemetry-instrumentation-anthropic"
+  }
+}
+```
+
+(Each project follows a similar structure, replacing **anthropic** with the respective project name.)
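+
+As a quick usage sketch, the markers registered above also let you run a single group of EarlyAI tests through pytest's `-m` flag (the anthropic package below is just an example; any project with a `test_early_utils/` folder works the same way):
+
+```bash
+cd packages/opentelemetry-instrumentation-anthropic
+
+# Run only the happy-path EarlyAI tests for this package
+poetry run pytest -m happy_path opentelemetry/instrumentation/anthropic/test_early_utils/
+
+# Run only the edge-case tests
+poetry run pytest -m edge_case opentelemetry/instrumentation/anthropic/test_early_utils/
+```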
+
+[EarlyAI for VS Code](vscode:extension/Early-AI.EarlyAI)
+
+[EarlyAI for Cursor](cursor:extension/Early-AI.EarlyAI)
diff --git a/nx.json b/nx.json
index 79d7fb8c9..a2564a2fb 100644
--- a/nx.json
+++ b/nx.json
@@ -1,5 +1,19 @@
 {
   "extends": "nx/presets/npm.json",
   "$schema": "./node_modules/nx/schemas/nx-schema.json",
-  "plugins": ["@nxlv/python"]
+  "plugins": ["@nxlv/python"],
+  "projects": {
+    "default": {
+      "root": ".",
+      "targets": {
+        "test:early": {
+          "executor": "@nxlv/python:run-commands",
+          "options": {
+            "command": ". .venv/Scripts/activate && poetry run pytest source/test_early_utils/",
+            "cwd": "{projectRoot}"
+          }
+        }
+      }
+    }
+  }
 }
diff --git a/package.json b/package.json
index fd4dfde7c..aaebcb41c 100644
--- a/package.json
+++ b/package.json
@@ -2,7 +2,9 @@
   "name": "openllmetry",
   "version": "0.0.0",
   "license": "MIT",
-  "scripts": {},
+  "scripts": {
+    "test:early": "nx run-many --target=test:early"
+  },
   "private": true,
   "devDependencies": {
     "@nxlv/python": "^20.2.0",
diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early__handle_exception.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early__handle_exception.py
new file mode 100644
index 000000000..02a439d3b
--- /dev/null
+++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early__handle_exception.py
@@ -0,0 +1,71 @@
+import pytest
+import logging
+from unittest.mock import Mock, patch
+from opentelemetry.instrumentation.anthropic.utils import dont_throw
+
+# Mock Config to control the behavior of exception_logger
+class MockConfig:
+    exception_logger = None
+
+# Patch the Config used in the module with our MockConfig
+@pytest.fixture(autouse=True)
+def patch_config():
+    with patch('opentelemetry.instrumentation.anthropic.utils.Config', MockConfig):
+        yield
+
+# Describe block for _handle_exception related tests
+@pytest.mark.describe("_handle_exception")
+class TestHandleException:
+
+    @pytest.mark.happy_path
+    def test_sync_function_no_exception(self):
+        """Test that a synchronous function runs without exceptions."""
+        @dont_throw
+        def no_exception_func():
+            return "success"
+
+        assert no_exception_func() == "success"
+
+    @pytest.mark.happy_path
+    @pytest.mark.asyncio
+    async def test_async_function_no_exception(self):
+        """Test that an asynchronous function runs without exceptions."""
+        @dont_throw
+        async def no_exception_func():
+            return "success"
+
+        assert await no_exception_func() == "success"
+
+    @pytest.mark.edge_case
+    def test_sync_function_with_exception(self, caplog):
+        """Test that a synchronous function logs an exception without raising it."""
+        @dont_throw
+        def exception_func():
+            raise ValueError("Test exception")
+
+        with caplog.at_level(logging.DEBUG):
+            exception_func()
+        assert "OpenLLMetry failed to trace in exception_func, error:" in caplog.text
+
+    @pytest.mark.edge_case
+    @pytest.mark.asyncio
+    async def test_async_function_with_exception(self, caplog):
+        """Test that an asynchronous function logs an exception without raising it."""
+        @dont_throw
+        async def exception_func():
+            raise ValueError("Test exception")
+
+        with caplog.at_level(logging.DEBUG):
+            await exception_func()
+        assert "OpenLLMetry failed to trace in exception_func, error:" in caplog.text
+
+    @pytest.mark.edge_case
+    def test_no_exception_logger(self):
+        """Test that no error occurs if exception_logger is None."""
MockConfig.exception_logger = None + + @dont_throw + def exception_func(): + raise ValueError("Test exception") + + exception_func() # Should not raise any error \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_acount_prompt_tokens_from_request.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_acount_prompt_tokens_from_request.py new file mode 100644 index 000000000..9c3ee5f9e --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_acount_prompt_tokens_from_request.py @@ -0,0 +1,55 @@ +import pytest +import asyncio +from unittest.mock import AsyncMock + +# Assuming the function is imported from the module +from opentelemetry.instrumentation.anthropic.utils import acount_prompt_tokens_from_request + +@pytest.mark.describe("acount_prompt_tokens_from_request") +class TestAcountPromptTokensFromRequest: + + @pytest.mark.happy_path + @pytest.mark.asyncio + async def test_single_prompt(self): + """Test with a single prompt string to ensure correct token counting.""" + anthropic = AsyncMock() + anthropic.count_tokens = AsyncMock(return_value=5) + request = {"prompt": "This is a test prompt."} + + result = await acount_prompt_tokens_from_request(anthropic, request) + + assert result == 5 + anthropic.count_tokens.assert_awaited_once_with("This is a test prompt.") + + + @pytest.mark.edge_case + @pytest.mark.asyncio + async def test_no_prompt_or_messages(self): + """Test with no prompt or messages to ensure zero tokens are counted.""" + anthropic = AsyncMock() + request = {} + + result = await acount_prompt_tokens_from_request(anthropic, request) + + assert result == 0 + anthropic.count_tokens.assert_not_awaited() + + @pytest.mark.edge_case + @pytest.mark.asyncio + async def test_message_with_non_string_content(self): + """Test with message content that is not a string to ensure it is ignored.""" + anthropic = AsyncMock() + anthropic.count_tokens = AsyncMock(return_value=0) + request = { + "messages": [ + {"content": 12345}, # Non-string content + {"content": None} # None content + ] + } + + result = await acount_prompt_tokens_from_request(anthropic, request) + + assert result == 0 + anthropic.count_tokens.assert_not_awaited() + + \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_async_wrapper.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_async_wrapper.py new file mode 100644 index 000000000..a67a0f963 --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_async_wrapper.py @@ -0,0 +1,73 @@ +import pytest +import asyncio +import logging +from unittest.mock import AsyncMock, MagicMock, patch +from opentelemetry.instrumentation.anthropic.utils import dont_throw + +# Mock Config to avoid side effects during testing +class MockConfig: + exception_logger = None + +@pytest.fixture(autouse=True) +def mock_config(): + with patch('opentelemetry.instrumentation.anthropic.utils.Config', new=MockConfig): + yield + +@pytest.mark.describe("Tests for async_wrapper") +class TestAsyncWrapper: + + @pytest.mark.happy_path + @pytest.mark.asyncio + async def test_async_wrapper_happy_path(self): + """Test 
async_wrapper with a coroutine that succeeds.""" + async def successful_coroutine(): + return "success" + + wrapped_func = dont_throw(successful_coroutine) + result = await wrapped_func() + assert result == "success" + + @pytest.mark.happy_path + @pytest.mark.asyncio + async def test_async_wrapper_with_args(self): + """Test async_wrapper with a coroutine that takes arguments.""" + async def coroutine_with_args(x, y): + return x + y + + wrapped_func = dont_throw(coroutine_with_args) + result = await wrapped_func(2, 3) + assert result == 5 + + @pytest.mark.edge_case + @pytest.mark.asyncio + async def test_async_wrapper_exception_handling(self, caplog): + """Test async_wrapper with a coroutine that raises an exception.""" + async def failing_coroutine(): + raise ValueError("Test exception") + + wrapped_func = dont_throw(failing_coroutine) + + with caplog.at_level(logging.DEBUG): + result = await wrapped_func() + assert result is None + assert "OpenLLMetry failed to trace in failing_coroutine" in caplog.text + + + @pytest.mark.edge_case + @pytest.mark.asyncio + async def test_async_wrapper_no_exception_logger(self, caplog): + """Test async_wrapper with a coroutine that raises an exception without an exception logger.""" + async def failing_coroutine(): + raise ValueError("Test exception") + + MockConfig.exception_logger = None + + wrapped_func = dont_throw(failing_coroutine) + + with caplog.at_level(logging.DEBUG): + result = await wrapped_func() + assert result is None + assert "OpenLLMetry failed to trace in failing_coroutine" in caplog.text + assert MockConfig.exception_logger is None + +# Note: The `@pytest.mark.asyncio` decorator is used to run async tests with pytest. \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_count_prompt_tokens_from_request.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_count_prompt_tokens_from_request.py new file mode 100644 index 000000000..f089a4630 --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_count_prompt_tokens_from_request.py @@ -0,0 +1,124 @@ +import pytest +from unittest.mock import Mock + +# Assuming the function is imported from the correct module +from opentelemetry.instrumentation.anthropic.utils import count_prompt_tokens_from_request + +@pytest.mark.describe("Tests for count_prompt_tokens_from_request") +class TestCountPromptTokensFromRequest: + + @pytest.mark.happy_path + def test_single_prompt_string(self): + """ + Test with a single prompt string to ensure the function counts tokens correctly. + """ + anthropic = Mock() + anthropic.count_tokens = Mock(return_value=5) + request = {"prompt": "Hello, world!"} + + result = count_prompt_tokens_from_request(anthropic, request) + + assert result == 5 + anthropic.count_tokens.assert_called_once_with("Hello, world!") + + @pytest.mark.happy_path + def test_multiple_messages_with_string_content(self): + """ + Test with multiple messages containing string content to ensure correct token counting. 
+ """ + anthropic = Mock() + anthropic.count_tokens = Mock(side_effect=[3, 4]) + request = { + "messages": [ + {"content": "Hi"}, + {"content": "How are you?"} + ] + } + + result = count_prompt_tokens_from_request(anthropic, request) + + assert result == 7 + anthropic.count_tokens.assert_any_call("Hi") + anthropic.count_tokens.assert_any_call("How are you?") + + @pytest.mark.happy_path + def test_messages_with_list_content(self): + """ + Test with messages containing list content to ensure correct token counting. + """ + anthropic = Mock() + anthropic.count_tokens = Mock(side_effect=[2, 3]) + request = { + "messages": [ + {"content": [{"type": "text", "text": "Hello"}, {"type": "text", "text": "World"}]} + ] + } + + result = count_prompt_tokens_from_request(anthropic, request) + + assert result == 5 + anthropic.count_tokens.assert_any_call("Hello") + anthropic.count_tokens.assert_any_call("World") + + @pytest.mark.edge_case + def test_empty_request(self): + """ + Test with an empty request to ensure the function returns zero tokens. + """ + anthropic = Mock() + request = {} + + result = count_prompt_tokens_from_request(anthropic, request) + + assert result == 0 + anthropic.count_tokens.assert_not_called() + + @pytest.mark.edge_case + def test_no_count_tokens_method(self): + """ + Test when the anthropic object does not have a count_tokens method. + """ + anthropic = Mock() + del anthropic.count_tokens + request = {"prompt": "Hello, world!"} + + result = count_prompt_tokens_from_request(anthropic, request) + + assert result == 0 + + @pytest.mark.edge_case + def test_non_string_content_in_messages(self): + """ + Test with non-string content in messages to ensure they are ignored. + """ + anthropic = Mock() + anthropic.count_tokens = Mock(return_value=0) + request = { + "messages": [ + {"content": 123}, + {"content": {"type": "image", "url": "http://example.com/image.png"}} + ] + } + + result = count_prompt_tokens_from_request(anthropic, request) + + assert result == 0 + anthropic.count_tokens.assert_not_called() + + @pytest.mark.edge_case + def test_mixed_content_types_in_list(self): + """ + Test with mixed content types in a list to ensure only text types are counted. 
+ """ + anthropic = Mock() + anthropic.count_tokens = Mock(return_value=3) + request = { + "messages": [ + {"content": [{"type": "text", "text": "Hello"}, {"type": "image", "url": "http://example.com/image.png"}]} + ] + } + + result = count_prompt_tokens_from_request(anthropic, request) + + assert result == 3 + anthropic.count_tokens.assert_called_once_with("Hello") \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_error_metrics_attributes.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_error_metrics_attributes.py new file mode 100644 index 000000000..efbbf680d --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_error_metrics_attributes.py @@ -0,0 +1,82 @@ +import pytest +from opentelemetry.instrumentation.anthropic.utils import error_metrics_attributes + +# Describe block for all tests related to error_metrics_attributes +@pytest.mark.describe("Tests for error_metrics_attributes function") +class TestErrorMetricsAttributes: + + @pytest.mark.happy_path + def test_happy_path_standard_exception(self): + """ + Test that error_metrics_attributes correctly extracts the error type + from a standard exception. + """ + exception = ValueError("An error occurred") + result = error_metrics_attributes(exception) + assert result == { + "gen_ai.system": "anthropic", + "error.type": "ValueError" + } + + @pytest.mark.happy_path + def test_happy_path_custom_exception(self): + """ + Test that error_metrics_attributes correctly extracts the error type + from a custom exception. + """ + class CustomException(Exception): + pass + + exception = CustomException("A custom error occurred") + result = error_metrics_attributes(exception) + assert result == { + "gen_ai.system": "anthropic", + "error.type": "CustomException" + } + + @pytest.mark.edge_case + def test_edge_case_empty_exception(self): + """ + Test that error_metrics_attributes handles an exception with no message. + """ + exception = Exception() + result = error_metrics_attributes(exception) + assert result == { + "gen_ai.system": "anthropic", + "error.type": "Exception" + } + + @pytest.mark.edge_case + def test_edge_case_non_standard_exception(self): + """ + Test that error_metrics_attributes handles a non-standard exception object. + """ + class NonStandardException: + __class__ = type("NonStandardException", (), {}) + + exception = NonStandardException() + result = error_metrics_attributes(exception) + assert result == { + "gen_ai.system": "anthropic", + "error.type": "NonStandardException" + } + + @pytest.mark.edge_case + def test_edge_case_subclass_exception(self): + """ + Test that error_metrics_attributes correctly identifies a subclassed exception. 
+ """ + class BaseException(Exception): + pass + + class SubclassException(BaseException): + pass + + exception = SubclassException("Subclass error") + result = error_metrics_attributes(exception) + assert result == { + "gen_ai.system": "anthropic", + "error.type": "SubclassException" + } + +# To run these tests, you would typically use the command: pytest -v \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_run_async.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_run_async.py new file mode 100644 index 000000000..6a5d6e4ec --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_run_async.py @@ -0,0 +1,50 @@ +import pytest +import asyncio +import unittest + + +# Import the run_async function from the specified path +from opentelemetry.instrumentation.anthropic.utils import run_async + +@pytest.mark.describe("run_async function tests") +class TestRunAsync: + + @pytest.mark.happy_path + def test_run_async_with_running_loop(self): + """ + Test that run_async executes a coroutine when an event loop is already running. + """ + async def sample_coroutine(): + return "success" + + # Mock asyncio.get_running_loop to simulate a running loop + with unittest.mock.patch('asyncio.get_running_loop', return_value=asyncio.get_event_loop()): + result = run_async(sample_coroutine()) + assert result is None # Since the function doesn't return anything + + @pytest.mark.happy_path + def test_run_async_without_running_loop(self): + """ + Test that run_async executes a coroutine when no event loop is running. + """ + async def sample_coroutine(): + return "success" + + # Mock asyncio.get_running_loop to raise RuntimeError, simulating no running loop + with unittest.mock.patch('asyncio.get_running_loop', side_effect=RuntimeError): + result = run_async(sample_coroutine()) + assert result is None # Since the function doesn't return anything + + + @pytest.mark.edge_case + def test_run_async_with_exception_in_coroutine(self): + """ + Test that run_async handles exceptions raised within the coroutine. + """ + async def failing_coroutine(): + raise ValueError("Intentional error") + + # Mock asyncio.run to capture the exception + with unittest.mock.patch('asyncio.run', side_effect=ValueError("Intentional error")): + with pytest.raises(ValueError): + run_async(failing_coroutine()) diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_set_span_attribute.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_set_span_attribute.py new file mode 100644 index 000000000..e563e66f4 --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_set_span_attribute.py @@ -0,0 +1,79 @@ +import pytest +from unittest.mock import Mock + +# Import the function to be tested +from opentelemetry.instrumentation.anthropic.utils import set_span_attribute + +# Describe block for set_span_attribute tests +@pytest.mark.describe("set_span_attribute function") +class TestSetSpanAttribute: + + @pytest.mark.happy_path + def test_set_attribute_with_valid_value(self): + """ + Test that set_span_attribute sets the attribute when a valid non-empty value is provided. 
+ """ + span = Mock() + set_span_attribute(span, "test.attribute", "valid_value") + span.set_attribute.assert_called_once_with("test.attribute", "valid_value") + + @pytest.mark.happy_path + def test_set_attribute_with_none_value(self): + """ + Test that set_span_attribute does not set the attribute when the value is None. + """ + span = Mock() + set_span_attribute(span, "test.attribute", None) + span.set_attribute.assert_not_called() + + @pytest.mark.happy_path + def test_set_attribute_with_empty_string(self): + """ + Test that set_span_attribute does not set the attribute when the value is an empty string. + """ + span = Mock() + set_span_attribute(span, "test.attribute", "") + span.set_attribute.assert_not_called() + + @pytest.mark.edge_case + def test_set_attribute_with_whitespace_string(self): + """ + Test that set_span_attribute sets the attribute when the value is a whitespace string. + """ + span = Mock() + set_span_attribute(span, "test.attribute", " ") + span.set_attribute.assert_called_once_with("test.attribute", " ") + + @pytest.mark.edge_case + def test_set_attribute_with_special_characters(self): + """ + Test that set_span_attribute sets the attribute when the value contains special characters. + """ + span = Mock() + special_value = "!@#$%^&*()_+" + set_span_attribute(span, "test.attribute", special_value) + span.set_attribute.assert_called_once_with("test.attribute", special_value) + + @pytest.mark.edge_case + def test_set_attribute_with_numeric_value(self): + """ + Test that set_span_attribute sets the attribute when the value is a numeric type. + """ + span = Mock() + numeric_value = 12345 + set_span_attribute(span, "test.attribute", numeric_value) + span.set_attribute.assert_called_once_with("test.attribute", numeric_value) + + @pytest.mark.edge_case + def test_set_attribute_with_boolean_value(self): + """ + Test that set_span_attribute sets the attribute when the value is a boolean type. + """ + span = Mock() + boolean_value = True + set_span_attribute(span, "test.attribute", boolean_value) + span.set_attribute.assert_called_once_with("test.attribute", boolean_value) + +# Run the tests +if __name__ == "__main__": + pytest.main() \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_shared_metrics_attributes.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_shared_metrics_attributes.py new file mode 100644 index 000000000..895cdd4b2 --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_shared_metrics_attributes.py @@ -0,0 +1,85 @@ +import pytest +from unittest.mock import MagicMock +from opentelemetry.instrumentation.anthropic.utils import shared_metrics_attributes +from opentelemetry.instrumentation.anthropic.config import Config +from opentelemetry.semconv_ai import SpanAttributes + +# Mock configuration for common metrics attributes +@pytest.fixture(autouse=True) +def mock_config(): + Config.get_common_metrics_attributes = MagicMock(return_value={"common_attr": "value"}) + +@pytest.mark.describe("shared_metrics_attributes") +class TestSharedMetricsAttributes: + + @pytest.mark.happy_path + def test_shared_metrics_attributes_with_valid_response(self): + """ + Test that shared_metrics_attributes returns the correct attributes + when given a valid response dictionary. 
+ """ + response = {"model": "test-model"} + expected_attributes = { + "common_attr": "value", + "gen_ai.system": "anthropic", + SpanAttributes.LLM_RESPONSE_MODEL: "test-model", + } + assert shared_metrics_attributes(response) == expected_attributes + + @pytest.mark.happy_path + def test_shared_metrics_attributes_with_empty_response(self): + """ + Test that shared_metrics_attributes returns the correct attributes + when given an empty response dictionary. + """ + response = {} + expected_attributes = { + "common_attr": "value", + "gen_ai.system": "anthropic", + SpanAttributes.LLM_RESPONSE_MODEL: None, + } + assert shared_metrics_attributes(response) == expected_attributes + + @pytest.mark.edge_case + def test_shared_metrics_attributes_with_non_dict_response(self): + """ + Test that shared_metrics_attributes correctly handles a non-dict response + by converting it to a dictionary using __dict__. + """ + class ResponseObject: + def __init__(self): + self.model = "object-model" + + response = ResponseObject() + expected_attributes = { + "common_attr": "value", + "gen_ai.system": "anthropic", + SpanAttributes.LLM_RESPONSE_MODEL: "object-model", + } + assert shared_metrics_attributes(response) == expected_attributes + + @pytest.mark.edge_case + def test_shared_metrics_attributes_with_none_response(self): + """ + Test that shared_metrics_attributes handles a None response gracefully. + """ + response = None + expected_attributes = { + "common_attr": "value", + "gen_ai.system": "anthropic", + SpanAttributes.LLM_RESPONSE_MODEL: None, + } + assert shared_metrics_attributes(response) == expected_attributes + + @pytest.mark.edge_case + def test_shared_metrics_attributes_with_unexpected_attributes(self): + """ + Test that shared_metrics_attributes ignores unexpected attributes in the response. + """ + response = {"unexpected": "value", "model": "test-model"} + expected_attributes = { + "common_attr": "value", + "gen_ai.system": "anthropic", + SpanAttributes.LLM_RESPONSE_MODEL: "test-model", + } + assert shared_metrics_attributes(response) == expected_attributes \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_should_send_prompts.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_should_send_prompts.py new file mode 100644 index 000000000..27257fef6 --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_should_send_prompts.py @@ -0,0 +1,65 @@ +import os +import pytest +from unittest.mock import patch +from opentelemetry.instrumentation.anthropic.utils import should_send_prompts + +@pytest.mark.describe("Tests for should_send_prompts function") +class TestShouldSendPrompts: + + @pytest.mark.happy_path + def test_should_send_prompts_env_var_true(self): + """ + Test that should_send_prompts returns True when the TRACELOOP_TRACE_CONTENT + environment variable is set to 'true'. + """ + with patch.dict(os.environ, {"TRACELOOP_TRACE_CONTENT": "true"}): + assert should_send_prompts() is True + + @pytest.mark.happy_path + def test_should_send_prompts_env_var_true_case_insensitive(self): + """ + Test that should_send_prompts returns True when the TRACELOOP_TRACE_CONTENT + environment variable is set to 'TRUE' (case insensitive). 
+ """ + with patch.dict(os.environ, {"TRACELOOP_TRACE_CONTENT": "TRUE"}): + assert should_send_prompts() is True + + + @pytest.mark.happy_path + def test_should_send_prompts_env_var_not_set(self): + """ + Test that should_send_prompts returns True when the TRACELOOP_TRACE_CONTENT + environment variable is not set, defaulting to 'true'. + """ + with patch.dict(os.environ, {}, clear=True): + assert should_send_prompts() is True + + @pytest.mark.edge_case + def test_should_send_prompts_override_enable_content_tracing(self): + """ + Test that should_send_prompts returns True when the context API has + 'override_enable_content_tracing' set to True, regardless of the environment variable. + """ + with patch.dict(os.environ, {"TRACELOOP_TRACE_CONTENT": "false"}): + with patch('opentelemetry.context.get_value', return_value=True): + assert should_send_prompts() is True + + @pytest.mark.edge_case + def test_should_send_prompts_override_enable_content_tracing_false(self): + """ + Test that should_send_prompts returns False when the context API has + 'override_enable_content_tracing' set to False and the environment variable is 'false'. + """ + with patch.dict(os.environ, {"TRACELOOP_TRACE_CONTENT": "false"}): + with patch('opentelemetry.context.get_value', return_value=False): + assert should_send_prompts() is False + + @pytest.mark.edge_case + def test_should_send_prompts_no_env_var_no_override(self): + """ + Test that should_send_prompts returns True when neither the environment variable + nor the context API override is set, defaulting to 'true'. + """ + with patch.dict(os.environ, {}, clear=True): + with patch('opentelemetry.context.get_value', return_value=False): + assert should_send_prompts() is True \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_sync_wrapper.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_sync_wrapper.py new file mode 100644 index 000000000..6bcf594b8 --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_sync_wrapper.py @@ -0,0 +1,52 @@ +import pytest +import logging +from unittest.mock import Mock, patch +from opentelemetry.instrumentation.anthropic.utils import dont_throw + +# Mock Config to avoid side effects during testing +class MockConfig: + exception_logger = Mock() + +# Sample function to be wrapped +def sample_function(x, y): + return x + y + +# Sample function to raise an exception +def exception_function(x, y): + raise ValueError("An error occurred") + +@pytest.mark.describe("sync_wrapper") +class TestSyncWrapper: + + @pytest.mark.happy_path + def test_sync_wrapper_happy_path(self): + """ + Test that sync_wrapper correctly returns the result of a function without exceptions. + """ + wrapped_function = dont_throw(sample_function) + result = wrapped_function(2, 3) + assert result == 5, "Expected the wrapped function to return the sum of 2 and 3" + + @pytest.mark.edge_case + def test_sync_wrapper_with_no_arguments(self): + """ + Test that sync_wrapper works with functions that take no arguments. 
+ """ + def no_arg_function(): + return "no args" + + wrapped_function = dont_throw(no_arg_function) + result = wrapped_function() + assert result == "no args", "Expected the wrapped function to return 'no args'" + + @pytest.mark.edge_case + def test_sync_wrapper_with_none_return(self): + """ + Test that sync_wrapper correctly handles functions that return None. + """ + def none_return_function(): + return None + + wrapped_function = dont_throw(none_return_function) + result = wrapped_function() + assert result is None, "Expected the wrapped function to return None" \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-anthropic/project.json b/packages/opentelemetry-instrumentation-anthropic/project.json index cb0420a1a..c859f22ef 100644 --- a/packages/opentelemetry-instrumentation-anthropic/project.json +++ b/packages/opentelemetry-instrumentation-anthropic/project.json @@ -63,6 +63,17 @@ "cwd": "packages/opentelemetry-instrumentation-anthropic" } }, + "test:early": { + "executor": "@nxlv/python:run-commands", + "outputs": [ + "{workspaceRoot}/reports/packages/opentelemetry-instrumentation-anthropic/unittests/early", + "{workspaceRoot}/coverage/packages/opentelemetry-instrumentation-anthropic/early" + ], + "options": { + "command": "poetry run pytest opentelemetry/instrumentation/anthropic/test_early_utils/", + "cwd": "packages/opentelemetry-instrumentation-anthropic" + } + }, "build-release": { "executor": "@nxlv/python:run-commands", "options": { diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_dont_throw.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_dont_throw.py new file mode 100644 index 000000000..94c9d4c9a --- /dev/null +++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_dont_throw.py @@ -0,0 +1,80 @@ +import pytest +import logging +from unittest.mock import Mock, patch +from opentelemetry.instrumentation.groq.utils import dont_throw + +# Configure logging to capture log messages for assertions +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + +# Describe block for dont_throw tests +@pytest.mark.describe("dont_throw") +class TestDontThrow: + + @pytest.mark.happy_path + def test_happy_path_function_execution(self): + """ + Test that a function wrapped with dont_throw executes successfully without exceptions. + """ + @dont_throw + def sample_function(x, y): + return x + y + + result = sample_function(2, 3) + assert result == 5, "The function should return the sum of the inputs." + + @pytest.mark.happy_path + def test_happy_path_no_exception_logging(self): + """ + Test that no exception is logged when the wrapped function executes without errors. + """ + @dont_throw + def sample_function(x, y): + return x + y + + with patch.object(logger, 'debug') as mock_debug: + sample_function(2, 3) + mock_debug.assert_not_called() + + @pytest.mark.edge_case + def test_edge_case_custom_exception_logger(self): + """ + Test that a custom exception logger is called when an exception occurs. 
+ """ + custom_logger = Mock() + + @dont_throw + def sample_function(x, y): + return x / y + + with patch('opentelemetry.instrumentation.groq.config.Config.exception_logger', custom_logger): + sample_function(2, 0) + custom_logger.assert_called_once() + + @pytest.mark.edge_case + def test_edge_case_function_with_no_arguments(self): + """ + Test that a function with no arguments wrapped with dont_throw executes correctly. + """ + @dont_throw + def sample_function(): + return "No args" + + result = sample_function() + assert result == "No args", "The function should return the expected string." + + @pytest.mark.edge_case + def test_edge_case_function_with_kwargs(self): + """ + Test that a function with keyword arguments wrapped with dont_throw executes correctly. + """ + @dont_throw + def sample_function(x, y=10): + return x + y + + result = sample_function(5, y=15) + assert result == 20, "The function should correctly handle keyword arguments." + +# Run the tests +if __name__ == "__main__": + pytest.main() \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_error_metrics_attributes.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_error_metrics_attributes.py new file mode 100644 index 000000000..08fc3d43d --- /dev/null +++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_error_metrics_attributes.py @@ -0,0 +1,68 @@ +import pytest +from opentelemetry.instrumentation.groq.utils import error_metrics_attributes + +# Describe block for all tests related to error_metrics_attributes +@pytest.mark.describe("Tests for error_metrics_attributes function") +class TestErrorMetricsAttributes: + + @pytest.mark.happy_path + def test_error_metrics_attributes_with_standard_exception(self): + """ + Test that error_metrics_attributes correctly extracts the class name + of a standard exception and returns the expected dictionary. + """ + exception = ValueError("An error occurred") + result = error_metrics_attributes(exception) + assert result == { + "gen_ai.system": "groq", + "error.type": "ValueError", + } + + @pytest.mark.happy_path + def test_error_metrics_attributes_with_custom_exception(self): + """ + Test that error_metrics_attributes correctly extracts the class name + of a custom exception and returns the expected dictionary. + """ + class CustomException(Exception): + pass + + exception = CustomException("A custom error occurred") + result = error_metrics_attributes(exception) + assert result == { + "gen_ai.system": "groq", + "error.type": "CustomException", + } + + @pytest.mark.edge_case + def test_error_metrics_attributes_with_no_message_exception(self): + """ + Test that error_metrics_attributes handles an exception with no message + and returns the expected dictionary. + """ + exception = Exception() + result = error_metrics_attributes(exception) + assert result == { + "gen_ai.system": "groq", + "error.type": "Exception", + } + + @pytest.mark.edge_case + def test_error_metrics_attributes_with_non_standard_exception(self): + """ + Test that error_metrics_attributes handles a non-standard exception + (not derived from Exception) and returns the expected dictionary. 
+ """ + class NonStandardException: + pass + + exception = NonStandardException() + result = error_metrics_attributes(exception) + assert result == { + "gen_ai.system": "groq", + "error.type": "NonStandardException", + } + +# Run the tests +if __name__ == "__main__": + pytest.main() \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_model_as_dict.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_model_as_dict.py new file mode 100644 index 000000000..0f808b4b8 --- /dev/null +++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_model_as_dict.py @@ -0,0 +1,71 @@ +import pytest +from unittest.mock import MagicMock, patch +from importlib.metadata import version +from opentelemetry.instrumentation.groq.utils import model_as_dict + +# Describe block for all tests related to model_as_dict +@pytest.mark.describe("model_as_dict function") +class TestModelAsDict: + + @pytest.mark.happy_path + def test_model_as_dict_with_pydantic_v1(self): + """ + Test that model_as_dict correctly calls the dict() method on a Pydantic v1 model. + """ + mock_model = MagicMock() + mock_model.dict.return_value = {"model": "test_model"} + + with patch("opentelemetry.instrumentation.groq.utils.version", return_value="1.9.0"): + result = model_as_dict(mock_model) + + assert result == {"model": "test_model"} + mock_model.dict.assert_called_once() + + @pytest.mark.happy_path + def test_model_as_dict_with_pydantic_v2(self): + """ + Test that model_as_dict correctly calls the model_dump() method on a Pydantic v2 model. + """ + mock_model = MagicMock() + mock_model.model_dump.return_value = {"model": "test_model"} + + with patch("opentelemetry.instrumentation.groq.utils.version", return_value="2.0.0"): + result = model_as_dict(mock_model) + + assert result == {"model": "test_model"} + mock_model.model_dump.assert_called_once() + + + @pytest.mark.edge_case + def test_model_as_dict_with_non_pydantic_object(self): + """ + Test that model_as_dict returns the object itself if it is not a Pydantic model. + """ + non_pydantic_object = {"model": "non_pydantic_model"} + + result = model_as_dict(non_pydantic_object) + + assert result == non_pydantic_object + + @pytest.mark.edge_case + def test_model_as_dict_with_empty_model(self): + """ + Test that model_as_dict handles an empty model gracefully. + """ + empty_model = MagicMock() + empty_model.dict.return_value = {} + + with patch("opentelemetry.instrumentation.groq.utils.version", return_value="1.9.0"): + result = model_as_dict(empty_model) + + assert result == {} + empty_model.dict.assert_called_once() + + @pytest.mark.edge_case + def test_model_as_dict_with_none(self): + """ + Test that model_as_dict returns None when given None as input. 
+ """ + result = model_as_dict(None) + + assert result is None \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_set_span_attribute.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_set_span_attribute.py new file mode 100644 index 000000000..cf8ec2322 --- /dev/null +++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_set_span_attribute.py @@ -0,0 +1,74 @@ +import pytest +from unittest.mock import Mock + +# Assuming the set_span_attribute function is imported from the correct module +from opentelemetry.instrumentation.groq.utils import set_span_attribute + +@pytest.mark.describe("Tests for set_span_attribute function") +class TestSetSpanAttribute: + + @pytest.mark.happy_path + def test_set_attribute_with_valid_value(self): + """ + Test that set_span_attribute sets the attribute when a valid non-empty value is provided. + """ + span = Mock() + set_span_attribute(span, "test.attribute", "valid_value") + span.set_attribute.assert_called_once_with("test.attribute", "valid_value") + + @pytest.mark.happy_path + def test_set_attribute_with_none_value(self): + """ + Test that set_span_attribute does not set the attribute when the value is None. + """ + span = Mock() + set_span_attribute(span, "test.attribute", None) + span.set_attribute.assert_not_called() + + @pytest.mark.happy_path + def test_set_attribute_with_empty_string(self): + """ + Test that set_span_attribute does not set the attribute when the value is an empty string. + """ + span = Mock() + set_span_attribute(span, "test.attribute", "") + span.set_attribute.assert_not_called() + + @pytest.mark.edge_case + def test_set_attribute_with_whitespace_string(self): + """ + Test that set_span_attribute sets the attribute when the value is a whitespace string. + """ + span = Mock() + set_span_attribute(span, "test.attribute", " ") + span.set_attribute.assert_called_once_with("test.attribute", " ") + + @pytest.mark.edge_case + def test_set_attribute_with_special_characters(self): + """ + Test that set_span_attribute sets the attribute when the value contains special characters. + """ + span = Mock() + special_value = "!@#$%^&*()_+" + set_span_attribute(span, "test.attribute", special_value) + span.set_attribute.assert_called_once_with("test.attribute", special_value) + + @pytest.mark.edge_case + def test_set_attribute_with_numeric_value(self): + """ + Test that set_span_attribute sets the attribute when the value is a numeric type. + """ + span = Mock() + numeric_value = 12345 + set_span_attribute(span, "test.attribute", numeric_value) + span.set_attribute.assert_called_once_with("test.attribute", numeric_value) + + @pytest.mark.edge_case + def test_set_attribute_with_boolean_value(self): + """ + Test that set_span_attribute sets the attribute when the value is a boolean type. 
+ """ + span = Mock() + boolean_value = True + set_span_attribute(span, "test.attribute", boolean_value) + span.set_attribute.assert_called_once_with("test.attribute", boolean_value) \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_shared_metrics_attributes.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_shared_metrics_attributes.py new file mode 100644 index 000000000..b104fb615 --- /dev/null +++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_shared_metrics_attributes.py @@ -0,0 +1,96 @@ +import pytest +from unittest.mock import MagicMock, patch +from opentelemetry.instrumentation.groq.utils import shared_metrics_attributes +from opentelemetry.instrumentation.groq.config import Config +from opentelemetry.semconv_ai import SpanAttributes + +# Mocking the Config class to control the behavior of get_common_metrics_attributes +@pytest.fixture +def mock_config(): + with patch('opentelemetry.instrumentation.groq.config.Config.get_common_metrics_attributes') as mock: + yield mock + +# Mocking the model_as_dict function +@pytest.fixture +def mock_model_as_dict(): + with patch('opentelemetry.instrumentation.groq.utils.model_as_dict') as mock: + yield mock + +@pytest.mark.describe("shared_metrics_attributes") +class TestSharedMetricsAttributes: + + @pytest.mark.happy_path + def test_shared_metrics_attributes_with_valid_response(self, mock_config, mock_model_as_dict): + """ + Test that shared_metrics_attributes returns the correct attributes + when given a valid response object. + """ + # Arrange + mock_config.return_value = {"common_attr": "value"} + mock_model_as_dict.return_value = {"model": "test_model"} + response = MagicMock() + + # Act + result = shared_metrics_attributes(response) + + # Assert + assert result == { + "common_attr": "value", + "gen_ai.system": "groq", + SpanAttributes.LLM_RESPONSE_MODEL: "test_model" + } + + @pytest.mark.happy_path + def test_shared_metrics_attributes_with_empty_common_attributes(self, mock_config, mock_model_as_dict): + """ + Test that shared_metrics_attributes handles empty common attributes correctly. + """ + # Arrange + mock_config.return_value = {} + mock_model_as_dict.return_value = {"model": "test_model"} + response = MagicMock() + + # Act + result = shared_metrics_attributes(response) + + # Assert + assert result == { + "gen_ai.system": "groq", + SpanAttributes.LLM_RESPONSE_MODEL: "test_model" + } + + @pytest.mark.edge_case + def test_shared_metrics_attributes_with_no_model_in_response(self, mock_config, mock_model_as_dict): + """ + Test that shared_metrics_attributes handles a response with no model attribute. + """ + # Arrange + mock_config.return_value = {"common_attr": "value"} + mock_model_as_dict.return_value = {} + response = MagicMock() + + # Act + result = shared_metrics_attributes(response) + + # Assert + assert result == { + "common_attr": "value", + "gen_ai.system": "groq", + SpanAttributes.LLM_RESPONSE_MODEL: None + } + + @pytest.mark.edge_case + def test_shared_metrics_attributes_with_exception_in_model_as_dict(self, mock_config, mock_model_as_dict): + """ + Test that shared_metrics_attributes handles exceptions in model_as_dict gracefully. 
+ """ + # Arrange + mock_config.return_value = {"common_attr": "value"} + mock_model_as_dict.side_effect = Exception("Test Exception") + response = MagicMock() + + # Act + result = shared_metrics_attributes(response) + + # Assert + assert result is None # Since the function is decorated with @dont_throw, it should return None on exception \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_should_send_prompts.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_should_send_prompts.py new file mode 100644 index 000000000..2cd711120 --- /dev/null +++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_should_send_prompts.py @@ -0,0 +1,48 @@ +import os +import pytest +from unittest import mock +from opentelemetry.instrumentation.groq.utils import should_send_prompts + +# Describe block for should_send_prompts tests +@pytest.mark.describe("Tests for should_send_prompts function") +class TestShouldSendPrompts: + + @pytest.mark.happy_path + def test_should_send_prompts_env_var_true(self): + """ + Test that should_send_prompts returns True when TRACELOOP_TRACE_CONTENT is set to 'true'. + """ + with mock.patch.dict(os.environ, {"TRACELOOP_TRACE_CONTENT": "true"}): + assert should_send_prompts() is True + + @pytest.mark.happy_path + def test_should_send_prompts_env_var_true_case_insensitive(self): + """ + Test that should_send_prompts returns True when TRACELOOP_TRACE_CONTENT is set to 'TRUE' (case insensitive). + """ + with mock.patch.dict(os.environ, {"TRACELOOP_TRACE_CONTENT": "TRUE"}): + assert should_send_prompts() is True + + @pytest.mark.happy_path + def test_should_send_prompts_env_var_not_set(self): + """ + Test that should_send_prompts returns True when TRACELOOP_TRACE_CONTENT is not set and context_api.get_value returns True. + """ + with mock.patch.dict(os.environ, {}, clear=True): + with mock.patch('opentelemetry.context.get_value', return_value=True): + assert should_send_prompts() is True + + @pytest.mark.edge_case + def test_should_send_prompts_context_override(self): + """ + Test that should_send_prompts returns True when context_api.get_value returns True, regardless of TRACELOOP_TRACE_CONTENT. 
+ """ + with mock.patch.dict(os.environ, {"TRACELOOP_TRACE_CONTENT": "false"}): + with mock.patch('opentelemetry.context.get_value', return_value=True): + assert should_send_prompts() is True + + + +# Run the tests +if __name__ == "__main__": + pytest.main() \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-groq/project.json b/packages/opentelemetry-instrumentation-groq/project.json index 05b23027a..1c49372f1 100644 --- a/packages/opentelemetry-instrumentation-groq/project.json +++ b/packages/opentelemetry-instrumentation-groq/project.json @@ -63,6 +63,17 @@ "cwd": "packages/opentelemetry-instrumentation-groq" } }, + "test:early": { + "executor": "@nxlv/python:run-commands", + "outputs": [ + "{workspaceRoot}/reports/packages/opentelemetry-instrumentation-groq/unittests/early", + "{workspaceRoot}/coverage/packages/opentelemetry-instrumentation-groq/early" + ], + "options": { + "command": "poetry run pytest opentelemetry/instrumentation/groq/test_early_utils/", + "cwd": "packages/opentelemetry-instrumentation-groq" + } + }, "build-release": { "executor": "@nxlv/python:run-commands", "options": { diff --git a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/__init__.py b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_request.py b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_request.py new file mode 100644 index 000000000..b779ff25c --- /dev/null +++ b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_request.py @@ -0,0 +1,96 @@ +import pytest +from unittest.mock import Mock, patch +import json +from opentelemetry.instrumentation.haystack.utils import process_request +from opentelemetry.semconv_ai import SpanAttributes + +# Mocking the context API and Config for testing +@pytest.fixture(autouse=True) +def mock_context_api(monkeypatch): + mock_context = Mock() + monkeypatch.setattr("opentelemetry.instrumentation.haystack.utils.context_api", mock_context) + return mock_context + +@pytest.fixture(autouse=True) +def mock_config(monkeypatch): + mock_config = Mock() + monkeypatch.setattr("opentelemetry.instrumentation.haystack.utils.Config", mock_config) + return mock_config + +@pytest.fixture +def mock_span(): + return Mock() + +@pytest.mark.describe("process_request function") +class TestProcessRequest: + + + @pytest.mark.happy_path + def test_process_request_with_empty_args_and_kwargs(self, mock_span): + """ + Test that process_request handles empty args and kwargs gracefully. + """ + args = () + kwargs = {} + + with patch("opentelemetry.instrumentation.haystack.utils.should_send_prompts", return_value=True): + process_request(mock_span, args, kwargs) + + expected_input_entity = { + "args": [], + "kwargs": {} + } + mock_span.set_attribute.assert_called_once_with( + SpanAttributes.TRACELOOP_ENTITY_INPUT, + json.dumps(expected_input_entity) + ) + + @pytest.mark.edge_case + def test_process_request_with_non_dict_args(self, mock_span): + """ + Test that process_request correctly handles non-dict args. 
+ """ + args = ("arg1", 123, 45.6) + kwargs = {"kwarg1": "value1"} + + with patch("opentelemetry.instrumentation.haystack.utils.should_send_prompts", return_value=True): + process_request(mock_span, args, kwargs) + + expected_input_entity = { + "args": ["arg1", 123, 45.6], + "kwargs": {"kwarg1": "value1"} + } + mock_span.set_attribute.assert_called_once_with( + SpanAttributes.TRACELOOP_ENTITY_INPUT, + json.dumps(expected_input_entity) + ) + + @pytest.mark.edge_case + def test_process_request_with_should_send_prompts_false(self, mock_span): + """ + Test that process_request does not set attributes when should_send_prompts is False. + """ + args = ({"key1": "value1"},) + kwargs = {"kwarg1": "value2"} + + with patch("opentelemetry.instrumentation.haystack.utils.should_send_prompts", return_value=False): + process_request(mock_span, args, kwargs) + + mock_span.set_attribute.assert_not_called() + + @pytest.mark.edge_case + def test_process_request_with_exception_handling(self, mock_span, mock_config): + """ + Test that process_request handles exceptions and logs them without throwing. + """ + args = ({"key1": "value1"},) + kwargs = {"kwarg1": "value2"} + + # Simulate an exception in set_attribute + mock_span.set_attribute.side_effect = Exception("Test exception") + + with patch("opentelemetry.instrumentation.haystack.utils.should_send_prompts", return_value=True): + process_request(mock_span, args, kwargs) + + # Ensure exception logger is called + mock_config.exception_logger.assert_called_once() \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_response.py b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_response.py new file mode 100644 index 000000000..b3f749b16 --- /dev/null +++ b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_response.py @@ -0,0 +1,98 @@ +import pytest +from unittest.mock import Mock, patch +import json +from opentelemetry.instrumentation.haystack.utils import process_response +from opentelemetry.semconv_ai import SpanAttributes + +# Mocking the should_send_prompts function to control its behavior during tests +@pytest.fixture +def mock_should_send_prompts(): + with patch('opentelemetry.instrumentation.haystack.utils.should_send_prompts') as mock: + yield mock + +# Describe block for process_response tests +@pytest.mark.describe("process_response function") +class TestProcessResponse: + + @pytest.mark.happy_path + def test_process_response_happy_path(self, mock_should_send_prompts): + """ + Test that process_response sets the correct attribute on the span + when should_send_prompts returns True. + """ + mock_should_send_prompts.return_value = True + span = Mock() + response = {"key": "value"} + + process_response(span, response) + + span.set_attribute.assert_called_once_with( + SpanAttributes.TRACELOOP_ENTITY_OUTPUT, + json.dumps(response) + ) + + @pytest.mark.happy_path + def test_process_response_no_attribute_set_when_prompts_disabled(self, mock_should_send_prompts): + """ + Test that process_response does not set any attribute on the span + when should_send_prompts returns False. 
+ """ + mock_should_send_prompts.return_value = False + span = Mock() + response = {"key": "value"} + + process_response(span, response) + + span.set_attribute.assert_not_called() + + @pytest.mark.edge_case + def test_process_response_with_empty_response(self, mock_should_send_prompts): + """ + Test that process_response handles an empty response correctly. + """ + mock_should_send_prompts.return_value = True + span = Mock() + response = {} + + process_response(span, response) + + span.set_attribute.assert_called_once_with( + SpanAttributes.TRACELOOP_ENTITY_OUTPUT, + json.dumps(response) + ) + + @pytest.mark.edge_case + def test_process_response_with_none_response(self, mock_should_send_prompts): + """ + Test that process_response handles a None response gracefully. + """ + mock_should_send_prompts.return_value = True + span = Mock() + response = None + + process_response(span, response) + + span.set_attribute.assert_called_once_with( + SpanAttributes.TRACELOOP_ENTITY_OUTPUT, + json.dumps(response) + ) + + @pytest.mark.edge_case + def test_process_response_with_complex_response(self, mock_should_send_prompts): + """ + Test that process_response can handle complex nested response objects. + """ + mock_should_send_prompts.return_value = True + span = Mock() + response = {"key": {"nested_key": "nested_value"}} + + process_response(span, response) + + span.set_attribute.assert_called_once_with( + SpanAttributes.TRACELOOP_ENTITY_OUTPUT, + json.dumps(response) + ) + +# Run the tests +if __name__ == "__main__": + pytest.main() \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_set_span_attribute.py b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_set_span_attribute.py new file mode 100644 index 000000000..d6899ec9e --- /dev/null +++ b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_set_span_attribute.py @@ -0,0 +1,101 @@ +import pytest +from unittest.mock import Mock + +# Assuming the set_span_attribute function is imported from the correct module +from opentelemetry.instrumentation.haystack.utils import set_span_attribute + +@pytest.mark.describe("set_span_attribute") +class TestSetSpanAttribute: + + @pytest.mark.happy_path + def test_set_span_attribute_with_valid_value(self): + """ + Test that set_span_attribute sets the attribute on the span when a valid value is provided. + """ + span = Mock() + name = "test.attribute" + value = "test_value" + + set_span_attribute(span, name, value) + + span.set_attribute.assert_called_once_with(name, value) + + @pytest.mark.happy_path + def test_set_span_attribute_with_empty_string(self): + """ + Test that set_span_attribute does not set the attribute when the value is an empty string. + """ + span = Mock() + name = "test.attribute" + value = "" + + set_span_attribute(span, name, value) + + span.set_attribute.assert_not_called() + + @pytest.mark.happy_path + def test_set_span_attribute_with_none_value(self): + """ + Test that set_span_attribute does not set the attribute when the value is None. + """ + span = Mock() + name = "test.attribute" + value = None + + set_span_attribute(span, name, value) + + span.set_attribute.assert_not_called() + + @pytest.mark.edge_case + def test_set_span_attribute_with_numeric_value(self): + """ + Test that set_span_attribute sets the attribute on the span when a numeric value is provided. 
+ """ + span = Mock() + name = "test.attribute" + value = 123 + + set_span_attribute(span, name, value) + + span.set_attribute.assert_called_once_with(name, value) + + @pytest.mark.edge_case + def test_set_span_attribute_with_boolean_value(self): + """ + Test that set_span_attribute sets the attribute on the span when a boolean value is provided. + """ + span = Mock() + name = "test.attribute" + value = True + + set_span_attribute(span, name, value) + + span.set_attribute.assert_called_once_with(name, value) + + @pytest.mark.edge_case + def test_set_span_attribute_with_special_characters(self): + """ + Test that set_span_attribute sets the attribute on the span when the value contains special characters. + """ + span = Mock() + name = "test.attribute" + value = "!@#$%^&*()_+" + + set_span_attribute(span, name, value) + + span.set_attribute.assert_called_once_with(name, value) + + @pytest.mark.edge_case + def test_set_span_attribute_with_large_string(self): + """ + Test that set_span_attribute sets the attribute on the span when a very large string is provided. + """ + span = Mock() + name = "test.attribute" + value = "a" * 10000 # Large string + + set_span_attribute(span, name, value) + + span.set_attribute.assert_called_once_with(name, value) + +# To run the tests, use the command: pytest -v \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_should_send_prompts.py b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_should_send_prompts.py new file mode 100644 index 000000000..5ef90b2f2 --- /dev/null +++ b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_should_send_prompts.py @@ -0,0 +1,39 @@ +import os +import pytest +from unittest import mock +from opentelemetry.instrumentation.haystack.utils import should_send_prompts +from opentelemetry import context as context_api + +@pytest.mark.describe("Tests for should_send_prompts function") +class TestShouldSendPrompts: + + @pytest.mark.happy_path + def test_should_send_prompts_env_var_true(self): + """Test should_send_prompts returns True when TRACELOOP_TRACE_CONTENT is 'true'.""" + with mock.patch.dict(os.environ, {"TRACELOOP_TRACE_CONTENT": "true"}): + assert should_send_prompts() is True + + + @pytest.mark.happy_path + def test_should_send_prompts_env_var_not_set(self): + """Test should_send_prompts returns True when TRACELOOP_TRACE_CONTENT is not set.""" + with mock.patch.dict(os.environ, {}, clear=True): + assert should_send_prompts() is True + + + + @pytest.mark.edge_case + def test_should_send_prompts_env_var_case_insensitivity(self): + """Test should_send_prompts handles TRACELOOP_TRACE_CONTENT case insensitively.""" + with mock.patch.dict(os.environ, {"TRACELOOP_TRACE_CONTENT": "TrUe"}): + assert should_send_prompts() is True + + + @pytest.mark.edge_case + def test_should_send_prompts_no_env_var_and_no_context(self): + """Test should_send_prompts returns True when neither TRACELOOP_TRACE_CONTENT nor context is set.""" + with mock.patch.dict(os.environ, {}, clear=True): + context_api.set_value("override_enable_content_tracing", None) + assert should_send_prompts() is True + +# To run the tests, use the command: pytest -v \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-haystack/project.json b/packages/opentelemetry-instrumentation-haystack/project.json index 
598c58232..ddee120df 100644 --- a/packages/opentelemetry-instrumentation-haystack/project.json +++ b/packages/opentelemetry-instrumentation-haystack/project.json @@ -63,6 +63,17 @@ "cwd": "packages/opentelemetry-instrumentation-haystack" } }, + "test:early": { + "executor": "@nxlv/python:run-commands", + "outputs": [ + "{workspaceRoot}/reports/packages/opentelemetry-instrumentation-haystack/unittests/early", + "{workspaceRoot}/coverage/packages/opentelemetry-instrumentation-haystack/early" + ], + "options": { + "command": "poetry run pytest opentelemetry/instrumentation/haystack/test_early_utils/", + "cwd": "packages/opentelemetry-instrumentation-haystack" + } + }, "build-release": { "executor": "@nxlv/python:run-commands", "options": { diff --git a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/__init__.py b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_dont_throw.py b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_dont_throw.py new file mode 100644 index 000000000..47ce73c7d --- /dev/null +++ b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_dont_throw.py @@ -0,0 +1,48 @@ +import pytest +import logging +from unittest.mock import Mock, patch +from opentelemetry.instrumentation.pinecone.utils import dont_throw + +# Create a mock logger to capture log outputs +class MockLogger: + def __init__(self): + self.messages = [] + + def debug(self, msg, *args): + self.messages.append(msg % args) + +@pytest.mark.describe("dont_throw") +class TestDontThrow: + + @pytest.mark.happy_path + def test_function_executes_without_exception(self): + """Test that the wrapped function executes successfully without exceptions.""" + mock_func = Mock(return_value="success") + wrapped_func = dont_throw(mock_func) + + result = wrapped_func() + + assert result == "success" + mock_func.assert_called_once() + + @pytest.mark.happy_path + def test_function_with_arguments(self): + """Test that the wrapped function executes successfully with arguments.""" + mock_func = Mock(return_value="success") + wrapped_func = dont_throw(mock_func) + + result = wrapped_func(1, 2, key="value") + + assert result == "success" + mock_func.assert_called_once_with(1, 2, key="value") + + @pytest.mark.edge_case + def test_function_with_no_return_value(self): + """Test that the wrapped function handles functions with no return value.""" + mock_func = Mock(return_value=None) + wrapped_func = dont_throw(mock_func) + + result = wrapped_func() + + assert result is None + mock_func.assert_called_once() \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_is_metrics_enabled.py b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_is_metrics_enabled.py new file mode 100644 index 000000000..ef37f4614 --- /dev/null +++ b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_is_metrics_enabled.py @@ -0,0 +1,78 @@ +import os +import pytest +from opentelemetry.instrumentation.pinecone.utils import 
is_metrics_enabled + +@pytest.mark.describe("is_metrics_enabled function") +class TestIsMetricsEnabled: + + @pytest.mark.happy_path + def test_metrics_enabled_true(self): + """ + Test that is_metrics_enabled returns True when TRACELOOP_METRICS_ENABLED is set to 'true'. + """ + os.environ["TRACELOOP_METRICS_ENABLED"] = "true" + assert is_metrics_enabled() is True + + @pytest.mark.happy_path + def test_metrics_enabled_true_case_insensitive(self): + """ + Test that is_metrics_enabled returns True when TRACELOOP_METRICS_ENABLED is set to 'TRUE' (case insensitive). + """ + os.environ["TRACELOOP_METRICS_ENABLED"] = "TRUE" + assert is_metrics_enabled() is True + + @pytest.mark.happy_path + def test_metrics_enabled_false(self): + """ + Test that is_metrics_enabled returns False when TRACELOOP_METRICS_ENABLED is set to 'false'. + """ + os.environ["TRACELOOP_METRICS_ENABLED"] = "false" + assert is_metrics_enabled() is False + + @pytest.mark.happy_path + def test_metrics_enabled_false_case_insensitive(self): + """ + Test that is_metrics_enabled returns False when TRACELOOP_METRICS_ENABLED is set to 'FALSE' (case insensitive). + """ + os.environ["TRACELOOP_METRICS_ENABLED"] = "FALSE" + assert is_metrics_enabled() is False + + @pytest.mark.edge_case + def test_metrics_enabled_unset(self): + """ + Test that is_metrics_enabled returns True when TRACELOOP_METRICS_ENABLED is not set. + """ + if "TRACELOOP_METRICS_ENABLED" in os.environ: + del os.environ["TRACELOOP_METRICS_ENABLED"] + assert is_metrics_enabled() is True + + @pytest.mark.edge_case + def test_metrics_enabled_random_string(self): + """ + Test that is_metrics_enabled returns False when TRACELOOP_METRICS_ENABLED is set to a random string. + """ + os.environ["TRACELOOP_METRICS_ENABLED"] = "random_string" + assert is_metrics_enabled() is False + + @pytest.mark.edge_case + def test_metrics_enabled_numeric_string(self): + """ + Test that is_metrics_enabled returns False when TRACELOOP_METRICS_ENABLED is set to a numeric string. + """ + os.environ["TRACELOOP_METRICS_ENABLED"] = "123" + assert is_metrics_enabled() is False + + @pytest.mark.edge_case + def test_metrics_enabled_none_string(self): + """ + Test that is_metrics_enabled returns False when TRACELOOP_METRICS_ENABLED is set to 'None'. 
+ """ + os.environ["TRACELOOP_METRICS_ENABLED"] = "None" + assert is_metrics_enabled() is False + +# Clean up environment variable after tests +@pytest.fixture(autouse=True) +def cleanup_env(): + yield + if "TRACELOOP_METRICS_ENABLED" in os.environ: + del os.environ["TRACELOOP_METRICS_ENABLED"] \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_set_span_attribute.py b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_set_span_attribute.py new file mode 100644 index 000000000..249453deb --- /dev/null +++ b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_set_span_attribute.py @@ -0,0 +1,73 @@ +import pytest +from unittest.mock import Mock + +# Assuming the set_span_attribute function is imported from the correct module +from opentelemetry.instrumentation.pinecone.utils import set_span_attribute + +@pytest.mark.describe("Tests for set_span_attribute function") +class TestSetSpanAttribute: + + @pytest.mark.happy_path + def test_set_attribute_with_valid_name_and_value(self): + """ + Test that set_span_attribute sets the attribute when both name and value are valid. + """ + span = Mock() + set_span_attribute(span, "test_name", "test_value") + span.set_attribute.assert_called_once_with("test_name", "test_value") + + @pytest.mark.happy_path + def test_set_attribute_with_valid_name_and_empty_value(self): + """ + Test that set_span_attribute does not set the attribute when the value is an empty string. + """ + span = Mock() + set_span_attribute(span, "test_name", "") + span.set_attribute.assert_not_called() + + @pytest.mark.happy_path + def test_set_attribute_with_valid_name_and_none_value(self): + """ + Test that set_span_attribute does not set the attribute when the value is None. + """ + span = Mock() + set_span_attribute(span, "test_name", None) + span.set_attribute.assert_not_called() + + @pytest.mark.edge_case + def test_set_attribute_with_empty_name_and_valid_value(self): + """ + Test that set_span_attribute sets the attribute when the name is empty but the value is valid. + """ + span = Mock() + set_span_attribute(span, "", "test_value") + span.set_attribute.assert_called_once_with("", "test_value") + + @pytest.mark.edge_case + def test_set_attribute_with_none_name_and_valid_value(self): + """ + Test that set_span_attribute does not set the attribute when the name is None. + """ + span = Mock() + set_span_attribute(span, None, "test_value") + span.set_attribute.assert_not_called() + + @pytest.mark.edge_case + def test_set_attribute_with_special_characters_in_name_and_value(self): + """ + Test that set_span_attribute sets the attribute when the name and value contain special characters. + """ + span = Mock() + set_span_attribute(span, "name!@#", "value$%^") + span.set_attribute.assert_called_once_with("name!@#", "value$%^") + + @pytest.mark.edge_case + def test_set_attribute_with_numeric_name_and_value(self): + """ + Test that set_span_attribute sets the attribute when the name and value are numeric. 
+ """ + span = Mock() + set_span_attribute(span, 123, 456) + span.set_attribute.assert_called_once_with(123, 456) + +# To run the tests, use the command: pytest -v \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-pinecone/project.json b/packages/opentelemetry-instrumentation-pinecone/project.json index 56e0950e2..0c3cdc308 100644 --- a/packages/opentelemetry-instrumentation-pinecone/project.json +++ b/packages/opentelemetry-instrumentation-pinecone/project.json @@ -63,6 +63,17 @@ "cwd": "packages/opentelemetry-instrumentation-pinecone" } }, + "test:early": { + "executor": "@nxlv/python:run-commands", + "outputs": [ + "{workspaceRoot}/reports/packages/opentelemetry-instrumentation-pinecone/unittests/early", + "{workspaceRoot}/coverage/packages/opentelemetry-instrumentation-pinecone/early" + ], + "options": { + "command": "poetry run pytest opentelemetry/instrumentation/pinecone/test_early_utils/", + "cwd": "packages/opentelemetry-instrumentation-pinecone" + } + }, "build-release": { "executor": "@nxlv/python:run-commands", "options": { diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 000000000..b8bd36f86 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,5 @@ +[pytest] +markers = + describe: Custom marker for describing test groups + happy_path: Tests the 'happy path' of a function + edge_case: Tests edge cases of a function From 86d8cac37f361b1e1f5d416a0cbf6a36da2041c0 Mon Sep 17 00:00:00 2001 From: itamarEarly Date: Tue, 4 Feb 2025 10:38:47 +0200 Subject: [PATCH 2/2] fix: applied autopep8 formatting for linting errors --- .../instrumentation/anthropic/__init__.py | 33 ++++++-------- .../instrumentation/anthropic/config.py | 1 + .../instrumentation/anthropic/streaming.py | 12 ++---- .../test_early__handle_exception.py | 11 ++++- ...early_acount_prompt_tokens_from_request.py | 23 +++++----- .../test_early_async_wrapper.py | 10 +++-- ..._early_count_prompt_tokens_from_request.py | 43 +++++++++---------- .../test_early_error_metrics_attributes.py | 7 ++- .../test_early_utils/test_early_run_async.py | 5 +-- .../test_early_set_span_attribute.py | 7 ++- .../test_early_shared_metrics_attributes.py | 11 +++-- .../test_early_should_send_prompts.py | 7 +-- .../test_early_sync_wrapper.py | 14 ++++-- .../instrumentation/anthropic/utils.py | 3 +- .../tests/conftest.py | 9 ++-- .../tests/test_completion.py | 2 +- .../instrumentation/groq/__init__.py | 22 ++++------ .../test_early_utils/test_early_dont_throw.py | 10 +++-- .../test_early_error_metrics_attributes.py | 5 ++- .../test_early_model_as_dict.py | 10 +++-- .../test_early_set_span_attribute.py | 5 ++- .../test_early_shared_metrics_attributes.py | 12 ++++-- .../test_early_should_send_prompts.py | 8 ++-- .../instrumentation/groq/utils.py | 5 ++- .../tests/traces/conftest.py | 11 +++-- .../instrumentation/haystack/__init__.py | 20 ++++----- .../test_early_process_request.py | 13 ++++-- .../test_early_process_response.py | 12 ++++-- .../test_early_set_span_attribute.py | 35 +++++++-------- .../test_early_should_send_prompts.py | 12 +++--- .../instrumentation/haystack/wrap_node.py | 5 +-- .../instrumentation/haystack/wrap_openai.py | 13 +++--- .../instrumentation/haystack/wrap_pipeline.py | 13 +++--- .../tests/conftest.py | 6 ++- .../tests/test_simple_pipeline.py | 3 +- .../instrumentation/pinecone/__init__.py | 34 ++++++--------- .../pinecone/query_handlers.py | 3 +- .../test_early_utils/test_early_dont_throw.py | 22 ++++++---- .../test_early_is_metrics_enabled.py | 6 ++- .../test_early_set_span_attribute.py | 5 
++- .../instrumentation/pinecone/utils.py | 1 + .../tests/conftest.py | 16 ++++--- 42 files changed, 272 insertions(+), 233 deletions(-) diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/__init__.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/__init__.py index 52459d9c3..f02d13960 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/__init__.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/__init__.py @@ -4,39 +4,30 @@ import logging import os import time -from typing import Callable, Collection, Dict, Any, Optional -from typing_extensions import Coroutine +from typing import Any, Callable, Collection, Dict, Optional from anthropic._streaming import AsyncStream, Stream from opentelemetry import context as context_api from opentelemetry.instrumentation.anthropic.config import Config from opentelemetry.instrumentation.anthropic.streaming import ( - abuild_from_streaming_response, - build_from_streaming_response, -) + abuild_from_streaming_response, build_from_streaming_response) from opentelemetry.instrumentation.anthropic.utils import ( - acount_prompt_tokens_from_request, - dont_throw, - error_metrics_attributes, - count_prompt_tokens_from_request, - run_async, - set_span_attribute, - shared_metrics_attributes, - should_send_prompts, -) + acount_prompt_tokens_from_request, count_prompt_tokens_from_request, + dont_throw, error_metrics_attributes, run_async, set_span_attribute, + shared_metrics_attributes, should_send_prompts) from opentelemetry.instrumentation.anthropic.version import __version__ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor -from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap +from opentelemetry.instrumentation.utils import (_SUPPRESS_INSTRUMENTATION_KEY, + unwrap) from opentelemetry.metrics import Counter, Histogram, Meter, get_meter -from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GEN_AI_RESPONSE_ID +from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import \ + GEN_AI_RESPONSE_ID from opentelemetry.semconv_ai import ( - SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, - LLMRequestTypeValues, - SpanAttributes, - Meters, -) + SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, LLMRequestTypeValues, Meters, + SpanAttributes) from opentelemetry.trace import SpanKind, Tracer, get_tracer from opentelemetry.trace.status import Status, StatusCode +from typing_extensions import Coroutine from wrapt import wrap_function_wrapper logger = logging.getLogger(__name__) diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/config.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/config.py index 5eff0b909..885f4fdc7 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/config.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/config.py @@ -1,4 +1,5 @@ from typing import Callable, Optional + from typing_extensions import Coroutine diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/streaming.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/streaming.py index 3c164bf9e..3f0d6a9ca 100644 --- 
a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/streaming.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/streaming.py @@ -3,15 +3,11 @@ from opentelemetry.instrumentation.anthropic.config import Config from opentelemetry.instrumentation.anthropic.utils import ( - dont_throw, - error_metrics_attributes, - count_prompt_tokens_from_request, - set_span_attribute, - shared_metrics_attributes, - should_send_prompts, -) + count_prompt_tokens_from_request, dont_throw, error_metrics_attributes, + set_span_attribute, shared_metrics_attributes, should_send_prompts) from opentelemetry.metrics import Counter, Histogram -from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GEN_AI_RESPONSE_ID +from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import \ + GEN_AI_RESPONSE_ID from opentelemetry.semconv_ai import SpanAttributes from opentelemetry.trace.status import Status, StatusCode diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early__handle_exception.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early__handle_exception.py index 02a439d3b..ddd95b1b2 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early__handle_exception.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early__handle_exception.py @@ -1,19 +1,26 @@ -import pytest import logging from unittest.mock import Mock, patch + +import pytest from opentelemetry.instrumentation.anthropic.utils import dont_throw # Mock Config to control the behavior of exception_logger + + class MockConfig: exception_logger = None # Patch the Config used in the module with our MockConfig + + @pytest.fixture(autouse=True) def patch_config(): with patch('opentelemetry.instrumentation.anthropic.utils.Config', MockConfig): yield # Describe block for _handle_exception related tests + + @pytest.mark.describe("_handle_exception") class TestHandleException: @@ -68,4 +75,4 @@ def test_no_exception_logger(self): def exception_func(): raise ValueError("Test exception") - exception_func() # Should not raise any error \ No newline at end of file + exception_func() # Should not raise any error diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_acount_prompt_tokens_from_request.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_acount_prompt_tokens_from_request.py index 9c3ee5f9e..a04f9a80a 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_acount_prompt_tokens_from_request.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_acount_prompt_tokens_from_request.py @@ -1,13 +1,15 @@ -import pytest import asyncio from unittest.mock import AsyncMock +import pytest # Assuming the function is imported from the module -from opentelemetry.instrumentation.anthropic.utils import acount_prompt_tokens_from_request +from opentelemetry.instrumentation.anthropic.utils import \ + acount_prompt_tokens_from_request + @pytest.mark.describe("acount_prompt_tokens_from_request") class 
TestAcountPromptTokensFromRequest: - + @pytest.mark.happy_path @pytest.mark.asyncio async def test_single_prompt(self): @@ -15,22 +17,21 @@ async def test_single_prompt(self): anthropic = AsyncMock() anthropic.count_tokens = AsyncMock(return_value=5) request = {"prompt": "This is a test prompt."} - + result = await acount_prompt_tokens_from_request(anthropic, request) - + assert result == 5 anthropic.count_tokens.assert_awaited_once_with("This is a test prompt.") - @pytest.mark.edge_case @pytest.mark.asyncio async def test_no_prompt_or_messages(self): """Test with no prompt or messages to ensure zero tokens are counted.""" anthropic = AsyncMock() request = {} - + result = await acount_prompt_tokens_from_request(anthropic, request) - + assert result == 0 anthropic.count_tokens.assert_not_awaited() @@ -46,10 +47,8 @@ async def test_message_with_non_string_content(self): {"content": None} # None content ] } - + result = await acount_prompt_tokens_from_request(anthropic, request) - + assert result == 0 anthropic.count_tokens.assert_not_awaited() - - \ No newline at end of file diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_async_wrapper.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_async_wrapper.py index a67a0f963..2da1aae52 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_async_wrapper.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_async_wrapper.py @@ -1,18 +1,23 @@ -import pytest import asyncio import logging from unittest.mock import AsyncMock, MagicMock, patch + +import pytest from opentelemetry.instrumentation.anthropic.utils import dont_throw # Mock Config to avoid side effects during testing + + class MockConfig: exception_logger = None + @pytest.fixture(autouse=True) def mock_config(): with patch('opentelemetry.instrumentation.anthropic.utils.Config', new=MockConfig): yield + @pytest.mark.describe("Tests for async_wrapper") class TestAsyncWrapper: @@ -52,7 +57,6 @@ async def failing_coroutine(): assert result is None assert "OpenLLMetry failed to trace in failing_coroutine" in caplog.text - @pytest.mark.edge_case @pytest.mark.asyncio async def test_async_wrapper_no_exception_logger(self, caplog): @@ -70,4 +74,4 @@ async def failing_coroutine(): assert "OpenLLMetry failed to trace in failing_coroutine" in caplog.text assert MockConfig.exception_logger is None -# Note: The `@pytest.mark.asyncio` decorator is used to run async tests with pytest. \ No newline at end of file +# Note: The `@pytest.mark.asyncio` decorator is used to run async tests with pytest. 
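The sync and async wrapper tests in this patch all pin down the same `dont_throw` contract: successful calls pass through unchanged, exceptions are swallowed and logged, a configured `Config.exception_logger` receives the swallowed exception, and the wrapper returns None on failure. As a minimal sketch of that assumed contract (an illustration only, not the shipped helper in `opentelemetry/instrumentation/anthropic/utils.py`), a decorator satisfying these tests could look like:

    import asyncio
    import functools
    import logging
    import traceback

    from opentelemetry.instrumentation.anthropic.config import Config

    logger = logging.getLogger(__name__)


    def dont_throw(func):
        """Never let a tracing failure escape into the instrumented application."""

        def _handle(exc):
            # Message format mirrors the caplog assertions in the tests above.
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(exc)

        if asyncio.iscoroutinefunction(func):
            @functools.wraps(func)
            async def async_wrapper(*args, **kwargs):
                try:
                    return await func(*args, **kwargs)
                except Exception as exc:
                    _handle(exc)  # swallow: wrapper returns None

            return async_wrapper

        @functools.wraps(func)
        def sync_wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as exc:
                _handle(exc)  # swallow: wrapper returns None

        return sync_wrapper

Against this sketch, the `MockConfig` fixtures in the tests toggle the `exception_logger` hook, and the `caplog` assertions match the `logger.debug` message.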
diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_count_prompt_tokens_from_request.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_count_prompt_tokens_from_request.py index f089a4630..d10fe7624 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_count_prompt_tokens_from_request.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_count_prompt_tokens_from_request.py @@ -1,8 +1,10 @@ -import pytest from unittest.mock import Mock +import pytest # Assuming the function is imported from the correct module -from opentelemetry.instrumentation.anthropic.utils import count_prompt_tokens_from_request +from opentelemetry.instrumentation.anthropic.utils import \ + count_prompt_tokens_from_request + @pytest.mark.describe("Tests for count_prompt_tokens_from_request") class TestCountPromptTokensFromRequest: @@ -15,9 +17,9 @@ def test_single_prompt_string(self): anthropic = Mock() anthropic.count_tokens = Mock(return_value=5) request = {"prompt": "Hello, world!"} - + result = count_prompt_tokens_from_request(anthropic, request) - + assert result == 5 anthropic.count_tokens.assert_called_once_with("Hello, world!") @@ -34,9 +36,9 @@ def test_multiple_messages_with_string_content(self): {"content": "How are you?"} ] } - + result = count_prompt_tokens_from_request(anthropic, request) - + assert result == 7 anthropic.count_tokens.assert_any_call("Hi") anthropic.count_tokens.assert_any_call("How are you?") @@ -53,9 +55,9 @@ def test_messages_with_list_content(self): {"content": [{"type": "text", "text": "Hello"}, {"type": "text", "text": "World"}]} ] } - + result = count_prompt_tokens_from_request(anthropic, request) - + assert result == 5 anthropic.count_tokens.assert_any_call("Hello") anthropic.count_tokens.assert_any_call("World") @@ -67,9 +69,9 @@ def test_empty_request(self): """ anthropic = Mock() request = {} - + result = count_prompt_tokens_from_request(anthropic, request) - + assert result == 0 anthropic.count_tokens.assert_not_called() @@ -81,9 +83,9 @@ def test_no_count_tokens_method(self): anthropic = Mock() del anthropic.count_tokens request = {"prompt": "Hello, world!"} - + result = count_prompt_tokens_from_request(anthropic, request) - + assert result == 0 @pytest.mark.edge_case @@ -99,9 +101,9 @@ def test_non_string_content_in_messages(self): {"content": {"type": "image", "url": "http://example.com/image.png"}} ] } - + result = count_prompt_tokens_from_request(anthropic, request) - + assert result == 0 anthropic.count_tokens.assert_not_called() @@ -112,13 +114,10 @@ def test_mixed_content_types_in_list(self): """ anthropic = Mock() anthropic.count_tokens = Mock(return_value=3) - request = { - "messages": [ - {"content": [{"type": "text", "text": "Hello"}, {"type": "image", "url": "http://example.com/image.png"}]} - ] - } - + request = {"messages": [{"content": [{"type": "text", "text": "Hello"}, + {"type": "image", "url": "http://example.com/image.png"}]}]} + result = count_prompt_tokens_from_request(anthropic, request) - + assert result == 3 - anthropic.count_tokens.assert_called_once_with("Hello") \ No newline at end of file + anthropic.count_tokens.assert_called_once_with("Hello") diff --git 
a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_error_metrics_attributes.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_error_metrics_attributes.py index efbbf680d..9bacad2e1 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_error_metrics_attributes.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_error_metrics_attributes.py @@ -1,7 +1,10 @@ import pytest -from opentelemetry.instrumentation.anthropic.utils import error_metrics_attributes +from opentelemetry.instrumentation.anthropic.utils import \ + error_metrics_attributes # Describe block for all tests related to error_metrics_attributes + + @pytest.mark.describe("Tests for error_metrics_attributes function") class TestErrorMetricsAttributes: @@ -79,4 +82,4 @@ class SubclassException(BaseException): "error.type": "SubclassException" } -# To run these tests, you would typically use the command: pytest -v \ No newline at end of file +# To run these tests, you would typically use the command: pytest -v diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_run_async.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_run_async.py index 6a5d6e4ec..b76c02761 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_run_async.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_run_async.py @@ -1,11 +1,11 @@ -import pytest import asyncio import unittest - +import pytest # Import the run_async function from the specified path from opentelemetry.instrumentation.anthropic.utils import run_async + @pytest.mark.describe("run_async function tests") class TestRunAsync: @@ -35,7 +35,6 @@ async def sample_coroutine(): result = run_async(sample_coroutine()) assert result is None # Since the function doesn't return anything - @pytest.mark.edge_case def test_run_async_with_exception_in_coroutine(self): """ diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_set_span_attribute.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_set_span_attribute.py index e563e66f4..9d77fb806 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_set_span_attribute.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_set_span_attribute.py @@ -1,10 +1,12 @@ -import pytest from unittest.mock import Mock +import pytest # Import the function to be tested from opentelemetry.instrumentation.anthropic.utils import set_span_attribute # Describe block for set_span_attribute tests + + @pytest.mark.describe("set_span_attribute function") class TestSetSpanAttribute: @@ -74,6 +76,7 @@ def test_set_attribute_with_boolean_value(self): set_span_attribute(span, "test.attribute", boolean_value) span.set_attribute.assert_called_once_with("test.attribute", boolean_value) + # Run the tests if __name__ == "__main__": - 
pytest.main() \ No newline at end of file + pytest.main() diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_shared_metrics_attributes.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_shared_metrics_attributes.py index 895cdd4b2..8b2baf7ac 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_shared_metrics_attributes.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_shared_metrics_attributes.py @@ -1,14 +1,19 @@ -import pytest from unittest.mock import MagicMock -from opentelemetry.instrumentation.anthropic.utils import shared_metrics_attributes + +import pytest from opentelemetry.instrumentation.anthropic.config import Config +from opentelemetry.instrumentation.anthropic.utils import \ + shared_metrics_attributes from opentelemetry.semconv_ai import SpanAttributes # Mock configuration for common metrics attributes + + @pytest.fixture(autouse=True) def mock_config(): Config.get_common_metrics_attributes = MagicMock(return_value={"common_attr": "value"}) + @pytest.mark.describe("shared_metrics_attributes") class TestSharedMetricsAttributes: @@ -82,4 +87,4 @@ def test_shared_metrics_attributes_with_unexpected_attributes(self): "gen_ai.system": "anthropic", SpanAttributes.LLM_RESPONSE_MODEL: "test-model", } - assert shared_metrics_attributes(response) == expected_attributes \ No newline at end of file + assert shared_metrics_attributes(response) == expected_attributes diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_should_send_prompts.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_should_send_prompts.py index 27257fef6..1ad5bbecd 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_should_send_prompts.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_should_send_prompts.py @@ -1,8 +1,10 @@ import os -import pytest from unittest.mock import patch + +import pytest from opentelemetry.instrumentation.anthropic.utils import should_send_prompts + @pytest.mark.describe("Tests for should_send_prompts function") class TestShouldSendPrompts: @@ -24,7 +26,6 @@ def test_should_send_prompts_env_var_true_case_insensitive(self): with patch.dict(os.environ, {"TRACELOOP_TRACE_CONTENT": "TRUE"}): assert should_send_prompts() is True - @pytest.mark.happy_path def test_should_send_prompts_env_var_not_set(self): """ @@ -62,4 +63,4 @@ def test_should_send_prompts_no_env_var_no_override(self): """ with patch.dict(os.environ, {}, clear=True): with patch('opentelemetry.context.get_value', return_value=False): - assert should_send_prompts() is True \ No newline at end of file + assert should_send_prompts() is True diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_sync_wrapper.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_sync_wrapper.py index 6bcf594b8..6cc541c4b 100644 --- 
a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_sync_wrapper.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/test_early_utils/test_early_sync_wrapper.py @@ -1,23 +1,31 @@ -import pytest import logging from unittest.mock import Mock, patch + +import pytest from opentelemetry.instrumentation.anthropic.utils import dont_throw # Mock Config to avoid side effects during testing + + class MockConfig: exception_logger = Mock() # Sample function to be wrapped + + def sample_function(x, y): return x + y # Sample function to raise an exception + + def exception_function(x, y): raise ValueError("An error occurred") + @pytest.mark.describe("sync_wrapper") class TestSyncWrapper: - + @pytest.mark.happy_path def test_sync_wrapper_happy_path(self): """ @@ -49,4 +57,4 @@ def none_return_function(): wrapped_function = dont_throw(none_return_function) result = wrapped_function() - assert result is None, "Expected the wrapped function to return None" \ No newline at end of file + assert result is None, "Expected the wrapped function to return None" diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/utils.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/utils.py index be032d28f..1c8dc2cbc 100644 --- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/utils.py +++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/utils.py @@ -1,8 +1,9 @@ import asyncio -import os import logging +import os import threading import traceback + from opentelemetry import context as context_api from opentelemetry.instrumentation.anthropic.config import Config from opentelemetry.semconv_ai import SpanAttributes diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/conftest.py b/packages/opentelemetry-instrumentation-anthropic/tests/conftest.py index 4625360fd..194a7771e 100644 --- a/packages/opentelemetry-instrumentation-anthropic/tests/conftest.py +++ b/packages/opentelemetry-instrumentation-anthropic/tests/conftest.py @@ -6,14 +6,13 @@ from opentelemetry import metrics, trace from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor from opentelemetry.sdk.metrics import Counter, Histogram, MeterProvider -from opentelemetry.sdk.metrics.export import ( - AggregationTemporality, - InMemoryMetricReader, -) +from opentelemetry.sdk.metrics.export import (AggregationTemporality, + InMemoryMetricReader) from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor -from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter +from opentelemetry.sdk.trace.export.in_memory_span_exporter import \ + InMemorySpanExporter pytest_plugins = [] diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/test_completion.py b/packages/opentelemetry-instrumentation-anthropic/tests/test_completion.py index bdcaa0291..85f631a4a 100644 --- a/packages/opentelemetry-instrumentation-anthropic/tests/test_completion.py +++ b/packages/opentelemetry-instrumentation-anthropic/tests/test_completion.py @@ -5,7 +5,7 @@ import pytest from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic, AsyncAnthropic -from opentelemetry.semconv_ai import SpanAttributes, Meters +from opentelemetry.semconv_ai import Meters, 
SpanAttributes def verify_metrics(resource_metrics, model_name: str, ignore_exception_metric: bool = False): diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/__init__.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/__init__.py index e9a9baee0..4cd6860f6 100644 --- a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/__init__.py +++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/__init__.py @@ -10,24 +10,18 @@ from opentelemetry import context as context_api from opentelemetry.instrumentation.groq.config import Config from opentelemetry.instrumentation.groq.utils import ( - dont_throw, - error_metrics_attributes, - model_as_dict, - set_span_attribute, - shared_metrics_attributes, - should_send_prompts, -) + dont_throw, error_metrics_attributes, model_as_dict, set_span_attribute, + shared_metrics_attributes, should_send_prompts) from opentelemetry.instrumentation.groq.version import __version__ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor -from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap +from opentelemetry.instrumentation.utils import (_SUPPRESS_INSTRUMENTATION_KEY, + unwrap) from opentelemetry.metrics import Counter, Histogram, Meter, get_meter -from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GEN_AI_RESPONSE_ID +from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import \ + GEN_AI_RESPONSE_ID from opentelemetry.semconv_ai import ( - SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, - LLMRequestTypeValues, - SpanAttributes, - Meters, -) + SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, LLMRequestTypeValues, Meters, + SpanAttributes) from opentelemetry.trace import SpanKind, Tracer, get_tracer from opentelemetry.trace.status import Status, StatusCode from wrapt import wrap_function_wrapper diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_dont_throw.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_dont_throw.py index 94c9d4c9a..e793871c5 100644 --- a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_dont_throw.py +++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_dont_throw.py @@ -1,6 +1,7 @@ -import pytest import logging from unittest.mock import Mock, patch + +import pytest from opentelemetry.instrumentation.groq.utils import dont_throw # Configure logging to capture log messages for assertions @@ -8,6 +9,8 @@ logger = logging.getLogger(__name__) # Describe block for dont_throw tests + + @pytest.mark.describe("dont_throw") class TestDontThrow: @@ -42,7 +45,7 @@ def test_edge_case_custom_exception_logger(self): Test that a custom exception logger is called when an exception occurs. """ custom_logger = Mock() - + @dont_throw def sample_function(x, y): return x / y @@ -75,6 +78,7 @@ def sample_function(x, y=10): result = sample_function(5, y=15) assert result == 20, "The function should correctly handle keyword arguments." 
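+ # Taken together, these cases document the contract the instrumentation + # assumes from dont_throw: successful calls pass through unchanged, + # exceptions are swallowed rather than raised, a configured + # Config.exception_logger receives the swallowed exception, and both + # positional and keyword arguments reach the wrapped function intact.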
+ # Run the tests if __name__ == "__main__": - pytest.main() \ No newline at end of file + pytest.main() diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_error_metrics_attributes.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_error_metrics_attributes.py index 08fc3d43d..48b0c41f1 100644 --- a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_error_metrics_attributes.py +++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_error_metrics_attributes.py @@ -2,6 +2,8 @@ from opentelemetry.instrumentation.groq.utils import error_metrics_attributes # Describe block for all tests related to error_metrics_attributes + + @pytest.mark.describe("Tests for error_metrics_attributes function") class TestErrorMetricsAttributes: @@ -63,6 +65,7 @@ class NonStandardException: "error.type": "NonStandardException", } + # Run the tests if __name__ == "__main__": - pytest.main() \ No newline at end of file + pytest.main() diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_model_as_dict.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_model_as_dict.py index 0f808b4b8..f0347b486 100644 --- a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_model_as_dict.py +++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_model_as_dict.py @@ -1,9 +1,12 @@ -import pytest -from unittest.mock import MagicMock, patch from importlib.metadata import version +from unittest.mock import MagicMock, patch + +import pytest from opentelemetry.instrumentation.groq.utils import model_as_dict # Describe block for all tests related to model_as_dict + + @pytest.mark.describe("model_as_dict function") class TestModelAsDict: @@ -35,7 +38,6 @@ def test_model_as_dict_with_pydantic_v2(self): assert result == {"model": "test_model"} mock_model.model_dump.assert_called_once() - @pytest.mark.edge_case def test_model_as_dict_with_non_pydantic_object(self): """ @@ -68,4 +70,4 @@ def test_model_as_dict_with_none(self): """ result = model_as_dict(None) - assert result is None \ No newline at end of file + assert result is None diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_set_span_attribute.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_set_span_attribute.py index cf8ec2322..717f4dc4d 100644 --- a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_set_span_attribute.py +++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_set_span_attribute.py @@ -1,9 +1,10 @@ -import pytest from unittest.mock import Mock +import pytest # Assuming the set_span_attribute function is imported from the correct module from opentelemetry.instrumentation.groq.utils import set_span_attribute + @pytest.mark.describe("Tests for set_span_attribute function") class TestSetSpanAttribute: @@ -71,4 +72,4 @@ def test_set_attribute_with_boolean_value(self): span = Mock() boolean_value = True set_span_attribute(span, "test.attribute", boolean_value) 
diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_set_span_attribute.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_set_span_attribute.py
index cf8ec2322..717f4dc4d 100644
--- a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_set_span_attribute.py
+++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_set_span_attribute.py
@@ -1,9 +1,10 @@
-import pytest
 from unittest.mock import Mock
 
+import pytest
 # Assuming the set_span_attribute function is imported from the correct module
 from opentelemetry.instrumentation.groq.utils import set_span_attribute
 
+
 @pytest.mark.describe("Tests for set_span_attribute function")
 class TestSetSpanAttribute:
@@ -71,4 +72,4 @@ def test_set_attribute_with_boolean_value(self):
         span = Mock()
         boolean_value = True
         set_span_attribute(span, "test.attribute", boolean_value)
-        span.set_attribute.assert_called_once_with("test.attribute", boolean_value)
\ No newline at end of file
+        span.set_attribute.assert_called_once_with("test.attribute", boolean_value)
diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_shared_metrics_attributes.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_shared_metrics_attributes.py
index b104fb615..e69ac7e37 100644
--- a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_shared_metrics_attributes.py
+++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_shared_metrics_attributes.py
@@ -1,21 +1,27 @@
-import pytest
 from unittest.mock import MagicMock, patch
-from opentelemetry.instrumentation.groq.utils import shared_metrics_attributes
+
+import pytest
 from opentelemetry.instrumentation.groq.config import Config
+from opentelemetry.instrumentation.groq.utils import shared_metrics_attributes
 from opentelemetry.semconv_ai import SpanAttributes
 
 # Mocking the Config class to control the behavior of get_common_metrics_attributes
+
+
 @pytest.fixture
 def mock_config():
     with patch('opentelemetry.instrumentation.groq.config.Config.get_common_metrics_attributes') as mock:
         yield mock
 
 # Mocking the model_as_dict function
+
+
 @pytest.fixture
 def mock_model_as_dict():
     with patch('opentelemetry.instrumentation.groq.utils.model_as_dict') as mock:
         yield mock
 
+
 @pytest.mark.describe("shared_metrics_attributes")
 class TestSharedMetricsAttributes:
@@ -93,4 +99,4 @@ def test_shared_metrics_attributes_with_exception_in_model_as_dict(self, mock_co
         result = shared_metrics_attributes(response)
 
         # Assert
-        assert result is None  # Since the function is decorated with @dont_throw, it should return None on exception
\ No newline at end of file
+        assert result is None  # Since the function is decorated with @dont_throw, it should return None on exception
diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_should_send_prompts.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_should_send_prompts.py
index 2cd711120..8e9cea5b9 100644
--- a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_should_send_prompts.py
+++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/test_early_utils/test_early_should_send_prompts.py
@@ -1,9 +1,12 @@
 import os
-import pytest
 from unittest import mock
+
+import pytest
 from opentelemetry.instrumentation.groq.utils import should_send_prompts
 
 # Describe block for should_send_prompts tests
+
+
 @pytest.mark.describe("Tests for should_send_prompts function")
 class TestShouldSendPrompts:
@@ -42,7 +45,6 @@ def test_should_send_prompts_context_override(self):
 
         assert should_send_prompts() is True
 
-
 # Run the tests
 if __name__ == "__main__":
-    pytest.main()
\ No newline at end of file
+    pytest.main()
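The `should_send_prompts` tests above pin down the content-tracing toggle: capture is on by default, `TRACELOOP_TRACE_CONTENT` is compared case-insensitively, and a context value can override the environment per request. A minimal sketch of that logic, assuming the `override_enable_content_tracing` context key named in the tests:

```python
import os

from opentelemetry import context as context_api


# Illustrative sketch of the toggle exercised above; the real implementation
# lives in opentelemetry/instrumentation/groq/utils.py and may differ.
def should_send_prompts():
    env_enabled = (os.getenv("TRACELOOP_TRACE_CONTENT") or "true").lower() == "true"
    return env_enabled or context_api.get_value("override_enable_content_tracing")
```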
diff --git a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/utils.py b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/utils.py
index e283307b9..24850c17d 100644
--- a/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/utils.py
+++ b/packages/opentelemetry-instrumentation-groq/opentelemetry/instrumentation/groq/utils.py
@@ -1,7 +1,8 @@
-from importlib.metadata import version
-import os
 import logging
+import os
 import traceback
+from importlib.metadata import version
+
 from opentelemetry import context as context_api
 from opentelemetry.instrumentation.groq.config import Config
 from opentelemetry.semconv_ai import SpanAttributes
diff --git a/packages/opentelemetry-instrumentation-groq/tests/traces/conftest.py b/packages/opentelemetry-instrumentation-groq/tests/traces/conftest.py
index 8279e11bb..0285412c1 100644
--- a/packages/opentelemetry-instrumentation-groq/tests/traces/conftest.py
+++ b/packages/opentelemetry-instrumentation-groq/tests/traces/conftest.py
@@ -3,18 +3,17 @@
 import os
 
 import pytest
+from groq import AsyncGroq, Groq
 from opentelemetry import metrics, trace
 from opentelemetry.instrumentation.groq import GroqInstrumentor
 from opentelemetry.sdk.metrics import Counter, Histogram, MeterProvider
-from opentelemetry.sdk.metrics.export import (
-    AggregationTemporality,
-    InMemoryMetricReader,
-)
+from opentelemetry.sdk.metrics.export import (AggregationTemporality,
+                                              InMemoryMetricReader)
 from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.trace import TracerProvider
 from opentelemetry.sdk.trace.export import SimpleSpanProcessor
-from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
-from groq import Groq, AsyncGroq
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import \
+    InMemorySpanExporter
 
 
 @pytest.fixture(scope="session")
diff --git a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/__init__.py b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/__init__.py
index 169902d43..c0d1b77be 100644
--- a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/__init__.py
+++ b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/__init__.py
@@ -1,18 +1,16 @@
 import logging
 from typing import Collection
 
-from opentelemetry.instrumentation.haystack.config import Config
-from wrapt import wrap_function_wrapper
-from opentelemetry.trace import get_tracer
-from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
-from opentelemetry.instrumentation.utils import (
-    unwrap,
-)
-from opentelemetry.instrumentation.haystack.wrap_openai import wrap as openai_wrapper
-from opentelemetry.instrumentation.haystack.wrap_pipeline import (
-    wrap as pipeline_wrapper,
-)
+from opentelemetry.instrumentation.haystack.config import Config
 from opentelemetry.instrumentation.haystack.version import __version__
+from opentelemetry.instrumentation.haystack.wrap_openai import \
+    wrap as openai_wrapper
+from opentelemetry.instrumentation.haystack.wrap_pipeline import \
+    wrap as pipeline_wrapper
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.instrumentation.utils import unwrap
+from opentelemetry.trace import get_tracer
+from wrapt import wrap_function_wrapper
 
 logger = logging.getLogger(__name__)
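The `__init__.py` change above is a cosmetic import regrouping; the instrumentor wiring itself is untouched. For readers unfamiliar with the pattern, a stripped-down sketch of how a `BaseInstrumentor` subclass uses `wrap_function_wrapper` and `unwrap` — `some_library` and `SomeClass.run` are placeholders, not part of this patch:

```python
from typing import Collection

from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.instrumentation.utils import unwrap
from opentelemetry.trace import get_tracer
from wrapt import wrap_function_wrapper


def _wrap(tracer):
    def wrapper(wrapped, instance, args, kwargs):
        # Trace the original call without altering its behavior.
        with tracer.start_as_current_span("SomeClass.run"):
            return wrapped(*args, **kwargs)

    return wrapper


class ExampleInstrumentor(BaseInstrumentor):
    def instrumentation_dependencies(self) -> Collection[str]:
        return ("some-library >= 1.0",)  # hypothetical dependency spec

    def _instrument(self, **kwargs):
        tracer = get_tracer(__name__, "0.0.1", kwargs.get("tracer_provider"))
        wrap_function_wrapper("some_library", "SomeClass.run", _wrap(tracer))

    def _uninstrument(self, **kwargs):
        unwrap("some_library.SomeClass", "run")
```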
diff --git a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_request.py b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_request.py
index b779ff25c..4055398c1 100644
--- a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_request.py
+++ b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_request.py
@@ -1,30 +1,35 @@
-import pytest
-from unittest.mock import Mock, patch
 import json
+from unittest.mock import Mock, patch
+
+import pytest
 from opentelemetry.instrumentation.haystack.utils import process_request
 from opentelemetry.semconv_ai import SpanAttributes
 
 # Mocking the context API and Config for testing
+
+
 @pytest.fixture(autouse=True)
 def mock_context_api(monkeypatch):
     mock_context = Mock()
     monkeypatch.setattr("opentelemetry.instrumentation.haystack.utils.context_api", mock_context)
     return mock_context
 
+
 @pytest.fixture(autouse=True)
 def mock_config(monkeypatch):
     mock_config = Mock()
     monkeypatch.setattr("opentelemetry.instrumentation.haystack.utils.Config", mock_config)
     return mock_config
 
+
 @pytest.fixture
 def mock_span():
     return Mock()
 
+
 @pytest.mark.describe("process_request function")
 class TestProcessRequest:
-
     @pytest.mark.happy_path
     def test_process_request_with_empty_args_and_kwargs(self, mock_span):
         """
@@ -93,4 +98,4 @@ def test_process_request_with_exception_handling(self, mock_span, mock_config):
         process_request(mock_span, args, kwargs)
 
         # Ensure exception logger is called
-        mock_config.exception_logger.assert_called_once()
\ No newline at end of file
+        mock_config.exception_logger.assert_called_once()
diff --git a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_response.py b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_response.py
index b3f749b16..df74f96ad 100644
--- a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_response.py
+++ b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_process_response.py
@@ -1,16 +1,21 @@
-import pytest
-from unittest.mock import Mock, patch
 import json
+from unittest.mock import Mock, patch
+
+import pytest
 from opentelemetry.instrumentation.haystack.utils import process_response
 from opentelemetry.semconv_ai import SpanAttributes
 
 # Mocking the should_send_prompts function to control its behavior during tests
+
+
 @pytest.fixture
 def mock_should_send_prompts():
     with patch('opentelemetry.instrumentation.haystack.utils.should_send_prompts') as mock:
         yield mock
 
 # Describe block for process_response tests
+
+
 @pytest.mark.describe("process_response function")
 class TestProcessResponse:
@@ -93,6 +98,7 @@ def test_process_response_with_complex_response(self, mock_should_send_prompts):
             json.dumps(response)
         )
 
+
 # Run the tests
 if __name__ == "__main__":
-    pytest.main()
\ No newline at end of file
+    pytest.main()
diff --git a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_set_span_attribute.py b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_set_span_attribute.py
index d6899ec9e..8b3a06b68 100644
--- a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_set_span_attribute.py
+++ b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_set_span_attribute.py
@@ -1,12 +1,13 @@
-import pytest
 from unittest.mock import Mock
 
+import pytest
 # Assuming the set_span_attribute function is imported from the correct module
 from opentelemetry.instrumentation.haystack.utils import set_span_attribute
 
+
 @pytest.mark.describe("set_span_attribute")
 class TestSetSpanAttribute:
-    
+
     @pytest.mark.happy_path
     def test_set_span_attribute_with_valid_value(self):
         """
@@ -15,9 +16,9 @@ def test_set_span_attribute_with_valid_value(self):
         span = Mock()
         name = "test.attribute"
         value = "test_value"
-        
+
         set_span_attribute(span, name, value)
-        
+
         span.set_attribute.assert_called_once_with(name, value)
 
     @pytest.mark.happy_path
@@ -28,9 +29,9 @@ def test_set_span_attribute_with_empty_string(self):
         span = Mock()
         name = "test.attribute"
         value = ""
-        
+
         set_span_attribute(span, name, value)
-        
+
         span.set_attribute.assert_not_called()
 
     @pytest.mark.happy_path
@@ -41,9 +42,9 @@ def test_set_span_attribute_with_none_value(self):
         span = Mock()
         name = "test.attribute"
         value = None
-        
+
         set_span_attribute(span, name, value)
-        
+
         span.set_attribute.assert_not_called()
 
     @pytest.mark.edge_case
@@ -54,9 +55,9 @@ def test_set_span_attribute_with_numeric_value(self):
         span = Mock()
         name = "test.attribute"
         value = 123
-        
+
         set_span_attribute(span, name, value)
-        
+
         span.set_attribute.assert_called_once_with(name, value)
 
     @pytest.mark.edge_case
@@ -67,9 +68,9 @@ def test_set_span_attribute_with_boolean_value(self):
         span = Mock()
         name = "test.attribute"
         value = True
-        
+
         set_span_attribute(span, name, value)
-        
+
         span.set_attribute.assert_called_once_with(name, value)
 
     @pytest.mark.edge_case
@@ -80,9 +81,9 @@ def test_set_span_attribute_with_special_characters(self):
         span = Mock()
         name = "test.attribute"
         value = "!@#$%^&*()_+"
-        
+
         set_span_attribute(span, name, value)
-        
+
         span.set_attribute.assert_called_once_with(name, value)
 
     @pytest.mark.edge_case
@@ -93,9 +94,9 @@ def test_set_span_attribute_with_large_string(self):
         span = Mock()
         name = "test.attribute"
         value = "a" * 10000  # Large string
-        
+
         set_span_attribute(span, name, value)
-        
+
         span.set_attribute.assert_called_once_with(name, value)
 
-# To run the tests, use the command: pytest -v
\ No newline at end of file
+# To run the tests, use the command: pytest -v
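Across the Groq, Haystack, and Pinecone packages, the `set_span_attribute` suites assert the same contract: the attribute is written only for non-empty values. A minimal sketch matching those assertions (each package's actual `utils.py` helper may differ):

```python
# Illustrative sketch inferred from the assertions above: None and the empty
# string are skipped; numbers, booleans, and strings are forwarded verbatim
# to span.set_attribute (zero-like values are not covered by the tests).
def set_span_attribute(span, name, value):
    if value is not None and value != "":
        span.set_attribute(name, value)
```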
diff --git a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_should_send_prompts.py b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_should_send_prompts.py
index 5ef90b2f2..31348179a 100644
--- a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_should_send_prompts.py
+++ b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/test_early_utils/test_early_should_send_prompts.py
@@ -1,8 +1,10 @@
 import os
-import pytest
 from unittest import mock
-from opentelemetry.instrumentation.haystack.utils import should_send_prompts
+
+import pytest
 from opentelemetry import context as context_api
+from opentelemetry.instrumentation.haystack.utils import should_send_prompts
+
 
 @pytest.mark.describe("Tests for should_send_prompts function")
 class TestShouldSendPrompts:
@@ -13,22 +15,18 @@ def test_should_send_prompts_env_var_true(self):
         with mock.patch.dict(os.environ, {"TRACELOOP_TRACE_CONTENT": "true"}):
             assert should_send_prompts() is True
 
-
     @pytest.mark.happy_path
     def test_should_send_prompts_env_var_not_set(self):
         """Test should_send_prompts returns True when TRACELOOP_TRACE_CONTENT is not set."""
         with mock.patch.dict(os.environ, {}, clear=True):
             assert should_send_prompts() is True
 
-
-
     @pytest.mark.edge_case
     def test_should_send_prompts_env_var_case_insensitivity(self):
         """Test should_send_prompts handles TRACELOOP_TRACE_CONTENT case insensitively."""
         with mock.patch.dict(os.environ, {"TRACELOOP_TRACE_CONTENT": "TrUe"}):
             assert should_send_prompts() is True
 
-
     @pytest.mark.edge_case
     def test_should_send_prompts_no_env_var_and_no_context(self):
         """Test should_send_prompts returns True when neither TRACELOOP_TRACE_CONTENT nor context is set."""
@@ -36,4 +34,4 @@ def test_should_send_prompts_no_env_var_and_no_context(self):
         with mock.patch.dict(os.environ, {}, clear=True):
             context_api.set_value("override_enable_content_tracing", None)
             assert should_send_prompts() is True
 
-# To run the tests, use the command: pytest -v
\ No newline at end of file
+# To run the tests, use the command: pytest -v
diff --git a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/wrap_node.py b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/wrap_node.py
index b53804223..c714b7e98 100644
--- a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/wrap_node.py
+++ b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/wrap_node.py
@@ -1,10 +1,9 @@
 import logging
+
 from opentelemetry import context as context_api
 from opentelemetry.context import attach, set_value
-from opentelemetry.instrumentation.utils import (
-    _SUPPRESS_INSTRUMENTATION_KEY,
-)
 from opentelemetry.instrumentation.haystack.utils import with_tracer_wrapper
+from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
 from opentelemetry.semconv_ai import SpanAttributes, TraceloopSpanKindValues
 
 logger = logging.getLogger(__name__)
diff --git a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/wrap_openai.py b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/wrap_openai.py
index 7c5b93708..e80ccd068 100644
--- a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/wrap_openai.py
+++ b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/wrap_openai.py
@@ -1,17 +1,14 @@
 import logging
 
 from opentelemetry import context as context_api
+from opentelemetry.instrumentation.haystack.utils import (dont_throw,
+                                                          set_span_attribute,
+                                                          with_tracer_wrapper)
+from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
+from opentelemetry.semconv_ai import LLMRequestTypeValues, SpanAttributes
 from opentelemetry.trace import SpanKind
 from opentelemetry.trace.status import Status, StatusCode
-from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
-from opentelemetry.semconv_ai import SpanAttributes, LLMRequestTypeValues
-from opentelemetry.instrumentation.haystack.utils import (
-    dont_throw,
-    with_tracer_wrapper,
-    set_span_attribute,
-)
-
 logger = logging.getLogger(__name__)
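The wrapper modules touched above all follow the same guard-then-trace shape: bail out when instrumentation is suppressed, otherwise open a span around the wrapped call and record failure status. A generic sketch of that shape — the span name and kind here are placeholders, not the values the Haystack wrappers actually use:

```python
from opentelemetry import context as context_api
from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
from opentelemetry.trace import SpanKind
from opentelemetry.trace.status import Status, StatusCode


def wrap(tracer, wrapped, instance, args, kwargs):
    # When a caller has suppressed instrumentation, pass straight through.
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    with tracer.start_as_current_span("example.task", kind=SpanKind.CLIENT) as span:
        try:
            result = wrapped(*args, **kwargs)
            span.set_status(Status(StatusCode.OK))
            return result
        except Exception as exc:
            span.set_status(Status(StatusCode.ERROR, str(exc)))
            raise
```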
diff --git a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/wrap_pipeline.py b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/wrap_pipeline.py
index b97047d43..2bf6a1262 100644
--- a/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/wrap_pipeline.py
+++ b/packages/opentelemetry-instrumentation-haystack/opentelemetry/instrumentation/haystack/wrap_pipeline.py
@@ -1,14 +1,11 @@
 import logging
+
 from opentelemetry import context as context_api
 from opentelemetry.context import attach, set_value
-from opentelemetry.instrumentation.utils import (
-    _SUPPRESS_INSTRUMENTATION_KEY,
-)
-from opentelemetry.instrumentation.haystack.utils import (
-    with_tracer_wrapper,
-    process_request,
-    process_response,
-)
+from opentelemetry.instrumentation.haystack.utils import (process_request,
+                                                          process_response,
+                                                          with_tracer_wrapper)
+from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
 from opentelemetry.semconv_ai import SpanAttributes, TraceloopSpanKindValues
 
 logger = logging.getLogger(__name__)
diff --git a/packages/opentelemetry-instrumentation-haystack/tests/conftest.py b/packages/opentelemetry-instrumentation-haystack/tests/conftest.py
index e550a8088..12f76bbd4 100644
--- a/packages/opentelemetry-instrumentation-haystack/tests/conftest.py
+++ b/packages/opentelemetry-instrumentation-haystack/tests/conftest.py
@@ -1,12 +1,14 @@
 """Unit tests configuration module."""
 
 import os
+
 import pytest
 from opentelemetry import trace
+from opentelemetry.instrumentation.haystack import HaystackInstrumentor
 from opentelemetry.sdk.trace import TracerProvider
-from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
 from opentelemetry.sdk.trace.export import SimpleSpanProcessor
-from opentelemetry.instrumentation.haystack import HaystackInstrumentor
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import \
+    InMemorySpanExporter
 
 pytest_plugins = []
diff --git a/packages/opentelemetry-instrumentation-haystack/tests/test_simple_pipeline.py b/packages/opentelemetry-instrumentation-haystack/tests/test_simple_pipeline.py
index 2cc5c3295..832d713ce 100644
--- a/packages/opentelemetry-instrumentation-haystack/tests/test_simple_pipeline.py
+++ b/packages/opentelemetry-instrumentation-haystack/tests/test_simple_pipeline.py
@@ -1,8 +1,9 @@
 import os
+
 import pytest
 from haystack import Pipeline
-from haystack.components.generators.chat import OpenAIChatGenerator
 from haystack.components.builders import DynamicChatPromptBuilder
+from haystack.components.generators.chat import OpenAIChatGenerator
 from haystack.dataclasses import ChatMessage
 from haystack.utils import Secret
 from opentelemetry.semconv_ai import SpanAttributes
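The conftest changes above only regroup imports; the fixture pattern they support is the standard in-memory exporter setup. Roughly, as a sketch of the typical shape rather than a copy of the fixtures in this patch:

```python
import pytest
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import \
    InMemorySpanExporter


@pytest.fixture(scope="session")
def exporter():
    # Collect finished spans in memory so tests can assert on them directly.
    span_exporter = InMemorySpanExporter()
    provider = TracerProvider()
    provider.add_span_processor(SimpleSpanProcessor(span_exporter))
    trace.set_tracer_provider(provider)
    return span_exporter
```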
diff --git a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/__init__.py b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/__init__.py
index 4a54f5d0d..af603fb45 100644
--- a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/__init__.py
+++ b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/__init__.py
@@ -2,33 +2,27 @@
 import logging
 import time
 
-import pinecone
 from typing import Collection
-from wrapt import wrap_function_wrapper
 
+import pinecone
 from opentelemetry import context as context_api
-from opentelemetry.metrics import get_meter
-from opentelemetry.trace import get_tracer, SpanKind
-from opentelemetry.trace.status import Status, StatusCode
-
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
-from opentelemetry.instrumentation.utils import (
-    _SUPPRESS_INSTRUMENTATION_KEY,
-    unwrap,
-)
 from opentelemetry.instrumentation.pinecone.config import Config
-from opentelemetry.instrumentation.pinecone.utils import (
-    dont_throw,
-    is_metrics_enabled,
-    set_span_attribute,
-)
-from opentelemetry.instrumentation.pinecone.version import __version__
 from opentelemetry.instrumentation.pinecone.query_handlers import (
-    set_query_input_attributes,
-    set_query_response,
-)
+    set_query_input_attributes, set_query_response)
+from opentelemetry.instrumentation.pinecone.utils import (dont_throw,
+                                                          is_metrics_enabled,
+                                                          set_span_attribute)
+from opentelemetry.instrumentation.pinecone.version import __version__
+from opentelemetry.instrumentation.utils import (_SUPPRESS_INSTRUMENTATION_KEY,
+                                                 unwrap)
+from opentelemetry.metrics import get_meter
 from opentelemetry.semconv.trace import SpanAttributes
-from opentelemetry.semconv_ai import Meters, SpanAttributes as AISpanAttributes
+from opentelemetry.semconv_ai import Meters
+from opentelemetry.semconv_ai import SpanAttributes as AISpanAttributes
+from opentelemetry.trace import SpanKind, get_tracer
+from opentelemetry.trace.status import Status, StatusCode
+from wrapt import wrap_function_wrapper
 
 logger = logging.getLogger(__name__)
diff --git a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/query_handlers.py b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/query_handlers.py
index c259eaafa..fa7012bf8 100644
--- a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/query_handlers.py
+++ b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/query_handlers.py
@@ -1,7 +1,8 @@
 import json
 
+from opentelemetry.instrumentation.pinecone.utils import (dont_throw,
+                                                          set_span_attribute)
 from opentelemetry.semconv_ai import EventAttributes, Events, SpanAttributes
-from opentelemetry.instrumentation.pinecone.utils import dont_throw, set_span_attribute
 
 
 @dont_throw
diff --git a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_dont_throw.py b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_dont_throw.py
index 47ce73c7d..481e5ad8d 100644
--- a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_dont_throw.py
+++ b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_dont_throw.py
@@ -1,9 +1,12 @@
-import pytest
 import logging
 from unittest.mock import Mock, patch
+
+import pytest
 from opentelemetry.instrumentation.pinecone.utils import dont_throw
 
 # Create a mock logger to capture log outputs
+
+
 class MockLogger:
     def __init__(self):
         self.messages = []
@@ -11,17 +14,18 @@ def __init__(self):
     def debug(self, msg, *args):
         self.messages.append(msg % args)
 
+
 @pytest.mark.describe("dont_throw")
 class TestDontThrow:
-    
+
     @pytest.mark.happy_path
     def test_function_executes_without_exception(self):
         """Test that the wrapped function executes successfully without exceptions."""
         mock_func = Mock(return_value="success")
         wrapped_func = dont_throw(mock_func)
-        
+
         result = wrapped_func()
-        
+
         assert result == "success"
         mock_func.assert_called_once()
 
@@ -30,9 +34,9 @@ def test_function_with_arguments(self):
         """Test that the wrapped function executes successfully with arguments."""
         mock_func = Mock(return_value="success")
         wrapped_func = dont_throw(mock_func)
-        
+
         result = wrapped_func(1, 2, key="value")
-        
+
         assert result == "success"
         mock_func.assert_called_once_with(1, 2, key="value")
 
@@ -41,8 +45,8 @@ def test_function_with_no_return_value(self):
         """Test that the wrapped function handles functions with no return value."""
         mock_func = Mock(return_value=None)
         wrapped_func = dont_throw(mock_func)
-        
+
         result = wrapped_func()
-        
+
         assert result is None
-        mock_func.assert_called_once()
\ No newline at end of file
+        mock_func.assert_called_once()
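The `dont_throw` tests above define the decorator's contract: results pass through untouched, and exceptions are swallowed and logged with `logger.debug(msg, *args)`-style formatting (which is what the `MockLogger` captures via `msg % args`). A minimal sketch consistent with that contract:

```python
import logging
import traceback
from functools import wraps

logger = logging.getLogger(__name__)


# Illustrative sketch: on success the wrapped function's return value is
# passed through; on failure the traceback is logged at debug level and the
# wrapper implicitly returns None instead of raising.
def dont_throw(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            logger.debug(
                "Failed to execute %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )

    return wrapper
```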
diff --git a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_is_metrics_enabled.py b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_is_metrics_enabled.py
index ef37f4614..314f5c476 100644
--- a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_is_metrics_enabled.py
+++ b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_is_metrics_enabled.py
@@ -1,7 +1,9 @@
 import os
+
 import pytest
 from opentelemetry.instrumentation.pinecone.utils import is_metrics_enabled
 
+
 @pytest.mark.describe("is_metrics_enabled function")
 class TestIsMetricsEnabled:
@@ -71,8 +73,10 @@ def test_metrics_enabled_none_string(self):
         assert is_metrics_enabled() is False
 
 # Clean up environment variable after tests
+
+
 @pytest.fixture(autouse=True)
 def cleanup_env():
     yield
     if "TRACELOOP_METRICS_ENABLED" in os.environ:
-        del os.environ["TRACELOOP_METRICS_ENABLED"]
\ No newline at end of file
+        del os.environ["TRACELOOP_METRICS_ENABLED"]
diff --git a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_set_span_attribute.py b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_set_span_attribute.py
index 249453deb..ee36dd63e 100644
--- a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_set_span_attribute.py
+++ b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/test_early_utils/test_early_set_span_attribute.py
@@ -1,9 +1,10 @@
-import pytest
 from unittest.mock import Mock
 
+import pytest
 # Assuming the set_span_attribute function is imported from the correct module
 from opentelemetry.instrumentation.pinecone.utils import set_span_attribute
 
+
 @pytest.mark.describe("Tests for set_span_attribute function")
 class TestSetSpanAttribute:
@@ -70,4 +71,4 @@ def test_set_attribute_with_numeric_name_and_value(self):
         set_span_attribute(span, 123, 456)
         span.set_attribute.assert_called_once_with(123, 456)
 
-# To run the tests, use the command: pytest -v
\ No newline at end of file
+# To run the tests, use the command: pytest -v
diff --git a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/utils.py b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/utils.py
index 7b53f174e..6b6a511c6 100644
--- a/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/utils.py
+++ b/packages/opentelemetry-instrumentation-pinecone/opentelemetry/instrumentation/pinecone/utils.py
@@ -1,6 +1,7 @@
 import logging
 import os
 import traceback
+
 from opentelemetry.instrumentation.pinecone.config import Config
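The `is_metrics_enabled` tests above treat `TRACELOOP_METRICS_ENABLED` as a case-insensitive boolean flag, with a literal `"none"` counting as disabled. A sketch consistent with the visible assertions — the default-when-unset behavior is an assumption here, since the patch does not show that test:

```python
import os


# Illustrative sketch: anything other than a case-insensitive "true"
# (including "none") disables metrics; unset is assumed to mean enabled.
def is_metrics_enabled() -> bool:
    return (os.getenv("TRACELOOP_METRICS_ENABLED") or "true").lower() == "true"
```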
diff --git a/packages/opentelemetry-instrumentation-pinecone/tests/conftest.py b/packages/opentelemetry-instrumentation-pinecone/tests/conftest.py
index cdfb9fd51..b9ba126eb 100644
--- a/packages/opentelemetry-instrumentation-pinecone/tests/conftest.py
+++ b/packages/opentelemetry-instrumentation-pinecone/tests/conftest.py
@@ -1,16 +1,18 @@
 """Unit tests configuration module."""
 
 import os
+
 import pytest
-from opentelemetry import trace, metrics
-from opentelemetry.sdk.trace import TracerProvider
-from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
-from opentelemetry.sdk.trace.export import SimpleSpanProcessor
-from opentelemetry.sdk.resources import Resource
-from opentelemetry.sdk.metrics import MeterProvider
-from opentelemetry.sdk.metrics.export import InMemoryMetricReader
+from opentelemetry import metrics, trace
 from opentelemetry.instrumentation.openai import OpenAIInstrumentor
 from opentelemetry.instrumentation.pinecone import PineconeInstrumentor
+from opentelemetry.sdk.metrics import MeterProvider
+from opentelemetry.sdk.metrics.export import InMemoryMetricReader
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import \
+    InMemorySpanExporter
 
 pytest_plugins = []
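Since the generated suites tag every test with the `happy_path` and `edge_case` markers used throughout the diffs above, subsets can be selected with pytest's `-m` option. A small runner equivalent to the `pytest.main()` stanzas that several of the new test files end with:

```python
import pytest

# Equivalent to running `pytest -v -m edge_case` from the command line:
# execute only the tests tagged with the edge_case marker.
if __name__ == "__main__":
    raise SystemExit(pytest.main(["-v", "-m", "edge_case"]))
```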