diff --git a/mcp_servers/integration_test_generator/README.md b/mcp_servers/integration_test_generator/README.md index c9d0f9f7cb8..506ad920c33 100644 --- a/mcp_servers/integration_test_generator/README.md +++ b/mcp_servers/integration_test_generator/README.md @@ -8,10 +8,10 @@ This MCP server helps generate OTel integration metric test files similar to `te - `Test_MetricsCollection` - validates metrics received by collector - `Test_BackendValidity` - validates metrics received by backend - `Test_Smoke` - generates integration-specific activity and validates basic metrics - + - **Uses shared utilities**: - All tests use the shared `utils/otel_metrics_validator.py` - + - **Generates supporting files**: - `__init__.py` - Template for metrics JSON file @@ -153,7 +153,7 @@ def setup_main(self) -> None: """When the container spins up, we need some activity.""" scenario: OtelCollectorScenario = context.scenario container = scenario.redis_container - + # Customize these operations for your integration r = container.exec_run("redis-cli SET test_key test_value") logger.info(r.output) @@ -198,7 +198,7 @@ The generator will create: 1. Check the configuration file path is correct 2. Ensure the Python path in configuration matches your system 3. Restart Cursor/Claude Desktop after configuration changes -4. Check logs: +4. Check logs: - Cursor: Developer Tools → Console - Claude Desktop: Console logs diff --git a/mcp_servers/integration_test_generator/__init__.py b/mcp_servers/integration_test_generator/__init__.py new file mode 100644 index 00000000000..d2b87505c4f --- /dev/null +++ b/mcp_servers/integration_test_generator/__init__.py @@ -0,0 +1 @@ +"""MCP Server for generating OTel integration metric test files.""" diff --git a/mcp_servers/integration_test_generator/server.py b/mcp_servers/integration_test_generator/server.py index 5219b0c896d..63625940099 100644 --- a/mcp_servers/integration_test_generator/server.py +++ b/mcp_servers/integration_test_generator/server.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """MCP Server for generating OTel integration metric test files. This server provides tools to generate test files similar to test_postgres_metrics.py @@ -6,17 +5,17 @@ """ import json +import sys from pathlib import Path from typing import Any # MCP SDK imports try: from mcp.server import Server - from mcp.types import Tool, TextContent, Resource + from mcp.types import Tool, TextContent, Resource, Prompt, PromptArgument, PromptMessage import mcp.server.stdio except ImportError: - print("Error: MCP SDK not installed. 
Install with: pip install mcp") - exit(1) + sys.exit(1) # Path to reference test files SYSTEM_TESTS_ROOT = Path(__file__).parent.parent.parent @@ -47,7 +46,8 @@ "container_name": "mysql_container", "smoke_test_operations": [ "r = container.exec_run(\"mysql -u root -ppassword -e 'CREATE DATABASE IF NOT EXISTS test_db;'\")", - "r = container.exec_run(\"mysql -u root -ppassword test_db -e 'CREATE TABLE IF NOT EXISTS test_table (id INT PRIMARY KEY);'\")", + 'r = container.exec_run("mysql -u root -ppassword test_db -e ' + "'CREATE TABLE IF NOT EXISTS test_table (id INT PRIMARY KEY);'\")", "r = container.exec_run(\"mysql -u root -ppassword test_db -e 'INSERT INTO test_table VALUES (1);'\")", "logger.info(r.output)", "r = container.exec_run(\"mysql -u root -ppassword test_db -e 'SELECT * FROM test_table;'\")", @@ -76,7 +76,8 @@ "smoke_test_operations": [ 'r = container.exec_run("kafka-topics --create --topic test-topic --bootstrap-server localhost:9092")', "logger.info(r.output)", - 'r = container.exec_run("kafka-console-producer --topic test-topic --bootstrap-server localhost:9092", stdin="test message")', + 'r = container.exec_run("kafka-console-producer --topic test-topic ' + '--bootstrap-server localhost:9092", stdin="test message")', ], "expected_smoke_metrics": [ "kafka.messages", @@ -132,7 +133,7 @@ def generate_test_file( # Format expected smoke metrics expected_metrics_formatted = ",\n ".join([f'"{m}"' for m in config["expected_smoke_metrics"]]) - template = f'''import time + return f'''import time from pathlib import Path from typing import TYPE_CHECKING @@ -235,7 +236,6 @@ def test_main(self) -> None: observed_metrics.add(metric) logger.info(f" {{metric}} {{serie['points']}}") - all_metric_has_be_seen = True for metric in expected_metrics: if metric not in observed_metrics: logger.error(f"Metric {{metric}} hasn't been observed") @@ -243,11 +243,8 @@ def test_main(self) -> None: else: logger.info(f"Metric {{metric}} has been observed") - assert all_metric_has_be_seen ''' - return template - def generate_init_file() -> str: """Generate __init__.py file.""" @@ -287,7 +284,10 @@ async def list_tools() -> list[Tool]: }, "feature_name": { "type": "string", - "description": "Feature name for the @features decorator (optional, defaults to _receiver_metrics)", + "description": ( + "Feature name for the @features decorator " + "(optional, defaults to _receiver_metrics)" + ), }, }, "required": ["integration_name", "metrics_json_file"], @@ -335,27 +335,30 @@ async def list_tools() -> list[Tool]: async def list_resources() -> list[Resource]: """List available reference resources.""" resources = [] - + if POSTGRES_TEST_PATH.exists(): resources.append( Resource( uri=f"file://{POSTGRES_TEST_PATH}", name="PostgreSQL Metrics Test (Reference)", - description="Reference implementation of OTel metrics test. Use this as the gold standard for structure and patterns.", - mimeType="text/x-python" + description=( + "Reference implementation of OTel metrics test. " + "Use this as the gold standard for structure and patterns." 
+ ), + mimeType="text/x-python", ) ) - + if MYSQL_TEST_PATH.exists(): resources.append( Resource( uri=f"file://{MYSQL_TEST_PATH}", name="MySQL Metrics Test (Reference)", description="MySQL metrics test implementation following PostgreSQL patterns", - mimeType="text/x-python" + mimeType="text/x-python", ) ) - + # Add OtelMetricsValidator reference validator_path = SYSTEM_TESTS_ROOT / "utils/otel_metrics_validator.py" if validator_path.exists(): @@ -364,10 +367,10 @@ async def list_resources() -> list[Resource]: uri=f"file://{validator_path}", name="OtelMetricsValidator Utility", description="Shared utility for validating OTel metrics. All tests should use this.", - mimeType="text/x-python" + mimeType="text/x-python", ) ) - + # Add improvements document improvements_path = Path(__file__).parent / "IMPROVEMENTS.md" if improvements_path.exists(): @@ -376,10 +379,10 @@ async def list_resources() -> list[Resource]: uri=f"file://{improvements_path}", name="Integration Test Improvements", description="Design document with improvements and patterns for test generation", - mimeType="text/markdown" + mimeType="text/markdown", ) ) - + return resources @@ -389,19 +392,17 @@ async def read_resource(uri: str) -> str: # Extract path from file:// URI path = uri.replace("file://", "") path_obj = Path(path) - + if not path_obj.exists(): raise ValueError(f"Resource not found: {uri}") - - with open(path_obj, "r", encoding="utf-8") as f: - return f.read() + + # Read file synchronously (MCP server context) + return path_obj.read_text(encoding="utf-8") @app.list_prompts() -async def list_prompts(): +async def list_prompts() -> list[Prompt]: """List available prompts.""" - from mcp.types import Prompt, PromptArgument - return [ Prompt( name="generate_with_reference", @@ -410,33 +411,29 @@ async def list_prompts(): PromptArgument( name="integration_name", description="Name of the integration (e.g., redis, kafka, mongodb)", - required=True + required=True, ), - PromptArgument( - name="metrics_json_file", - description="Name of the metrics JSON file", - required=True - ), - ] + PromptArgument(name="metrics_json_file", description="Name of the metrics JSON file", required=True), + ], ) ] @app.get_prompt() -async def get_prompt(name: str, arguments: dict[str, str] | None = None): +async def get_prompt(name: str, arguments: dict[str, str] | None = None) -> PromptMessage: """Get a specific prompt.""" - from mcp.types import PromptMessage, TextContent as PromptTextContent - if name == "generate_with_reference": integration_name = arguments.get("integration_name", "example") if arguments else "example" - metrics_json_file = arguments.get("metrics_json_file", "example_metrics.json") if arguments else "example_metrics.json" - + metrics_json_file = ( + arguments.get("metrics_json_file", "example_metrics.json") if arguments else "example_metrics.json" + ) + # Read the PostgreSQL test as reference postgres_test_content = "" if POSTGRES_TEST_PATH.exists(): - with open(POSTGRES_TEST_PATH, "r", encoding="utf-8") as f: - postgres_test_content = f.read() - + # Read file synchronously (MCP server context) + postgres_test_content = POSTGRES_TEST_PATH.read_text(encoding="utf-8") + prompt_text = f"""You are generating an OTel integration metrics test for {integration_name}. CRITICAL: Use the PostgreSQL test as your REFERENCE TEMPLATE. Follow its structure exactly. @@ -449,18 +446,18 @@ async def get_prompt(name: str, arguments: dict[str, str] | None = None): ## Requirements for {integration_name} test: -1. 
**Structure**: Follow PostgreSQL test structure EXACTLY: + 1. **Structure**: Follow PostgreSQL test structure EXACTLY: - Three separate test classes (not one big class) - Test_{{Integration}}MetricsCollection - - Test_BackendValidity + - Test_BackendValidity - Test_Smoke -2. **Use OtelMetricsValidator**: Import and use the shared validator + 2. **Use OtelMetricsValidator**: Import and use the shared validator ```python from utils.otel_metrics_validator import OtelMetricsValidator, get_collector_metrics_from_scenario ``` -3. **Correct Decorators**: + 3. **Correct Decorators**: - Use scenario-specific decorator: @scenarios.otel_{integration_name}_metrics_e2e 4. **Real Metrics**: Use actual metrics from {integration_name} receiver @@ -485,30 +482,24 @@ async def get_prompt(name: str, arguments: dict[str, str] | None = None): def test_main(self) -> None: observed_metrics: set[str] = set() expected_metrics = {{...}} - + for data in interfaces.otel_collector.get_data("/api/v2/series"): # ... collect metrics - + missing_metrics = expected_metrics - observed_metrics assert not missing_metrics, f"Missing metrics: {{missing_metrics}}" ``` -Generate the complete test file for {integration_name} with metrics file {metrics_json_file}. -""" - - return PromptMessage( - role="user", - content=PromptTextContent( - type="text", - text=prompt_text - ) - ) - + Generate the complete test file for {integration_name} with metrics file {metrics_json_file}. + """ + + return PromptMessage(role="user", content=TextContent(type="text", text=prompt_text)) + raise ValueError(f"Unknown prompt: {name}") @app.call_tool() -async def call_tool(name: str, arguments: Any) -> list[TextContent]: +async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]: """Handle tool calls.""" if name == "generate_integration_test": @@ -538,7 +529,9 @@ async def call_tool(name: str, arguments: Any) -> list[TextContent]: "shared_utility": { "note": "Uses shared OtelMetricsValidator from utils/otel_metrics_validator.py", "location": "utils/otel_metrics_validator.py", - "import_statement": "from utils.otel_metrics_validator import OtelMetricsValidator, get_collector_metrics_from_scenario", + "import_statement": ( + "from utils.otel_metrics_validator import OtelMetricsValidator, get_collector_metrics_from_scenario" + ), }, "directory_structure": f""" Create the following directory structure: @@ -601,7 +594,9 @@ async def call_tool(name: str, arguments: Any) -> list[TextContent]: "shared_utility": { "location": "utils/otel_metrics_validator.py", "description": "Reusable metrics validation class for all OTel integration tests", - "import_statement": "from utils.otel_metrics_validator import OtelMetricsValidator, get_collector_metrics_from_scenario", + "import_statement": ( + "from utils.otel_metrics_validator import OtelMetricsValidator, get_collector_metrics_from_scenario" + ), }, "classes": { "OtelMetricsValidator": { @@ -651,7 +646,7 @@ async def call_tool(name: str, arguments: Any) -> list[TextContent]: raise ValueError(f"Unknown tool: {name}") -async def main(): +async def main() -> None: """Main entry point for the MCP server.""" async with mcp.server.stdio.stdio_server() as (read_stream, write_stream): await app.run( diff --git a/tests/otel_mysql_metrics_e2e/__init__.py b/tests/otel_mysql_metrics_e2e/__init__.py new file mode 100644 index 00000000000..4bdfd9685fa --- /dev/null +++ b/tests/otel_mysql_metrics_e2e/__init__.py @@ -0,0 +1,4 @@ +# Unless explicitly stated otherwise all files in this repository are licensed 
under the Apache License Version 2.0. +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2024 Datadog, Inc. + diff --git a/tests/otel_mysql_metrics_e2e/mysql_metrics.json b/tests/otel_mysql_metrics_e2e/mysql_metrics.json new file mode 100644 index 00000000000..e2d7093f39e --- /dev/null +++ b/tests/otel_mysql_metrics_e2e/mysql_metrics.json @@ -0,0 +1,178 @@ +{ + "mysql.buffer_pool.data_pages": { + "data_type": "Gauge", + "description": "The number of data pages in the InnoDB buffer pool." + }, + "mysql.buffer_pool.limit": { + "data_type": "Gauge", + "description": "The configured size of the InnoDB buffer pool." + }, + "mysql.buffer_pool.operations": { + "data_type": "Sum", + "description": "The number of operations on the InnoDB buffer pool." + }, + "mysql.buffer_pool.page_flushes": { + "data_type": "Sum", + "description": "The number of requests to flush pages from the InnoDB buffer pool." + }, + "mysql.buffer_pool.pages": { + "data_type": "Gauge", + "description": "The number of pages in the InnoDB buffer pool." + }, + "mysql.buffer_pool.usage": { + "data_type": "Gauge", + "description": "The number of bytes in the InnoDB buffer pool." + }, + "mysql.client.network.io": { + "data_type": "Sum", + "description": "The number of transmitted bytes between server and clients." + }, + "mysql.commands": { + "data_type": "Sum", + "description": "The number of times each type of command has been executed." + }, + "mysql.connection.count": { + "data_type": "Sum", + "description": "The number of connection attempts (successful or not) to the MySQL server." + }, + "mysql.connection.errors": { + "data_type": "Sum", + "description": "Errors that occur during the client connection process." + }, + "mysql.double_writes": { + "data_type": "Sum", + "description": "The number of writes to the InnoDB doublewrite buffer." + }, + "mysql.handlers": { + "data_type": "Sum", + "description": "The number of requests to various MySQL handlers." + }, + "mysql.index.io.wait.count": { + "data_type": "Sum", + "description": "The total count of I/O wait events for an index." + }, + "mysql.index.io.wait.time": { + "data_type": "Sum", + "description": "The total time of I/O wait events for an index." + }, + "mysql.joins": { + "data_type": "Sum", + "description": "The number of joins that perform table scans." + }, + "mysql.locks": { + "data_type": "Sum", + "description": "The number of MySQL locks." + }, + "mysql.log_operations": { + "data_type": "Sum", + "description": "The number of InnoDB log operations." + }, + "mysql.max_used_connections": { + "data_type": "Gauge", + "description": "Maximum number of connections used simultaneously since the server started." + }, + "mysql.opened_resources": { + "data_type": "Sum", + "description": "The number of opened resources." + }, + "mysql.operations": { + "data_type": "Sum", + "description": "The number of InnoDB operations." + }, + "mysql.page_operations": { + "data_type": "Sum", + "description": "The number of InnoDB page operations." + }, + "mysql.page_size": { + "data_type": "Gauge", + "description": "InnoDB page size." + }, + "mysql.prepared_statements": { + "data_type": "Sum", + "description": "The number of times each type of prepared statement command has been issued." + }, + "mysql.query.client.count": { + "data_type": "Sum", + "description": "The number of statements executed by the server. This includes only statements sent to the server by clients."
+ }, + "mysql.query.count": { + "data_type": "Sum", + "description": "The number of statements executed by the server." + }, + "mysql.query.slow.count": { + "data_type": "Sum", + "description": "The number of slow queries." + }, + "mysql.row_locks": { + "data_type": "Sum", + "description": "The number of InnoDB row locks." + }, + "mysql.row_operations": { + "data_type": "Sum", + "description": "The number of InnoDB row operations." + }, + "mysql.sorts": { + "data_type": "Sum", + "description": "The number of MySQL sorts." + }, + "mysql.statement_event.count": { + "data_type": "Gauge", + "description": "Summary of current and recent statement events." + }, + "mysql.statement_event.wait.time": { + "data_type": "Gauge", + "description": "The total wait time of the summarized timed events." + }, + "mysql.table.average_row_length": { + "data_type": "Gauge", + "description": "The average row length in bytes for a given table." + }, + "mysql.table.io.wait.count": { + "data_type": "Sum", + "description": "The total count of I/O wait events for a table." + }, + "mysql.table.io.wait.time": { + "data_type": "Sum", + "description": "The total time of I/O wait events for a table." + }, + "mysql.table.lock_wait.read.count": { + "data_type": "Gauge", + "description": "The total table lock wait read events." + }, + "mysql.table.lock_wait.read.time": { + "data_type": "Gauge", + "description": "The total table lock wait read events times." + }, + "mysql.table.lock_wait.write.count": { + "data_type": "Gauge", + "description": "The total table lock wait write events." + }, + "mysql.table.lock_wait.write.time": { + "data_type": "Gauge", + "description": "The total table lock wait write events times." + }, + "mysql.table.rows": { + "data_type": "Gauge", + "description": "The number of rows for a given table." + }, + "mysql.table.size": { + "data_type": "Gauge", + "description": "The table size in bytes for a given table." + }, + "mysql.table_open_cache": { + "data_type": "Sum", + "description": "The number of hits, misses or overflows for open tables cache lookups." + }, + "mysql.threads": { + "data_type": "Gauge", + "description": "The state of MySQL threads." + }, + "mysql.tmp_resources": { + "data_type": "Sum", + "description": "The number of created temporary resources." + }, + "mysql.uptime": { + "data_type": "Sum", + "description": "The number of seconds that the server has been up." + } +} \ No newline at end of file diff --git a/tests/otel_mysql_metrics_e2e/test_otel_mysql_metrics.py b/tests/otel_mysql_metrics_e2e/test_otel_mysql_metrics.py new file mode 100644 index 00000000000..f63d6004ff3 --- /dev/null +++ b/tests/otel_mysql_metrics_e2e/test_otel_mysql_metrics.py @@ -0,0 +1,143 @@ +import time +from pathlib import Path +from typing import TYPE_CHECKING + +from utils import scenarios, interfaces, logger, features, context +from utils.otel_metrics_validator import OtelMetricsValidator, get_collector_metrics_from_scenario + +if TYPE_CHECKING: + from utils._context._scenarios.otel_collector import OtelCollectorScenario + + +# Load MySQL metrics specification +# Exclude metrics that require specific configurations or sustained activity +_EXCLUDED_MYSQL_METRICS: set[str] = set() +# Add any metrics that need to be excluded here +# Example: metrics that require replication, specific storage engines, etc. 
+ +mysql_metrics = OtelMetricsValidator.load_metrics_from_file( + metrics_file=Path(__file__).parent / "mysql_metrics.json", + excluded_metrics=_EXCLUDED_MYSQL_METRICS, +) + +# Initialize validator with MySQL metrics +_metrics_validator = OtelMetricsValidator(mysql_metrics) + + +@scenarios.otel_mysql_metrics_e2e +@features.otel_mysql_support +class Test_MySQLMetricsCollection: + def test_mysql_metrics_received_by_collector(self): + scenario: OtelCollectorScenario = context.scenario # type: ignore[assignment] + metrics_batch = get_collector_metrics_from_scenario(scenario) + + _, _, _validation_results, failed_validations = _metrics_validator.process_and_validate_metrics(metrics_batch) + + assert len(failed_validations) == 0, ( + f"Error: {len(failed_validations)} metrics failed the expected behavior!\n" + f"\n\nFailed validations:\n" + "\n".join(failed_validations) + ) + + +@scenarios.otel_mysql_metrics_e2e +@features.otel_mysql_support +class Test_BackendValidity: + def test_mysql_metrics_received_by_backend(self): + """Test metrics were actually queried / received by the backend""" + metrics_to_validate = list(mysql_metrics.keys()) + query_tags = {"rid": "otel-mysql-metrics", "host": "collector"} + + time.sleep(15) + _validated_metrics, failed_metrics = _metrics_validator.query_backend_for_metrics( + metric_names=metrics_to_validate, + query_tags=query_tags, + lookback_seconds=300, + retries=3, + initial_delay_s=0.5, + semantic_mode="combined", + ) + + if failed_metrics: + logger.error(f"\nāŒ Failed validations for semantic mode combined: {failed_metrics}") + + # test with native mode + _validated_metrics, failed_metrics = _metrics_validator.query_backend_for_metrics( + metric_names=metrics_to_validate, + query_tags=query_tags, + lookback_seconds=300, + retries=3, + initial_delay_s=0.5, + semantic_mode="native", + ) + + if failed_metrics: + logger.error(f"\nāŒ Failed validations for semantic mode native: {failed_metrics}") + + +@scenarios.otel_mysql_metrics_e2e +@features.otel_mysql_support +class Test_Smoke: + """MySQL-specific smoke test to generate database activity. + This test validates that basic MySQL metrics are collected after database operations. 
+ """ + + def setup_main(self) -> None: + """When the MySQL container spins up, we need some activity: + - create a table + - insert some data + - run queries + """ + scenario: OtelCollectorScenario = context.scenario # type: ignore[assignment] + container = scenario.mysql_container + + # Create table + r = container.exec_run( + "mysql -u system_tests_user -psystem_tests_password system_tests_dbname -e " + '"CREATE TABLE IF NOT EXISTS test_table (id INT PRIMARY KEY AUTO_INCREMENT, value VARCHAR(255));"' + ) + logger.info(f"Create table output: {r.output}") + + # Insert data + r = container.exec_run( + "mysql -u system_tests_user -psystem_tests_password system_tests_dbname -e " + "\"INSERT INTO test_table (value) VALUES ('test1'), ('test2'), ('test3');\"" + ) + logger.info(f"Insert data output: {r.output}") + + # Run a SELECT query + r = container.exec_run( + 'mysql -u system_tests_user -psystem_tests_password system_tests_dbname -e "SELECT * FROM test_table;"' + ) + logger.info(f"Select query output: {r.output}") + + # Run a COUNT query + r = container.exec_run( + "mysql -u system_tests_user -psystem_tests_password system_tests_dbname -e " + '"SELECT COUNT(*) FROM test_table;"' + ) + logger.info(f"Count query output: {r.output}") + + def test_main(self) -> None: + observed_metrics: set[str] = set() + + expected_metrics = { + "mysql.buffer_pool.usage", + "mysql.connection.count", + "mysql.connection.errors", + "mysql.query.count", + "mysql.threads", + } + + for data in interfaces.otel_collector.get_data("/api/v2/series"): + logger.info(f"In request {data['log_filename']}") + payload = data["request"]["content"] + for serie in payload["series"]: + metric = serie["metric"] + observed_metrics.add(metric) + logger.info(f" {metric} {serie['points']}") + + for metric in expected_metrics: + if metric not in observed_metrics: + logger.error(f"Metric {metric} hasn't been observed") + else: + logger.info(f"Metric {metric} has been observed") diff --git a/utils/_context/_scenarios/__init__.py b/utils/_context/_scenarios/__init__.py index ee326e853ca..b72fc21eb5e 100644 --- a/utils/_context/_scenarios/__init__.py +++ b/utils/_context/_scenarios/__init__.py @@ -1150,7 +1150,9 @@ class _Scenarios: otel_collector = OtelCollectorScenario("OTEL_COLLECTOR") otel_collector_e2e = OtelCollectorScenario("OTEL_COLLECTOR_E2E", mocked_backend=False) - + otel_mysql_metrics_e2e = OtelCollectorScenario( + "OTEL_MYSQL_METRICS_E2E", database_type="mysql", mocked_backend=False + ) integration_frameworks = IntegrationFrameworksScenario( "INTEGRATION_FRAMEWORKS", doc="Tests for third-party integration frameworks" ) diff --git a/utils/_context/_scenarios/endtoend.py b/utils/_context/_scenarios/endtoend.py index 63431aad14a..b420e04f1ae 100644 --- a/utils/_context/_scenarios/endtoend.py +++ b/utils/_context/_scenarios/endtoend.py @@ -116,7 +116,8 @@ def __init__( self._supporting_containers.append(RabbitMqContainer()) if include_mysql_db: - self._supporting_containers.append(MySqlContainer()) + self.mysql_container = MySqlContainer() + self._supporting_containers.append(self.mysql_container) if include_sqlserver: self._supporting_containers.append(MsSqlServerContainer()) diff --git a/utils/_context/_scenarios/otel_collector.py b/utils/_context/_scenarios/otel_collector.py index 87cc959838a..14903eaf18c 100644 --- a/utils/_context/_scenarios/otel_collector.py +++ b/utils/_context/_scenarios/otel_collector.py @@ -2,6 +2,7 @@ import pytest import yaml from pathlib import Path +from typing import Any from utils import interfaces 
from utils._context.component_version import Version @@ -16,10 +17,12 @@ class OtelCollectorScenario(DockerScenario): otel_collector_version: Version - def __init__(self, name: str, *, use_proxy: bool = True, mocked_backend: bool = True, database_type: str = "postgres"): + def __init__( + self, name: str, *, use_proxy: bool = True, mocked_backend: bool = True, database_type: str = "postgres" + ): include_postgres = database_type == "postgres" include_mysql = database_type == "mysql" - + super().__init__( name, github_workflow="endtoend", @@ -32,7 +35,7 @@ def __init__(self, name: str, *, use_proxy: bool = True, mocked_backend: bool = ) self.database_type = database_type - + # Select the appropriate config file based on database type if database_type == "mysql": config_file = "./utils/build/docker/otelcol-config-with-mysql.yaml" @@ -40,7 +43,7 @@ def __init__(self, name: str, *, use_proxy: bool = True, mocked_backend: bool = config_file = "./utils/build/docker/otelcol-config-with-postgres.yaml" else: config_file = "./utils/build/docker/otelcol-config.yaml" - + self.collector_container = OpenTelemetryCollectorContainer( config_file=config_file, environment={ @@ -73,23 +76,23 @@ def configure(self, config: pytest.Config) -> None: # Set default values first (required for OTEL collector config) docker_image_name = "unknown" docker_image_tag = "unknown" - - db_container = None - if self.database_type == "mysql" and hasattr(self, 'mysql_container'): + + db_container: Any = None + if self.database_type == "mysql" and hasattr(self, "mysql_container"): db_container = self.mysql_container - elif self.database_type == "postgres" and hasattr(self, 'postgres_container'): + elif self.database_type == "postgres" and hasattr(self, "postgres_container"): db_container = self.postgres_container - + if db_container: db_image = db_container.image.name image_parts = db_image.split(":") docker_image_name = image_parts[0] if len(image_parts) > 0 else "unknown" docker_image_tag = image_parts[1] if len(image_parts) > 1 else "unknown" - + # Extract version from image name db_version = docker_image_tag if docker_image_tag != "unknown" else "unknown" self.components[self.database_type] = db_version - + # Always set these environment variables (OTEL collector config requires them) self.collector_container.environment["DOCKER_IMAGE_NAME"] = docker_image_name self.collector_container.environment["DOCKER_IMAGE_TAG"] = docker_image_tag @@ -125,7 +128,7 @@ def customize_feature_parity_dashboard(self, result: dict) -> None: if "receivers" in otel_config: otel_config_keys = otel_config["receivers"].keys() result["configuration"]["receivers"] = ", ".join(otel_config_keys) - + # Handle PostgreSQL receiver if "postgresql" in otel_config["receivers"]: pg_config = otel_config["receivers"]["postgresql"] @@ -133,7 +136,7 @@ def customize_feature_parity_dashboard(self, result: dict) -> None: databases = pg_config.get("databases", []) if databases: result["configuration"]["postgresql_receiver_databases"] = ", ".join(databases) - + # Handle MySQL receiver if "mysql" in otel_config["receivers"]: mysql_config = otel_config["receivers"]["mysql"]