diff --git a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py index 9ab46f5ba4..fa7684adc0 100644 --- a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py +++ b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py @@ -452,10 +452,7 @@ def _create_llm_span( ) _set_span_attribute(span, GenAIAttributes.GEN_AI_SYSTEM, vendor) - _set_span_attribute(span, SpanAttributes.LLM_REQUEST_TYPE, request_type.value) - _set_span_attribute( - span, GenAIAttributes.GEN_AI_OPERATION_NAME, GenAICustomOperationName.LLM_REQUEST.value - ) + _set_span_attribute(span, GenAIAttributes.GEN_AI_OPERATION_NAME, request_type.value) # we already have an LLM span by this point, # so skip any downstream instrumentation from here @@ -732,11 +729,11 @@ def on_llm_end( span, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, completion_tokens ) _set_span_attribute( - span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens + span, SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens ) # Record token usage metrics - vendor = span.attributes.get(GenAIAttributes.GEN_AI_SYSTEM, "Langchain") + vendor = span.attributes.get(GenAIAttributes.GEN_AI_SYSTEM, "langchain") if prompt_tokens > 0: self.token_histogram.record( prompt_tokens, @@ -768,7 +765,7 @@ def on_llm_end( # Record duration before ending span duration = time.time() - self.spans[run_id].start_time - vendor = span.attributes.get(GenAIAttributes.GEN_AI_SYSTEM, "Langchain") + vendor = span.attributes.get(GenAIAttributes.GEN_AI_SYSTEM, "langchain") self.duration_histogram.record( duration, attributes={ diff --git a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/span_utils.py 
b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/span_utils.py index f7d71b18ab..fff0e321e2 100644 --- a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/span_utils.py +++ b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/span_utils.py @@ -97,23 +97,19 @@ def set_request_params(span, kwargs, span_holder: SpanHolder): _set_span_attribute(span, GenAIAttributes.GEN_AI_REQUEST_TOP_P, params.get("top_p")) tools = kwargs.get("invocation_params", {}).get("tools", []) - for i, tool in enumerate(tools): - tool_function = tool.get("function", tool) - _set_span_attribute( - span, - f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}.name", - tool_function.get("name"), - ) - _set_span_attribute( - span, - f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}.description", - tool_function.get("description"), - ) - _set_span_attribute( - span, - f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}.parameters", - json.dumps(tool_function.get("parameters", tool.get("input_schema"))), - ) + if tools: + tool_defs = [] + for tool in tools: + tool_function = tool.get("function", tool) + tool_def = { + "name": tool_function.get("name"), + "description": tool_function.get("description"), + } + params = tool_function.get("parameters") or tool.get("input_schema") + if params is not None: + tool_def["parameters"] = params + tool_defs.append(tool_def) + span.set_attribute(GenAIAttributes.GEN_AI_TOOL_DEFINITIONS, json.dumps(tool_defs)) def set_llm_request( @@ -126,17 +122,8 @@ def set_llm_request( set_request_params(span, kwargs, span_holder) if should_send_prompts(): - for i, msg in enumerate(prompts): - _set_span_attribute( - span, - f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.role", - "user", - ) - _set_span_attribute( - span, - f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.content", - msg, - ) + messages = [{"role": "user", "content": msg} for msg in prompts] + 
span.set_attribute(GenAIAttributes.GEN_AI_INPUT_MESSAGES, json.dumps(messages)) def set_chat_request( @@ -148,81 +135,62 @@ ) -> None: set_request_params(span, serialized.get("kwargs", {}), span_holder) + functions = kwargs.get("invocation_params", {}).get("functions", []) + if functions: + tool_defs = [ + { + "name": f.get("name"), + "description": f.get("description"), + "parameters": f.get("parameters"), + } + for f in functions + ] + span.set_attribute(GenAIAttributes.GEN_AI_TOOL_DEFINITIONS, json.dumps(tool_defs)) + if should_send_prompts(): - for i, function in enumerate( - kwargs.get("invocation_params", {}).get("functions", []) - ): - prefix = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}" - - _set_span_attribute(span, f"{prefix}.name", function.get("name")) - _set_span_attribute( - span, f"{prefix}.description", function.get("description") - ) - _set_span_attribute( - span, f"{prefix}.parameters", json.dumps(function.get("parameters")) - ) - - i = 0 + input_messages = [] for message in messages: for msg in message: - _set_span_attribute( - span, - f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.role", - _message_type_to_role(msg.type), - ) + msg_obj = {"role": _message_type_to_role(msg.type)} + tool_calls = ( msg.tool_calls if hasattr(msg, "tool_calls") else msg.additional_kwargs.get("tool_calls") ) - if tool_calls: - _set_chat_tool_calls( - span, f"{GenAIAttributes.GEN_AI_PROMPT}.{i}", tool_calls - ) + if tool_calls: msg_obj["tool_calls"] = _build_tool_calls_list(tool_calls) - # Always set content if it exists, regardless of tool_calls presence content = ( msg.content if isinstance(msg.content, str) else json.dumps(msg.content, cls=CallbackFilteredJSONEncoder) ) - _set_span_attribute( - span, - f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.content", - content, - ) + if content: + msg_obj["content"] = content if msg.type == "tool" and hasattr(msg, "tool_call_id"): - _set_span_attribute( - span, - f"{GenAIAttributes.GEN_AI_PROMPT}.{i}.tool_call_id", - msg.tool_call_id, 
- ) + msg_obj["tool_call_id"] = msg.tool_call_id - i += 1 + input_messages.append(msg_obj) + + if input_messages: + span.set_attribute(GenAIAttributes.GEN_AI_INPUT_MESSAGES, json.dumps(input_messages)) def set_chat_response(span: Span, response: LLMResult) -> None: if not should_send_prompts(): return - i = 0 + output_messages = [] for generations in response.generations: for generation in generations: - prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.{i}" - if hasattr(generation, "message") and generation.message and hasattr(generation.message, "type"): role = _message_type_to_role(generation.message.type) else: - # For non-chat completions (Generation objects), default to assistant role = "assistant" - _set_span_attribute( - span, - f"{prefix}.role", - role, - ) + msg_obj = {"role": role} # Try to get content from various sources content = None @@ -235,38 +203,19 @@ def set_chat_response(span: Span, response: LLMResult) -> None: content = json.dumps(generation.message.content, cls=CallbackFilteredJSONEncoder) if content: - _set_span_attribute( - span, - f"{prefix}.content", - content, - ) + msg_obj["content"] = content # Set finish reason if available if generation.generation_info and generation.generation_info.get("finish_reason"): - _set_span_attribute( - span, - f"{prefix}.finish_reason", - generation.generation_info.get("finish_reason"), - ) + msg_obj["finish_reason"] = generation.generation_info.get("finish_reason") # Handle tool calls and function calls if hasattr(generation, "message") and generation.message: # Handle legacy function_call format (single function call) if generation.message.additional_kwargs.get("function_call"): - _set_span_attribute( - span, - f"{prefix}.tool_calls.0.name", - generation.message.additional_kwargs.get("function_call").get( - "name" - ), - ) - _set_span_attribute( - span, - f"{prefix}.tool_calls.0.arguments", - generation.message.additional_kwargs.get("function_call").get( - "arguments" - ), - ) + fc = 
generation.message.additional_kwargs.get("function_call") + msg_obj["role"] = "assistant" + msg_obj["tool_calls"] = [{"name": fc.get("name"), "arguments": fc.get("arguments")}] # Handle new tool_calls format (multiple tool calls) tool_calls = ( @@ -275,13 +224,13 @@ def set_chat_response(span: Span, response: LLMResult) -> None: else generation.message.additional_kwargs.get("tool_calls") ) if tool_calls and isinstance(tool_calls, list): - _set_span_attribute( - span, - f"{prefix}.role", - "assistant", - ) - _set_chat_tool_calls(span, prefix, tool_calls) - i += 1 + msg_obj["role"] = "assistant" + msg_obj["tool_calls"] = _build_tool_calls_list(tool_calls) + + output_messages.append(msg_obj) + + if output_messages: + span.set_attribute(GenAIAttributes.GEN_AI_OUTPUT_MESSAGES, json.dumps(output_messages)) def set_chat_response_usage( @@ -325,9 +274,8 @@ def set_chat_response_usage( "input_token_details", {} ) cache_read_tokens += input_token_details.get("cache_read", 0) - except Exception as e: + except Exception: # If there's any issue processing usage metadata, continue without it - print(f"DEBUG: Error processing usage metadata: {e}") pass if ( @@ -348,16 +296,16 @@ def set_chat_response_usage( ) _set_span_attribute( span, - SpanAttributes.LLM_USAGE_TOTAL_TOKENS, + SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens, ) _set_span_attribute( span, - SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS, + SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens, ) if record_token_usage: - vendor = span.attributes.get(GenAIAttributes.GEN_AI_SYSTEM, "Langchain") + vendor = span.attributes.get(GenAIAttributes.GEN_AI_SYSTEM, "langchain") if input_tokens > 0: token_histogram.record( @@ -397,11 +345,9 @@ def _extract_model_name_from_association_metadata(metadata: Optional[dict[str, A return "unknown" -def _set_chat_tool_calls( - span: Span, prefix: str, tool_calls: list[dict[str, Any]] -) -> None: - for idx, tool_call in enumerate(tool_calls): - tool_call_prefix 
= f"{prefix}.tool_calls.{idx}" +def _build_tool_calls_list(tool_calls: list[dict[str, Any]]) -> list[dict[str, Any]]: + result = [] + for tool_call in tool_calls: tool_call_dict = dict(tool_call) tool_id = tool_call_dict.get("id") tool_name = tool_call_dict.get( @@ -411,14 +357,12 @@ def _set_chat_tool_calls( "args", tool_call_dict.get("function", {}).get("arguments") ) - _set_span_attribute(span, f"{tool_call_prefix}.id", tool_id) - _set_span_attribute( - span, - f"{tool_call_prefix}.name", - tool_name, - ) - _set_span_attribute( - span, - f"{tool_call_prefix}.arguments", - json.dumps(tool_args, cls=CallbackFilteredJSONEncoder), - ) + call_obj = {} + if tool_id: + call_obj["id"] = tool_id + if tool_name: + call_obj["name"] = tool_name + if tool_args is not None: + call_obj["arguments"] = json.dumps(tool_args, cls=CallbackFilteredJSONEncoder) + result.append(call_obj) + return result diff --git a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/vendor_detection.py b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/vendor_detection.py index 887e174523..0e003b8f25 100644 --- a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/vendor_detection.py +++ b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/vendor_detection.py @@ -26,7 +26,7 @@ def _get_vendor_rules() -> List[VendorRule]: VendorRule( exact_matches={"AzureChatOpenAI", "AzureOpenAI", "AzureOpenAIEmbeddings"}, patterns=["azure"], - vendor_name="Azure" + vendor_name="az.ai.openai" ), VendorRule( exact_matches={"ChatOpenAI", "OpenAI", "OpenAIEmbeddings"}, @@ -36,12 +36,12 @@ def _get_vendor_rules() -> List[VendorRule]: VendorRule( exact_matches={"ChatBedrock", "BedrockEmbeddings", "Bedrock", "BedrockChat"}, patterns=["bedrock", "aws"], - vendor_name="AWS" + vendor_name="aws.bedrock" ), VendorRule( exact_matches={"ChatAnthropic", "AnthropicLLM"}, 
patterns=["anthropic"], - vendor_name="Anthropic" + vendor_name="anthropic" ), VendorRule( exact_matches={ @@ -49,12 +49,12 @@ def _get_vendor_rules() -> List[VendorRule]: "GoogleGenerativeAI", "GooglePaLM", "ChatGooglePaLM" }, patterns=["vertex", "google", "palm", "gemini"], - vendor_name="Google" + vendor_name="gcp.gen_ai" ), VendorRule( exact_matches={"ChatCohere", "CohereEmbeddings", "Cohere"}, patterns=["cohere"], - vendor_name="Cohere" + vendor_name="cohere" ), VendorRule( exact_matches={ @@ -62,37 +62,37 @@ def _get_vendor_rules() -> List[VendorRule]: "HuggingFaceEmbeddings", "ChatHuggingFace" }, patterns=["huggingface"], - vendor_name="HuggingFace" + vendor_name="hugging_face" ), VendorRule( exact_matches={"ChatOllama", "OllamaEmbeddings", "Ollama"}, patterns=["ollama"], - vendor_name="Ollama" + vendor_name="ollama" ), VendorRule( exact_matches={"Together", "ChatTogether"}, patterns=["together"], - vendor_name="Together" + vendor_name="together_ai" ), VendorRule( exact_matches={"Replicate", "ChatReplicate"}, patterns=["replicate"], - vendor_name="Replicate" + vendor_name="replicate" ), VendorRule( exact_matches={"ChatFireworks", "Fireworks"}, patterns=["fireworks"], - vendor_name="Fireworks" + vendor_name="fireworks" ), VendorRule( exact_matches={"ChatGroq"}, patterns=["groq"], - vendor_name="Groq" + vendor_name="groq" ), VendorRule( exact_matches={"ChatMistralAI", "MistralAI"}, patterns=["mistral"], - vendor_name="MistralAI" + vendor_name="mistral_ai" ), ] @@ -109,7 +109,7 @@ def detect_vendor_from_class(class_name: str) -> str: Vendor string, defaults to "Langchain" if no match found """ if not class_name: - return "Langchain" + return "langchain" vendor_rules = _get_vendor_rules() @@ -117,4 +117,4 @@ def detect_vendor_from_class(class_name: str) -> str: if rule.matches(class_name): return rule.vendor_name - return "Langchain" + return "langchain" diff --git a/packages/opentelemetry-instrumentation-langchain/pyproject.toml 
b/packages/opentelemetry-instrumentation-langchain/pyproject.toml index 1725903e45..03459aea43 100644 --- a/packages/opentelemetry-instrumentation-langchain/pyproject.toml +++ b/packages/opentelemetry-instrumentation-langchain/pyproject.toml @@ -13,7 +13,7 @@ requires-python = ">=3.10,<4" dependencies = [ "opentelemetry-api>=1.38.0,<2", "opentelemetry-instrumentation>=0.59b0", - "opentelemetry-semantic-conventions-ai>=0.4.16,<0.5.0", + "opentelemetry-semantic-conventions-ai>=0.5.0,<0.6.0", "opentelemetry-semantic-conventions>=0.59b0", ] diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_chains.py b/packages/opentelemetry-instrumentation-langchain/tests/test_chains.py index bc5631ea47..62669b33ca 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_chains.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_chains.py @@ -127,7 +127,8 @@ def test_sequential_chain(instrument_legacy, span_exporter, log_exporter): (openai_span.attributes[GenAIAttributes.GEN_AI_RESPONSE_MODEL]) == "gpt-3.5-turbo-instruct" ) - assert openai_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"] + input_messages = json.loads(openai_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert input_messages[0]["content"] logs = log_exporter.get_finished_logs() assert ( diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_generation_role_extraction.py b/packages/opentelemetry-instrumentation-langchain/tests/test_generation_role_extraction.py index 60068843a3..8c133e2e1b 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_generation_role_extraction.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_generation_role_extraction.py @@ -6,6 +6,7 @@ in observability traces. 
""" +import json import pytest from unittest.mock import Mock from langchain_core.outputs import LLMResult, ChatGeneration, Generation @@ -32,98 +33,78 @@ def set_attribute(key, value): def test_chat_generation_with_ai_message_role(self, mock_span, monkeypatch): """Test that ChatGeneration with AIMessage correctly extracts 'assistant' role.""" - # Mock should_send_prompts to return True monkeypatch.setattr( "opentelemetry.instrumentation.langchain.span_utils.should_send_prompts", lambda: True ) - # Create ChatGeneration with AIMessage generation = ChatGeneration(message=AIMessage(content="Hello!")) llm_result = LLMResult(generations=[[generation]]) - # Call the function set_chat_response(mock_span, llm_result) - # Assert role is 'assistant', not 'unknown' - role_key = f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role" - assert role_key in mock_span.attributes - assert mock_span.attributes[role_key] == "assistant" + output_messages = json.loads(mock_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + assert output_messages[0]["role"] == "assistant" def test_chat_generation_with_tool_message_role(self, mock_span, monkeypatch): """Test that ChatGeneration with ToolMessage correctly extracts 'tool' role.""" - # Mock should_send_prompts to return True monkeypatch.setattr( "opentelemetry.instrumentation.langchain.span_utils.should_send_prompts", lambda: True ) - # Create ChatGeneration with ToolMessage generation = ChatGeneration( message=ToolMessage(content="Tool result", tool_call_id="123") ) llm_result = LLMResult(generations=[[generation]]) - # Call the function set_chat_response(mock_span, llm_result) - # Assert role is 'tool', not 'unknown' - role_key = f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role" - assert role_key in mock_span.attributes - assert mock_span.attributes[role_key] == "tool" + output_messages = json.loads(mock_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + assert output_messages[0]["role"] == "tool" def 
test_generation_without_message_defaults_to_assistant(self, mock_span, monkeypatch): """Test that Generation (non-chat) defaults to 'assistant' role.""" - # Mock should_send_prompts to return True monkeypatch.setattr( "opentelemetry.instrumentation.langchain.span_utils.should_send_prompts", lambda: True ) - # Create Generation without message (legacy completion) generation = Generation(text="This is a completion") llm_result = LLMResult(generations=[[generation]]) - # Call the function set_chat_response(mock_span, llm_result) - # Assert role defaults to 'assistant', not 'unknown' - role_key = f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role" - assert role_key in mock_span.attributes - assert mock_span.attributes[role_key] == "assistant" + output_messages = json.loads(mock_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + assert output_messages[0]["role"] == "assistant" def test_multiple_generations_with_different_roles(self, mock_span, monkeypatch): """Test that multiple generations with different message types are handled correctly.""" - # Mock should_send_prompts to return True monkeypatch.setattr( "opentelemetry.instrumentation.langchain.span_utils.should_send_prompts", lambda: True ) - # Create multiple generations with different message types gen1 = ChatGeneration(message=AIMessage(content="AI response")) gen2 = ChatGeneration(message=ToolMessage(content="Tool result", tool_call_id="123")) gen3 = Generation(text="Legacy completion") llm_result = LLMResult(generations=[[gen1], [gen2], [gen3]]) - # Call the function set_chat_response(mock_span, llm_result) - # Assert all roles are correctly set - assert mock_span.attributes[f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role"] == "assistant" - assert mock_span.attributes[f"{GenAIAttributes.GEN_AI_COMPLETION}.1.role"] == "tool" - assert mock_span.attributes[f"{GenAIAttributes.GEN_AI_COMPLETION}.2.role"] == "assistant" + output_messages = json.loads(mock_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + 
assert output_messages[0]["role"] == "assistant" + assert output_messages[1]["role"] == "tool" + assert output_messages[2]["role"] == "assistant" def test_generation_type_attribute_is_not_used(self, mock_span, monkeypatch): """Test that generation.type (which returns class name) is not used directly.""" - # Mock should_send_prompts to return True monkeypatch.setattr( "opentelemetry.instrumentation.langchain.span_utils.should_send_prompts", lambda: True ) - # Create ChatGeneration - note that generation.type would be "ChatGeneration" generation = ChatGeneration(message=AIMessage(content="Test")) # Verify the bug scenario: generation.type returns class name, not message type @@ -132,11 +113,7 @@ def test_generation_type_attribute_is_not_used(self, mock_span, monkeypatch): llm_result = LLMResult(generations=[[generation]]) - # Call the function set_chat_response(mock_span, llm_result) - # Assert role is 'assistant', not 'unknown' - # If the bug existed, passing generation.type directly to _message_type_to_role - # would return 'unknown' because "ChatGeneration" doesn't match any message type - role_key = f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role" - assert mock_span.attributes[role_key] == "assistant" + output_messages = json.loads(mock_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + assert output_messages[0]["role"] == "assistant" diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_langgraph.py b/packages/opentelemetry-instrumentation-langchain/tests/test_langgraph.py index c912106200..7c84283714 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_langgraph.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_langgraph.py @@ -56,7 +56,7 @@ def calculate(state: State): # agent_id removed per maintainer feedback - rely on agent name only assert openai_span.parent.span_id == calculate_task_span.context.span_id - assert openai_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "chat" + assert 
openai_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat" assert openai_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "gpt-4o" assert ( openai_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"] @@ -76,7 +76,7 @@ def calculate(state: State): assert openai_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 24 assert openai_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] == 11 - assert openai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 35 + assert openai_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] == 35 @pytest.mark.vcr diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_llms.py b/packages/opentelemetry-instrumentation-langchain/tests/test_llms.py index fcf8767b75..7c5e3f7408 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_llms.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_llms.py @@ -147,17 +147,13 @@ def test_custom_llm(instrument_legacy, span_exporter, log_exporter): span for span in spans if span.name == "HuggingFaceTextGenInference.completion" ) - assert hugging_face_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "completion" + assert hugging_face_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "completion" assert hugging_face_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "unknown" - assert hugging_face_span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "HuggingFace" - assert ( - hugging_face_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"] - == "System: You are a helpful assistant\nHuman: tell me a short joke" - ) - assert ( - hugging_face_span.attributes[f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content"] - == response - ) + assert hugging_face_span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "hugging_face" + input_messages = json.loads(hugging_face_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert input_messages[0]["content"] == "System: You are a helpful 
assistant\nHuman: tell me a short joke" + output_messages = json.loads(hugging_face_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + assert output_messages[0]["content"] == response logs = log_exporter.get_finished_logs() assert len(logs) == 0, ( @@ -192,7 +188,7 @@ def test_custom_llm_with_events_with_content( span for span in spans if span.name == "HuggingFaceTextGenInference.completion" ) - assert hugging_face_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "completion" + assert hugging_face_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "completion" assert hugging_face_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "unknown" logs = log_exporter.get_finished_logs() @@ -243,7 +239,7 @@ def test_custom_llm_with_events_with_no_content( span for span in spans if span.name == "HuggingFaceTextGenInference.completion" ) - assert hugging_face_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "completion" + assert hugging_face_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "completion" assert hugging_face_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "unknown" logs = log_exporter.get_finished_logs() @@ -283,20 +279,18 @@ def test_openai(instrument_legacy, span_exporter, log_exporter): openai_span = next(span for span in spans if span.name == "ChatOpenAI.chat") - assert openai_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "chat" + assert openai_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat" assert openai_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "gpt-4o-mini" assert openai_span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "openai" - assert ( - (openai_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"]) - == "You are a helpful assistant" - ) - assert (openai_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "system" - assert (openai_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.content"]) == prompt - assert 
(openai_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.role"]) == "user" + input_messages = json.loads(openai_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert input_messages[0]["content"] == "You are a helpful assistant" + assert input_messages[0]["role"] == "system" + assert input_messages[1]["content"] == prompt + assert input_messages[1]["role"] == "user" assert openai_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 1497 assert openai_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] == 1037 - assert openai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 2534 + assert openai_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] == 2534 workflow_span = next( span for span in spans if span.name == "RunnableSequence.workflow" @@ -338,12 +332,12 @@ def test_openai_with_events_with_content( openai_span = next(span for span in spans if span.name == "ChatOpenAI.chat") - assert openai_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "chat" + assert openai_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat" assert openai_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "gpt-4o-mini" assert openai_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 1497 assert openai_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] == 1037 - assert openai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 2534 + assert openai_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] == 2534 logs = log_exporter.get_finished_logs() assert len(logs) == 3 @@ -389,12 +383,12 @@ def test_openai_with_events_with_no_content( openai_span = next(span for span in spans if span.name == "ChatOpenAI.chat") - assert openai_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "chat" + assert openai_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat" assert openai_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "gpt-4o-mini" assert 
openai_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 1497 assert openai_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] == 1037 - assert openai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 2534 + assert openai_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] == 2534 logs = log_exporter.get_finished_logs() assert len(logs) == 3 @@ -446,33 +440,17 @@ class Joke(BaseModel): openai_span = next(span for span in spans if span.name == "ChatOpenAI.chat") - assert openai_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "chat" + assert openai_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat" assert openai_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "gpt-3.5-turbo" - assert ( - (openai_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"]) - == "You are helpful assistant" - ) - assert (openai_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "system" - assert ( - (openai_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.content"]) - == "tell me a short joke" - ) - assert (openai_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.role"]) == "user" - assert ( - openai_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name"] - == "Joke" - ) - assert ( - openai_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.description"] - == "Joke to tell user." 
- ) - assert ( - json.loads( - openai_span.attributes[ - f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.parameters" - ] - ) - ) == { + input_messages = json.loads(openai_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert input_messages[0]["content"] == "You are helpful assistant" + assert input_messages[0]["role"] == "system" + assert input_messages[1]["content"] == "tell me a short joke" + assert input_messages[1]["role"] == "user" + tool_defs = json.loads(openai_span.attributes[GenAIAttributes.GEN_AI_TOOL_DEFINITIONS]) + assert tool_defs[0]["name"] == "Joke" + assert tool_defs[0]["description"] == "Joke to tell user." + assert tool_defs[0]["parameters"] == { "type": "object", "properties": { "setup": {"description": "question to set up a joke", "type": "string"}, @@ -485,7 +463,7 @@ class Joke(BaseModel): } assert openai_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 76 assert openai_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] == 35 - assert openai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 111 + assert openai_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] == 111 workflow_span = next( span for span in spans if span.name == "RunnableSequence.workflow" @@ -536,12 +514,12 @@ class Joke(BaseModel): openai_span = next(span for span in spans if span.name == "ChatOpenAI.chat") - assert openai_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "chat" + assert openai_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat" assert openai_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "gpt-3.5-turbo" assert openai_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 76 assert openai_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] == 35 - assert openai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 111 + assert openai_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] == 111 logs = log_exporter.get_finished_logs() assert len(logs) == 3 @@ -609,12 
+587,12 @@ class Joke(BaseModel): openai_span = next(span for span in spans if span.name == "ChatOpenAI.chat") - assert openai_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "chat" + assert openai_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat" assert openai_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "gpt-3.5-turbo" assert openai_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 76 assert openai_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] == 35 - assert openai_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 111 + assert openai_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] == 111 logs = log_exporter.get_finished_logs() assert len(logs) == 3 @@ -658,25 +636,18 @@ def test_anthropic(instrument_legacy, span_exporter, log_exporter): span for span in spans if span.name == "RunnableSequence.workflow" ) - assert anthropic_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "chat" + assert anthropic_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat" assert anthropic_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "claude-2.1" - assert anthropic_span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "Anthropic" + assert anthropic_span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "anthropic" assert anthropic_span.attributes[GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE] == 0.5 - assert ( - (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"]) - == "You are a helpful assistant" - ) - assert ( - (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "system" - ) - assert ( - (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.content"]) - == "tell me a short joke" - ) - assert (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.role"]) == "user" + input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert input_messages[0]["content"] == "You are a helpful assistant" + assert 
input_messages[0]["role"] == "system" + assert input_messages[1]["content"] == "tell me a short joke" + assert input_messages[1]["role"] == "user" assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 19 assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] == 22 - assert anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 41 + assert anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] == 41 assert ( anthropic_span.attributes["gen_ai.response.id"] == "msg_017fMG9SRDFTBhcD1ibtN1nK" @@ -725,13 +696,13 @@ def test_anthropic_with_events_with_content( anthropic_span = next(span for span in spans if span.name == "ChatAnthropic.chat") - assert anthropic_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "chat" + assert anthropic_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat" assert anthropic_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "claude-2.1" assert anthropic_span.attributes[GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE] == 0.5 assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 19 assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] == 22 - assert anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 41 + assert anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] == 41 assert ( anthropic_span.attributes["gen_ai.response.id"] == "msg_017fMG9SRDFTBhcD1ibtN1nK" @@ -781,13 +752,13 @@ def test_anthropic_with_events_with_no_content( anthropic_span = next(span for span in spans if span.name == "ChatAnthropic.chat") - assert anthropic_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "chat" + assert anthropic_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat" assert anthropic_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "claude-2.1" assert anthropic_span.attributes[GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE] == 0.5 assert 
anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 19 assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] == 22 - assert anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 41 + assert anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] == 41 assert ( anthropic_span.attributes["gen_ai.response.id"] == "msg_017fMG9SRDFTBhcD1ibtN1nK" @@ -843,25 +814,20 @@ def test_bedrock(instrument_legacy, span_exporter, log_exporter): span for span in spans if span.name == "RunnableSequence.workflow" ) - assert bedrock_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "chat" + assert bedrock_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat" assert ( bedrock_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "anthropic.claude-3-haiku-20240307-v1:0" ) - assert bedrock_span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "AWS" - assert ( - (bedrock_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"]) - == "You are a helpful assistant" - ) - assert (bedrock_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "system" - assert ( - (bedrock_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.content"]) - == "tell me a short joke" - ) - assert (bedrock_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.role"]) == "user" + assert bedrock_span.attributes[GenAIAttributes.GEN_AI_SYSTEM] == "aws.bedrock" + input_messages = json.loads(bedrock_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert input_messages[0]["content"] == "You are a helpful assistant" + assert input_messages[0]["role"] == "system" + assert input_messages[1]["content"] == "tell me a short joke" + assert input_messages[1]["role"] == "user" assert bedrock_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 16 assert bedrock_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] == 27 - assert bedrock_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 43 + assert 
bedrock_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] == 43 output = json.loads( workflow_span.attributes[SpanAttributes.TRACELOOP_ENTITY_OUTPUT] ) @@ -914,7 +880,7 @@ def test_bedrock_with_events_with_content( bedrock_span = next(span for span in spans if span.name == "ChatBedrock.chat") - assert bedrock_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "chat" + assert bedrock_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat" assert ( bedrock_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "anthropic.claude-3-haiku-20240307-v1:0" @@ -922,7 +888,7 @@ def test_bedrock_with_events_with_content( assert bedrock_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 16 assert bedrock_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] == 27 - assert bedrock_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 43 + assert bedrock_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] == 43 logs = log_exporter.get_finished_logs() assert len(logs) == 3 @@ -977,14 +943,14 @@ def test_bedrock_with_events_with_no_content( bedrock_span = next(span for span in spans if span.name == "ChatBedrock.chat") - assert bedrock_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "chat" + assert bedrock_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "chat" assert ( bedrock_span.attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL] == "anthropic.claude-3-haiku-20240307-v1:0" ) assert bedrock_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 16 assert bedrock_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] == 27 - assert bedrock_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] == 43 + assert bedrock_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] == 43 logs = log_exporter.get_finished_logs() assert len(logs) == 3 diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_semconv_compliance.py 
b/packages/opentelemetry-instrumentation-langchain/tests/test_semconv_compliance.py new file mode 100644 index 0000000000..35a01e3380 --- /dev/null +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_semconv_compliance.py @@ -0,0 +1,8 @@ +# ruff: noqa: F401, F403 +""" +Semconv compliance tests re-used from opentelemetry-semantic-conventions-ai. + +Ensures the installed semconv package has the expected constant values. +To add more compliance checks, update _testing.py in that package — not here. +""" +from opentelemetry.semconv_ai._testing import * diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_structured_output.py b/packages/opentelemetry-instrumentation-langchain/tests/test_structured_output.py index 64899bea03..0d2b9e6035 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_structured_output.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_structured_output.py @@ -1,3 +1,4 @@ +import json from typing import List import pytest @@ -32,7 +33,8 @@ def test_structured_output(instrument_legacy, span_exporter, log_exporter): chat_span = next(span for span in spans if span.name == "ChatOpenAI.chat") - assert chat_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"] == query_text + input_messages = json.loads(chat_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert input_messages[0]["content"] == query_text logs = log_exporter.get_finished_logs() assert ( diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_tool_call_content.py b/packages/opentelemetry-instrumentation-langchain/tests/test_tool_call_content.py index c0b447940b..834fc27321 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_tool_call_content.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_tool_call_content.py @@ -2,10 +2,11 @@ Test for the fix of the issue where assistant message content is missing when tool calls are present in 
LangGraph/LangChain instrumentation. -This test reproduces the issue reported in GitHub where gen_ai.prompt.X.content +This test reproduces the issue reported in GitHub where gen_ai.input.messages attributes were missing for assistant messages that contained tool_calls. """ +import json from unittest.mock import Mock from langchain_core.messages import AIMessage, HumanMessage, ToolMessage from opentelemetry.instrumentation.langchain.span_utils import set_chat_request @@ -51,48 +52,23 @@ def test_assistant_message_with_tool_calls_includes_content(): call_args = [call[0] for call in mock_span.set_attribute.call_args_list] attributes = {args[0]: args[1] for args in call_args} - assert f"{GenAIAttributes.GEN_AI_PROMPT}.0.role" in attributes - assert attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"] == "user" - assert f"{GenAIAttributes.GEN_AI_PROMPT}.0.content" in attributes - assert ( - attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"] - == "what is the current time? First greet me." - ) - assert f"{GenAIAttributes.GEN_AI_PROMPT}.1.role" in attributes - assert attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.role"] == "assistant" - assert f"{GenAIAttributes.GEN_AI_PROMPT}.1.content" in attributes - assert ( - attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.content"] - == "Hello! Let me check the current time for you." 
- ) - assert f"{GenAIAttributes.GEN_AI_PROMPT}.1.tool_calls.0.id" in attributes - assert ( - attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.tool_calls.0.id"] - == "call_qU7pH3EdQvzwkPyKPOdpgaKA" - ) - assert f"{GenAIAttributes.GEN_AI_PROMPT}.1.tool_calls.0.name" in attributes - assert ( - attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.tool_calls.0.name"] - == "get_current_time" - ) - assert f"{GenAIAttributes.GEN_AI_PROMPT}.2.role" in attributes - assert attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.2.role"] == "tool" - assert f"{GenAIAttributes.GEN_AI_PROMPT}.2.content" in attributes - assert ( - attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.2.content"] == "2025-08-15 08:15:21" - ) - assert f"{GenAIAttributes.GEN_AI_PROMPT}.2.tool_call_id" in attributes - assert ( - attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.2.tool_call_id"] - == "call_qU7pH3EdQvzwkPyKPOdpgaKA" - ) - assert f"{GenAIAttributes.GEN_AI_PROMPT}.3.role" in attributes - assert attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.3.role"] == "assistant" - assert f"{GenAIAttributes.GEN_AI_PROMPT}.3.content" in attributes - assert ( - attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.3.content"] - == "The current time is 2025-08-15 08:15:21" - ) + assert GenAIAttributes.GEN_AI_INPUT_MESSAGES in attributes + input_messages = json.loads(attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + + assert input_messages[0]["role"] == "user" + assert input_messages[0]["content"] == "what is the current time? First greet me." + + assert input_messages[1]["role"] == "assistant" + assert input_messages[1]["content"] == "Hello! Let me check the current time for you." 
+ assert input_messages[1]["tool_calls"][0]["id"] == "call_qU7pH3EdQvzwkPyKPOdpgaKA" + assert input_messages[1]["tool_calls"][0]["name"] == "get_current_time" + + assert input_messages[2]["role"] == "tool" + assert input_messages[2]["content"] == "2025-08-15 08:15:21" + assert input_messages[2]["tool_call_id"] == "call_qU7pH3EdQvzwkPyKPOdpgaKA" + + assert input_messages[3]["role"] == "assistant" + assert input_messages[3]["content"] == "The current time is 2025-08-15 08:15:21" def test_assistant_message_with_only_tool_calls_no_content(): @@ -121,16 +97,12 @@ def test_assistant_message_with_only_tool_calls_no_content(): call_args = [call[0] for call in mock_span.set_attribute.call_args_list] attributes = {args[0]: args[1] for args in call_args} - assert f"{GenAIAttributes.GEN_AI_PROMPT}.0.role" in attributes - assert attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"] == "assistant" - # Content is being set as empty string, so we expect it to be present - assert f"{GenAIAttributes.GEN_AI_PROMPT}.0.content" in attributes - assert f"{GenAIAttributes.GEN_AI_PROMPT}.0.tool_calls.0.id" in attributes - assert attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.tool_calls.0.id"] == "call_123" - assert f"{GenAIAttributes.GEN_AI_PROMPT}.0.tool_calls.0.name" in attributes - assert ( - attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.tool_calls.0.name"] == "some_tool" - ) + assert GenAIAttributes.GEN_AI_INPUT_MESSAGES in attributes + input_messages = json.loads(attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + + assert input_messages[0]["role"] == "assistant" + assert input_messages[0]["tool_calls"][0]["id"] == "call_123" + assert input_messages[0]["tool_calls"][0]["name"] == "some_tool" def test_assistant_message_with_only_content_no_tool_calls(): @@ -148,16 +120,11 @@ def test_assistant_message_with_only_content_no_tool_calls(): set_chat_request(mock_span, {}, messages, {}, mock_span_holder) call_args = [call[0] for call in mock_span.set_attribute.call_args_list] - attributes 
= {args[0]: args[1] for args in call_args} - assert f"{GenAIAttributes.GEN_AI_PROMPT}.0.role" in attributes - assert attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"] == "assistant" - assert f"{GenAIAttributes.GEN_AI_PROMPT}.0.content" in attributes - assert ( - attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"] - == "Just a regular response with no tool calls" - ) + assert GenAIAttributes.GEN_AI_INPUT_MESSAGES in attributes + input_messages = json.loads(attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) - tool_call_attributes = [attr for attr in attributes.keys() if "tool_calls" in attr] - assert len(tool_call_attributes) == 0 + assert input_messages[0]["role"] == "assistant" + assert input_messages[0]["content"] == "Just a regular response with no tool calls" + assert "tool_calls" not in input_messages[0] diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_tool_calls.py b/packages/opentelemetry-instrumentation-langchain/tests/test_tool_calls.py index 1908535164..3e6d8decf0 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_tool_calls.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_tool_calls.py @@ -15,7 +15,6 @@ from opentelemetry.semconv._incubating.attributes import ( gen_ai_attributes as GenAIAttributes, ) -from opentelemetry.semconv_ai import SpanAttributes def food_analysis( @@ -41,8 +40,8 @@ def test_tool_calls(instrument_legacy, span_exporter, log_exporter): # span for span in spans if span.name == "ChatOpenAI.chat" # ) - # assert chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name"] == "food_analysis" - # assert json.loads(chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.parameters"]) == { + # assert chat_span.attributes[f"{GenAIAttributes.GEN_AI_TOOL_DEFINITIONS}.0.name"] == "food_analysis" + # assert json.loads(chat_span.attributes[f"{GenAIAttributes.GEN_AI_TOOL_DEFINITIONS}.0.parameters"]) == { # "properties": { # "name": {"type": "string"}, # 
"healthy": {"type": "boolean"}, @@ -192,17 +191,9 @@ def get_weather(location: str) -> str: chat_span = spans[0] assert chat_span.name == "ChatOpenAI.chat" - assert chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name"] == "get_weather" - assert json.loads(chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.parameters"]) == { - "properties": { - "location": {"type": "string"}, - }, - "required": ["location"], - "type": "object", - } - - assert chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name"] == "get_weather" - assert json.loads(chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.parameters"]) == { + tool_defs = json.loads(chat_span.attributes[GenAIAttributes.GEN_AI_TOOL_DEFINITIONS]) + assert tool_defs[0]["name"] == "get_weather" + assert tool_defs[0]["parameters"] == { "properties": { "location": {"type": "string"}, }, @@ -480,17 +471,17 @@ def get_news(location: str) -> str: chat_span = spans[0] assert chat_span.name == "ChatAnthropic.chat" - assert chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name"] == "get_weather" - assert json.loads(chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.parameters"]) == { + tool_defs = json.loads(chat_span.attributes[GenAIAttributes.GEN_AI_TOOL_DEFINITIONS]) + assert tool_defs[0]["name"] == "get_weather" + assert tool_defs[0]["parameters"] == { "properties": { "location": {"type": "string"}, }, "required": ["location"], "type": "object", } - - assert chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.name"] == "get_news" - assert json.loads(chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.parameters"]) == { + assert tool_defs[1]["name"] == "get_news" + assert tool_defs[1]["parameters"] == { "properties": { "location": {"type": "string"}, }, @@ -697,17 +688,17 @@ def get_news(location: str) -> str: chat_span = spans[0] assert chat_span.name == "ChatAnthropic.chat" - assert 
chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name"] == "get_weather" - assert json.loads(chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.parameters"]) == { + tool_defs = json.loads(chat_span.attributes[GenAIAttributes.GEN_AI_TOOL_DEFINITIONS]) + assert tool_defs[0]["name"] == "get_weather" + assert tool_defs[0]["parameters"] == { "properties": { "location": {"type": "string"}, }, "required": ["location"], "type": "object", } - - assert chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.name"] == "get_news" - assert json.loads(chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.parameters"]) == { + assert tool_defs[1]["name"] == "get_news" + assert tool_defs[1]["parameters"] == { "properties": { "location": {"type": "string"}, }, @@ -1045,17 +1036,17 @@ def get_news(location: str) -> str: chat_span = spans[0] assert chat_span.name == "ChatOpenAI.chat" - assert chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name"] == "get_weather" - assert json.loads(chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.parameters"]) == { + tool_defs = json.loads(chat_span.attributes[GenAIAttributes.GEN_AI_TOOL_DEFINITIONS]) + assert tool_defs[0]["name"] == "get_weather" + assert tool_defs[0]["parameters"] == { "properties": { "location": {"type": "string"}, }, "required": ["location"], "type": "object", } - - assert chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.name"] == "get_news" - assert json.loads(chat_span.attributes[f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.1.parameters"]) == { + assert tool_defs[1]["name"] == "get_news" + assert tool_defs[1]["parameters"] == { "properties": { "location": {"type": "string"}, }, diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_vendor_detection.py b/packages/opentelemetry-instrumentation-langchain/tests/test_vendor_detection.py new file mode 100644 index 0000000000..af2dac4b9f --- /dev/null +++ 
b/packages/opentelemetry-instrumentation-langchain/tests/test_vendor_detection.py @@ -0,0 +1,59 @@ +import pytest +from opentelemetry.instrumentation.langchain.vendor_detection import detect_vendor_from_class + + +@pytest.mark.parametrize("class_name,expected", [ + # Exact matches + ("AzureChatOpenAI", "az.ai.openai"), + ("AzureOpenAI", "az.ai.openai"), + ("AzureOpenAIEmbeddings", "az.ai.openai"), + ("ChatOpenAI", "openai"), + ("OpenAI", "openai"), + ("OpenAIEmbeddings", "openai"), + ("ChatBedrock", "aws.bedrock"), + ("BedrockEmbeddings", "aws.bedrock"), + ("Bedrock", "aws.bedrock"), + ("BedrockChat", "aws.bedrock"), + ("ChatAnthropic", "anthropic"), + ("AnthropicLLM", "anthropic"), + ("ChatVertexAI", "gcp.gen_ai"), + ("VertexAI", "gcp.gen_ai"), + ("ChatGoogleGenerativeAI", "gcp.gen_ai"), + ("GoogleGenerativeAI", "gcp.gen_ai"), + ("ChatCohere", "cohere"), + ("Cohere", "cohere"), + ("HuggingFacePipeline", "hugging_face"), + ("HuggingFaceTextGenInference", "hugging_face"), + ("ChatHuggingFace", "hugging_face"), + ("ChatOllama", "ollama"), + ("Ollama", "ollama"), + ("Together", "together_ai"), + ("ChatTogether", "together_ai"), + ("Replicate", "replicate"), + ("ChatReplicate", "replicate"), + ("ChatFireworks", "fireworks"), + ("Fireworks", "fireworks"), + ("ChatGroq", "groq"), + ("ChatMistralAI", "mistral_ai"), + ("MistralAI", "mistral_ai"), + # Pattern matches + ("SomeAzureModel", "az.ai.openai"), + ("CustomOpenAIModel", "openai"), + ("AwsBedrockModel", "aws.bedrock"), + ("AnthropicCustom", "anthropic"), + ("VertexCustom", "gcp.gen_ai"), + ("GeminiModel", "gcp.gen_ai"), + ("CohereCustom", "cohere"), + ("OllamaCustom", "ollama"), + ("TogetherCustom", "together_ai"), + ("ReplicateCustom", "replicate"), + ("FireworksCustom", "fireworks"), + ("GroqCustom", "groq"), + ("MistralCustom", "mistral_ai"), + # Default fallback + ("UnknownModel", "langchain"), + ("", "langchain"), + (None, "langchain"), +]) +def test_detect_vendor_from_class(class_name, expected): + assert 
detect_vendor_from_class(class_name) == expected diff --git a/packages/opentelemetry-instrumentation-writer/pyproject.toml b/packages/opentelemetry-instrumentation-writer/pyproject.toml index df9d3e8a7a..fe26396fcd 100644 --- a/packages/opentelemetry-instrumentation-writer/pyproject.toml +++ b/packages/opentelemetry-instrumentation-writer/pyproject.toml @@ -12,7 +12,7 @@ requires-python = ">=3.10,<4" dependencies = [ "opentelemetry-api>=1.38.0,<2", "opentelemetry-instrumentation>=0.59b0", - "opentelemetry-semantic-conventions-ai>=0.4.11", + "opentelemetry-semantic-conventions-ai>=0.4.11,<0.5.0", "opentelemetry-semantic-conventions>=0.59b0", ] diff --git a/packages/opentelemetry-semantic-conventions-ai/MIGRATION.md b/packages/opentelemetry-semantic-conventions-ai/MIGRATION.md new file mode 100644 index 0000000000..dd0426a315 --- /dev/null +++ b/packages/opentelemetry-semantic-conventions-ai/MIGRATION.md @@ -0,0 +1,189 @@ +# Migration Guide: opentelemetry-semantic-conventions-ai v0.4.x → v0.5.x + +This guide covers breaking changes introduced when aligning the `opentelemetry-semantic-conventions-ai` +package with the upstream [OTel GenAI semantic conventions](https://opentelemetry.io/docs/specs/semconv/gen-ai/). + +--- + +## 1. Removed constants (previously duplicated upstream) + +These `SpanAttributes` constants have been **removed**. They are now part of the official +`opentelemetry-semantic-conventions` package. Import them directly from upstream. 
+ +```python +# Before +from opentelemetry.semconv_ai import SpanAttributes +span.set_attribute(SpanAttributes.LLM_SYSTEM, "openai") + +# After +from opentelemetry.semconv._incubating.attributes import gen_ai_attributes as GenAIAttributes +span.set_attribute(GenAIAttributes.GEN_AI_SYSTEM, "openai") +``` + +| Removed constant | Upstream replacement | +|---|---| +| `SpanAttributes.LLM_SYSTEM` | `GenAIAttributes.GEN_AI_SYSTEM` | +| `SpanAttributes.LLM_REQUEST_MODEL` | `GenAIAttributes.GEN_AI_REQUEST_MODEL` | +| `SpanAttributes.LLM_REQUEST_MAX_TOKENS` | `GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS` | +| `SpanAttributes.LLM_REQUEST_TEMPERATURE` | `GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE` | +| `SpanAttributes.LLM_REQUEST_TOP_P` | `GenAIAttributes.GEN_AI_REQUEST_TOP_P` | +| `SpanAttributes.LLM_TOP_K` | `GenAIAttributes.GEN_AI_REQUEST_TOP_K` | +| `SpanAttributes.LLM_CHAT_STOP_SEQUENCES` | `GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES` | +| `SpanAttributes.LLM_FREQUENCY_PENALTY` | `GenAIAttributes.GEN_AI_REQUEST_FREQUENCY_PENALTY` | +| `SpanAttributes.LLM_PRESENCE_PENALTY` | `GenAIAttributes.GEN_AI_REQUEST_PRESENCE_PENALTY` | +| `SpanAttributes.LLM_RESPONSE_MODEL` | `GenAIAttributes.GEN_AI_RESPONSE_MODEL` | +| `SpanAttributes.LLM_USAGE_COMPLETION_TOKENS` | `GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS` | +| `SpanAttributes.LLM_USAGE_PROMPT_TOKENS` | `GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS` | +| `SpanAttributes.LLM_TOKEN_TYPE` | `GenAIAttributes.GEN_AI_TOKEN_TYPE` | +| `SpanAttributes.LLM_REQUEST_FUNCTIONS` | `GenAIAttributes.GEN_AI_TOOL_DEFINITIONS` | +| `SpanAttributes.LLM_PROMPTS` | `GenAIAttributes.GEN_AI_PROMPT` | +| `SpanAttributes.LLM_COMPLETIONS` | `GenAIAttributes.GEN_AI_COMPLETION` | +| `SpanAttributes.LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT` | `GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT` | +| `SpanAttributes.LLM_REQUEST_TYPE` | `GenAIAttributes.GEN_AI_OPERATION_NAME` | + +> **Note on `LLM_REQUEST_TYPE`**: The old `LLMRequestTypeValues` enum is 
replaced by +> `GenAiOperationNameValues` from upstream, or by `GenAICustomOperationName` for +project-specific operation names. + +--- + +## 2. Renamed constants (stay in `SpanAttributes`, new `GEN_AI_*` prefix) + +These constants remain in the `opentelemetry-semantic-conventions-ai` package but their +Python names have been renamed from `LLM_*` to `GEN_AI_*`. +For most of these the underlying attribute **string value** also changed from `llm.*` to `gen_ai.*` +(e.g. `llm.is_streaming` → `gen_ai.is_streaming`) — update dashboards, alerts, and OTLP processors +that filter on the old `llm.*` attribute names. Constants whose string value was already `gen_ai.*` +(the reasoning/structured-output attributes), and the Watsonx attributes (which keep their +`llm.watsonx.*` values), are unaffected. + +```python +# Before +from opentelemetry.semconv_ai import SpanAttributes +span.set_attribute(SpanAttributes.LLM_IS_STREAMING, True) + +# After +from opentelemetry.semconv_ai import SpanAttributes +span.set_attribute(SpanAttributes.GEN_AI_IS_STREAMING, True) +``` + +| Old name | New name | +|---|---| +| `SpanAttributes.LLM_USAGE_TOTAL_TOKENS` | `SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS` | +| `SpanAttributes.LLM_USER` | `SpanAttributes.GEN_AI_USER` | +| `SpanAttributes.LLM_HEADERS` | `SpanAttributes.GEN_AI_HEADERS` | +| `SpanAttributes.LLM_IS_STREAMING` | `SpanAttributes.GEN_AI_IS_STREAMING` | +| `SpanAttributes.LLM_REQUEST_REPETITION_PENALTY` | `SpanAttributes.GEN_AI_REQUEST_REPETITION_PENALTY` | +| `SpanAttributes.LLM_REQUEST_REASONING_EFFORT` | `SpanAttributes.GEN_AI_REQUEST_REASONING_EFFORT` | +| `SpanAttributes.LLM_REQUEST_REASONING_SUMMARY` | `SpanAttributes.GEN_AI_REQUEST_REASONING_SUMMARY` | +| `SpanAttributes.LLM_RESPONSE_REASONING_EFFORT` | `SpanAttributes.GEN_AI_RESPONSE_REASONING_EFFORT` | +| `SpanAttributes.LLM_RESPONSE_FINISH_REASON` | `SpanAttributes.GEN_AI_RESPONSE_FINISH_REASON` | +| `SpanAttributes.LLM_RESPONSE_STOP_REASON` | `SpanAttributes.GEN_AI_RESPONSE_STOP_REASON` | +| `SpanAttributes.LLM_CONTENT_COMPLETION_CHUNK` | `SpanAttributes.GEN_AI_CONTENT_COMPLETION_CHUNK` | +| `SpanAttributes.LLM_USAGE_REASONING_TOKENS` | `SpanAttributes.GEN_AI_USAGE_REASONING_TOKENS` | +| `SpanAttributes.LLM_USAGE_TOKEN_TYPE` | `SpanAttributes.GEN_AI_USAGE_TOKEN_TYPE` | +| `SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS` | `SpanAttributes.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS` ¹ | +| 
`SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS` | `SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS` ¹ | +| `SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA` | `SpanAttributes.GEN_AI_REQUEST_STRUCTURED_OUTPUT_SCHEMA` | +| `SpanAttributes.LLM_OPENAI_API_BASE` | `SpanAttributes.GEN_AI_OPENAI_API_BASE` | +| `SpanAttributes.LLM_OPENAI_API_VERSION` | `SpanAttributes.GEN_AI_OPENAI_API_VERSION` | +| `SpanAttributes.LLM_OPENAI_API_TYPE` | `SpanAttributes.GEN_AI_OPENAI_API_TYPE` | +| `SpanAttributes.LLM_DECODING_METHOD` | `SpanAttributes.GEN_AI_WATSONX_DECODING_METHOD` | +| `SpanAttributes.LLM_RANDOM_SEED` | `SpanAttributes.GEN_AI_WATSONX_RANDOM_SEED` | +| `SpanAttributes.LLM_MAX_NEW_TOKENS` | `SpanAttributes.GEN_AI_WATSONX_MAX_NEW_TOKENS` | +| `SpanAttributes.LLM_MIN_NEW_TOKENS` | `SpanAttributes.GEN_AI_WATSONX_MIN_NEW_TOKENS` | +| `SpanAttributes.LLM_REPETITION_PENALTY` | `SpanAttributes.GEN_AI_WATSONX_REPETITION_PENALTY` | + +> ¹ The string value of these two cache-token attributes **also changed** — see [section 3](#cache-token-attributes). + +--- + +## 3. Changed string values + +Some constants kept their Python name but the underlying **string value** changed. + +### Cache token attributes + +| Python name | Old string value | New string value | +|---|---|---| +| `SpanAttributes.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS` | `gen_ai.usage.cache_creation_input_tokens` | `gen_ai.usage.cache_creation.input_tokens` | +| `SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS` | `gen_ai.usage.cache_read_input_tokens` | `gen_ai.usage.cache_read.input_tokens` | + +> **Dashboard impact**: Update any Grafana queries, alerts, or OTLP processors that filter on +> these attribute names. + +### `GenAISystem` values + +All `GenAISystem` enum values now use the OTel spec canonical form (lowercase / dot-separated). 
+ +| Enum member | Old value | New value | +|---|---|---| +| `GenAISystem.ANTHROPIC` | `"Anthropic"` | `"anthropic"` | +| `GenAISystem.COHERE` | `"Cohere"` | `"cohere"` | +| `GenAISystem.MISTRALAI` | `"MistralAI"` | `"mistral_ai"` | +| `GenAISystem.OLLAMA` | `"Ollama"` | `"ollama"` | +| `GenAISystem.GROQ` | `"Groq"` | `"groq"` | +| `GenAISystem.ALEPH_ALPHA` | `"AlephAlpha"` | `"aleph_alpha"` | +| `GenAISystem.REPLICATE` | `"Replicate"` | `"replicate"` | +| `GenAISystem.TOGETHER_AI` | `"TogetherAI"` | `"together_ai"` | +| `GenAISystem.WATSONX` | `"Watsonx"` | `"ibm.watsonx.ai"` | +| `GenAISystem.HUGGINGFACE` | `"HuggingFace"` | `"hugging_face"` | +| `GenAISystem.FIREWORKS` | `"Fireworks"` | `"fireworks"` | +| `GenAISystem.AZURE` | `"Azure"` | `"az.ai.openai"` | +| `GenAISystem.AWS` | `"AWS"` | `"aws.bedrock"` | +| `GenAISystem.GOOGLE` | `"Google"` | `"gcp.gen_ai"` | +| `GenAISystem.OPENROUTER` | `"OpenRouter"` | `"openrouter"` | +| `GenAISystem.LANGCHAIN` | `"Langchain"` | `"langchain"` | + +> `GenAISystem.OPENAI` (`"openai"`) is unchanged. + +> **Dashboard impact**: Update dashboards, alerts, and OTLP processors that filter on +> `gen_ai.system` to use the new lowercase values shown above. + +--- + +## 4. Tool definitions format change + +Tool definitions are now encoded as a **single JSON-array attribute** instead of per-field +indexed sub-attributes. 
+ +```python +# Before — multiple flat attributes +span.set_attribute("gen_ai.tool.definitions.0.name", "my_tool") +span.set_attribute("gen_ai.tool.definitions.0.description", "Does something") +span.set_attribute("gen_ai.tool.definitions.0.parameters", json.dumps({...})) + +# After — one JSON array attribute +import json +tool_defs = [ + { + "name": "my_tool", + "description": "Does something", + "parameters": {...}, + } +] +span.set_attribute(GenAIAttributes.GEN_AI_TOOL_DEFINITIONS, json.dumps(tool_defs)) +``` + +> **Dashboard impact**: Dashboards that expand `gen_ai.tool.definitions.{i}.name` as individual +> attributes will no longer find them. Parse the JSON value of `gen_ai.tool.definitions` instead. + +--- + +## 5. Quickstart: minimal import update + +```python +# Before +from opentelemetry.semconv_ai import SpanAttributes + +SpanAttributes.LLM_SYSTEM # removed +SpanAttributes.LLM_REQUEST_MODEL # removed +SpanAttributes.LLM_REQUEST_TYPE # removed +SpanAttributes.LLM_IS_STREAMING # renamed +SpanAttributes.LLM_USAGE_TOTAL_TOKENS # renamed + +# After +from opentelemetry.semconv_ai import SpanAttributes +from opentelemetry.semconv._incubating.attributes import gen_ai_attributes as GenAIAttributes + +GenAIAttributes.GEN_AI_SYSTEM # upstream +GenAIAttributes.GEN_AI_REQUEST_MODEL # upstream +GenAIAttributes.GEN_AI_OPERATION_NAME # upstream +SpanAttributes.GEN_AI_IS_STREAMING # project semconv (renamed) +SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS # project semconv (renamed) +``` diff --git a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py index 8bb286d0e9..1bc3fec034 100644 --- a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py +++ b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py @@ -7,29 +7,31 @@ class GenAISystem(Enum): """ Supported LLM vendor (System) names used across 
OpenLLMetry instrumentations. - These values match the actual strings used in span attributes (LLM_SYSTEM) - throughout the instrumentation packages. + Values that have a counterpart in the official OTel GenAI semantic conventions + (opentelemetry.semconv._incubating.attributes.gen_ai_attributes.GenAiSystemValues) + use the spec-defined lowercase string. Values without an OTel counterpart use + lowercase-with-underscores as a project convention. """ OPENAI = "openai" - ANTHROPIC = "Anthropic" - COHERE = "Cohere" - MISTRALAI = "MistralAI" - OLLAMA = "Ollama" - GROQ = "Groq" - ALEPH_ALPHA = "AlephAlpha" - REPLICATE = "Replicate" - TOGETHER_AI = "TogetherAI" - WATSONX = "Watsonx" - HUGGINGFACE = "HuggingFace" - FIREWORKS = "Fireworks" - - AZURE = "Azure" - AWS = "AWS" - GOOGLE = "Google" - OPENROUTER = "OpenRouter" - - LANGCHAIN = "Langchain" + ANTHROPIC = "anthropic" + COHERE = "cohere" + MISTRALAI = "mistral_ai" + OLLAMA = "ollama" + GROQ = "groq" + ALEPH_ALPHA = "aleph_alpha" + REPLICATE = "replicate" + TOGETHER_AI = "together_ai" + WATSONX = "ibm.watsonx.ai" + HUGGINGFACE = "hugging_face" + FIREWORKS = "fireworks" + + AZURE = "az.ai.openai" + AWS = "aws.bedrock" + GOOGLE = "gcp.gen_ai" + OPENROUTER = "openrouter" + + LANGCHAIN = "langchain" CREWAI = "crewai" @@ -62,52 +64,32 @@ class Meters: class SpanAttributes: - # GenAI Usage Cache Attributes (missing from incubating semantic conventions) - GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS = "gen_ai.usage.cache_creation_input_tokens" - GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS = "gen_ai.usage.cache_read_input_tokens" - - # LLM Cache Attributes (legacy naming - keeping for backward compatibility) - LLM_SYSTEM = "gen_ai.system" - LLM_REQUEST_MODEL = "gen_ai.request.model" - LLM_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens" - LLM_REQUEST_TEMPERATURE = "gen_ai.request.temperature" - LLM_REQUEST_TOP_P = "gen_ai.request.top_p" - LLM_PROMPTS = "gen_ai.prompt" - LLM_COMPLETIONS = "gen_ai.completion" - LLM_RESPONSE_MODEL = 
"gen_ai.response.model" - LLM_USAGE_COMPLETION_TOKENS = "gen_ai.usage.completion_tokens" - LLM_USAGE_PROMPT_TOKENS = "gen_ai.usage.prompt_tokens" - LLM_USAGE_CACHE_CREATION_INPUT_TOKENS = "gen_ai.usage.cache_creation_input_tokens" - LLM_USAGE_CACHE_READ_INPUT_TOKENS = "gen_ai.usage.cache_read_input_tokens" - LLM_TOKEN_TYPE = "gen_ai.token.type" - LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA = "gen_ai.request.structured_output_schema" - LLM_REQUEST_REASONING_SUMMARY = "gen_ai.request.reasoning_summary" - LLM_RESPONSE_REASONING_EFFORT = "gen_ai.response.reasoning_effort" - - # LLM - LLM_REQUEST_TYPE = "llm.request.type" - LLM_USAGE_TOTAL_TOKENS = "llm.usage.total_tokens" - LLM_USAGE_TOKEN_TYPE = "llm.usage.token_type" - LLM_USER = "llm.user" - LLM_HEADERS = "llm.headers" - LLM_TOP_K = "llm.top_k" - LLM_IS_STREAMING = "llm.is_streaming" - LLM_FREQUENCY_PENALTY = "llm.frequency_penalty" - LLM_PRESENCE_PENALTY = "llm.presence_penalty" - LLM_CHAT_STOP_SEQUENCES = "llm.chat.stop_sequences" - LLM_REQUEST_FUNCTIONS = "llm.request.functions" - LLM_REQUEST_REPETITION_PENALTY = "llm.request.repetition_penalty" - LLM_RESPONSE_FINISH_REASON = "llm.response.finish_reason" - LLM_RESPONSE_STOP_REASON = "llm.response.stop_reason" - LLM_CONTENT_COMPLETION_CHUNK = "llm.content.completion.chunk" - LLM_REQUEST_REASONING_EFFORT = "llm.request.reasoning_effort" - LLM_USAGE_REASONING_TOKENS = "llm.usage.reasoning_tokens" + # GenAI Usage Cache Attributes (not yet in upstream OTel incubating semconv) + GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS = "gen_ai.usage.cache_creation.input_tokens" + GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS = "gen_ai.usage.cache_read.input_tokens" + + # LLM — project-policy attributes (not in upstream OTel spec) + GEN_AI_USAGE_TOTAL_TOKENS = "gen_ai.usage.total_tokens" + GEN_AI_USAGE_TOKEN_TYPE = "gen_ai.usage.token_type" + GEN_AI_USER = "gen_ai.user" + GEN_AI_HEADERS = "gen_ai.headers" + GEN_AI_IS_STREAMING = "gen_ai.is_streaming" + GEN_AI_REQUEST_REPETITION_PENALTY = 
"gen_ai.request.repetition_penalty" + GEN_AI_RESPONSE_FINISH_REASON = "gen_ai.response.finish_reason" + GEN_AI_RESPONSE_STOP_REASON = "gen_ai.response.stop_reason" + GEN_AI_CONTENT_COMPLETION_CHUNK = "gen_ai.content.completion.chunk" + GEN_AI_REQUEST_REASONING_EFFORT = "gen_ai.request.reasoning_effort" + GEN_AI_USAGE_REASONING_TOKENS = "gen_ai.usage.reasoning_tokens" + GEN_AI_REQUEST_N = "gen_ai.request.n" + GEN_AI_REQUEST_MAX_COMPLETION_TOKENS = "gen_ai.request.max_completion_tokens" + GEN_AI_REQUEST_STRUCTURED_OUTPUT_SCHEMA = "gen_ai.request.structured_output_schema" + GEN_AI_REQUEST_REASONING_SUMMARY = "gen_ai.request.reasoning_summary" + GEN_AI_RESPONSE_REASONING_EFFORT = "gen_ai.response.reasoning_effort" # OpenAI - LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT = "gen_ai.openai.system_fingerprint" - LLM_OPENAI_API_BASE = "gen_ai.openai.api_base" - LLM_OPENAI_API_VERSION = "gen_ai.openai.api_version" - LLM_OPENAI_API_TYPE = "gen_ai.openai.api_type" + GEN_AI_OPENAI_API_BASE = "gen_ai.openai.api_base" + GEN_AI_OPENAI_API_VERSION = "gen_ai.openai.api_version" + GEN_AI_OPENAI_API_TYPE = "gen_ai.openai.api_type" # Haystack HAYSTACK_OPENAI_CHAT = "haystack.openai.chat" @@ -152,11 +134,11 @@ class SpanAttributes: TRACELOOP_CORRELATION_ID = "traceloop.correlation.id" # Watson/genai LLM - LLM_DECODING_METHOD = "llm.watsonx.decoding_method" - LLM_RANDOM_SEED = "llm.watsonx.random_seed" - LLM_MAX_NEW_TOKENS = "llm.watsonx.max_new_tokens" - LLM_MIN_NEW_TOKENS = "llm.watsonx.min_new_tokens" - LLM_REPETITION_PENALTY = "llm.watsonx.repetition_penalty" + GEN_AI_WATSONX_DECODING_METHOD = "llm.watsonx.decoding_method" + GEN_AI_WATSONX_RANDOM_SEED = "llm.watsonx.random_seed" + GEN_AI_WATSONX_MAX_NEW_TOKENS = "llm.watsonx.max_new_tokens" + GEN_AI_WATSONX_MIN_NEW_TOKENS = "llm.watsonx.min_new_tokens" + GEN_AI_WATSONX_REPETITION_PENALTY = "llm.watsonx.repetition_penalty" # Chroma db CHROMADB_ADD_IDS_COUNT = "db.chroma.add.ids_count" diff --git 
a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/_testing.py b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/_testing.py new file mode 100644 index 0000000000..bf203ab819 --- /dev/null +++ b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/_testing.py @@ -0,0 +1,383 @@ +""" +Shared compliance test classes for opentelemetry-semantic-conventions-ai. + +Import these classes in any instrumentation package's test suite to verify +that the installed semconv constants have the expected values: + + from opentelemetry.semconv_ai._testing import * # noqa: F401, F403 + +pytest will discover and run all Test* classes that end up in the module +namespace, so a single import line is enough. +""" + +import pytest +from opentelemetry.semconv_ai import GenAISystem, Meters, SpanAttributes + + +# --------------------------------------------------------------------------- +# SpanAttributes — renamed constants (LLM_* → GEN_AI_*) +# --------------------------------------------------------------------------- + + +class TestSpanAttributesGENAIRenamed: + """Verify all renamed LLM_* → GEN_AI_* constants have the correct string values.""" + + def test_gen_ai_usage_total_tokens(self): + assert SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS == "gen_ai.usage.total_tokens" + + def test_gen_ai_usage_token_type(self): + assert SpanAttributes.GEN_AI_USAGE_TOKEN_TYPE == "gen_ai.usage.token_type" + + def test_gen_ai_user(self): + assert SpanAttributes.GEN_AI_USER == "gen_ai.user" + + def test_gen_ai_headers(self): + assert SpanAttributes.GEN_AI_HEADERS == "gen_ai.headers" + + def test_gen_ai_is_streaming(self): + assert SpanAttributes.GEN_AI_IS_STREAMING == "gen_ai.is_streaming" + + def test_gen_ai_request_repetition_penalty(self): + assert SpanAttributes.GEN_AI_REQUEST_REPETITION_PENALTY == "gen_ai.request.repetition_penalty" + + def test_gen_ai_response_finish_reason(self): + assert SpanAttributes.GEN_AI_RESPONSE_FINISH_REASON == 
"gen_ai.response.finish_reason" + + def test_gen_ai_response_stop_reason(self): + assert SpanAttributes.GEN_AI_RESPONSE_STOP_REASON == "gen_ai.response.stop_reason" + + def test_gen_ai_content_completion_chunk(self): + assert SpanAttributes.GEN_AI_CONTENT_COMPLETION_CHUNK == "gen_ai.content.completion.chunk" + + def test_gen_ai_request_reasoning_effort(self): + assert SpanAttributes.GEN_AI_REQUEST_REASONING_EFFORT == "gen_ai.request.reasoning_effort" + + def test_gen_ai_usage_reasoning_tokens(self): + assert SpanAttributes.GEN_AI_USAGE_REASONING_TOKENS == "gen_ai.usage.reasoning_tokens" + + def test_gen_ai_request_n(self): + assert SpanAttributes.GEN_AI_REQUEST_N == "gen_ai.request.n" + + def test_gen_ai_request_max_completion_tokens(self): + assert SpanAttributes.GEN_AI_REQUEST_MAX_COMPLETION_TOKENS == "gen_ai.request.max_completion_tokens" + + def test_gen_ai_request_structured_output_schema(self): + assert SpanAttributes.GEN_AI_REQUEST_STRUCTURED_OUTPUT_SCHEMA == "gen_ai.request.structured_output_schema" + + def test_gen_ai_request_reasoning_summary(self): + assert SpanAttributes.GEN_AI_REQUEST_REASONING_SUMMARY == "gen_ai.request.reasoning_summary" + + def test_gen_ai_response_reasoning_effort(self): + assert SpanAttributes.GEN_AI_RESPONSE_REASONING_EFFORT == "gen_ai.response.reasoning_effort" + + def test_gen_ai_openai_api_base(self): + assert SpanAttributes.GEN_AI_OPENAI_API_BASE == "gen_ai.openai.api_base" + + def test_gen_ai_openai_api_version(self): + assert SpanAttributes.GEN_AI_OPENAI_API_VERSION == "gen_ai.openai.api_version" + + def test_gen_ai_openai_api_type(self): + assert SpanAttributes.GEN_AI_OPENAI_API_TYPE == "gen_ai.openai.api_type" + + +# --------------------------------------------------------------------------- +# SpanAttributes — old LLM_* names must be gone +# --------------------------------------------------------------------------- + + +class TestSpanAttributesOldNamesGone: + """Assert that removed LLM_* constants no longer exist on 
SpanAttributes.""" + + @pytest.mark.parametrize( + "old_name", + [ + "LLM_SYSTEM", + "LLM_REQUEST_MODEL", + "LLM_REQUEST_MAX_TOKENS", + "LLM_REQUEST_TEMPERATURE", + "LLM_REQUEST_TOP_P", + "LLM_PROMPTS", + "LLM_COMPLETIONS", + "LLM_RESPONSE_MODEL", + "LLM_USAGE_COMPLETION_TOKENS", + "LLM_USAGE_PROMPT_TOKENS", + "LLM_USAGE_CACHE_CREATION_INPUT_TOKENS", + "LLM_USAGE_CACHE_READ_INPUT_TOKENS", + "LLM_TOKEN_TYPE", + "LLM_REQUEST_TYPE", + "LLM_FREQUENCY_PENALTY", + "LLM_PRESENCE_PENALTY", + "LLM_CHAT_STOP_SEQUENCES", + "LLM_REQUEST_FUNCTIONS", + "LLM_TOP_K", + "LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT", + ], + ) + def test_old_name_absent(self, old_name): + assert not hasattr(SpanAttributes, old_name), ( + f"SpanAttributes.{old_name} should have been removed. " + "Consumers should import from opentelemetry.semconv._incubating.attributes.gen_ai_attributes directly." + ) + + +# --------------------------------------------------------------------------- +# SpanAttributes — cache attributes +# --------------------------------------------------------------------------- + + +class TestSpanAttributesCacheDotSeparator: + """Cache token attributes use dot-separated sub-namespaces (spec update).""" + + def test_gen_ai_usage_cache_read_input_tokens(self): + assert SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS == "gen_ai.usage.cache_read.input_tokens" + + def test_gen_ai_usage_cache_creation_input_tokens(self): + assert SpanAttributes.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS == "gen_ai.usage.cache_creation.input_tokens" + + +# --------------------------------------------------------------------------- +# SpanAttributes — project-policy attributes use gen_ai namespace +# --------------------------------------------------------------------------- + + +class TestSpanAttributesProjectPolicy: + """Project-policy attributes (not in upstream OTel spec) use gen_ai namespace.""" + + def test_is_streaming(self): + assert SpanAttributes.GEN_AI_IS_STREAMING == "gen_ai.is_streaming" + + def 
test_user(self): + assert SpanAttributes.GEN_AI_USER == "gen_ai.user" + + def test_headers(self): + assert SpanAttributes.GEN_AI_HEADERS == "gen_ai.headers" + + +class TestSpanAttributesOldValuesAbsent: + """Regression: old/incorrect string values must not appear anywhere in SpanAttributes.""" + + @pytest.mark.parametrize( + "old_value", + [ + "llm.usage.total_tokens", + "llm.frequency_penalty", + "llm.presence_penalty", + "llm.is_streaming", + "llm.user", + "llm.headers", + "llm.top_k", + "llm.chat.stop_sequences", + "llm.request.functions", + "llm.request.repetition_penalty", + "llm.request.type", + "llm.usage.token_type", + "llm.response.finish_reason", + "llm.response.stop_reason", + "llm.content.completion.chunk", + "llm.request.reasoning_effort", + "llm.usage.reasoning_tokens", + "llm.chat_completions.streaming_time_to_generate", + "gen_ai.usage.cache_read_input_tokens", # underscore variant (pre-migration) + "gen_ai.usage.cache_creation_input_tokens", # underscore variant (pre-migration) + ], + ) + def test_old_value_not_in_span_attributes(self, old_value): + all_values = { + name: value + for name, value in vars(SpanAttributes).items() + if not name.startswith("_") and isinstance(value, str) + } + assert old_value not in all_values.values(), ( + f"Old attribute value {old_value!r} is still present in SpanAttributes. " + f"It should have been renamed." + ) + + +class TestSpanAttributesUnchanged: + """Constants that should NOT have changed — sanity check.""" + + def test_traceloop_span_kind_unchanged(self): + assert SpanAttributes.TRACELOOP_SPAN_KIND == "traceloop.span.kind" + + +# --------------------------------------------------------------------------- +# SpanAttributes — Watsonx vendor-specific attributes (renamed to GEN_AI_WATSONX_*) +# --------------------------------------------------------------------------- + + +class TestSpanAttributesWatsonxKept: + """ + llm.watsonx.* span attributes are intentionally kept. 
These use llm.watsonx as a + vendor-qualified prefix (analogous to db.chroma.*), not a generic llm.* namespace. + The Python names have been renamed to GEN_AI_WATSONX_* prefix. + """ + + def test_watsonx_decoding_method_kept(self): + assert SpanAttributes.GEN_AI_WATSONX_DECODING_METHOD == "llm.watsonx.decoding_method" + + def test_watsonx_random_seed_kept(self): + assert SpanAttributes.GEN_AI_WATSONX_RANDOM_SEED == "llm.watsonx.random_seed" + + def test_watsonx_max_new_tokens_kept(self): + assert SpanAttributes.GEN_AI_WATSONX_MAX_NEW_TOKENS == "llm.watsonx.max_new_tokens" + + def test_watsonx_min_new_tokens_kept(self): + assert SpanAttributes.GEN_AI_WATSONX_MIN_NEW_TOKENS == "llm.watsonx.min_new_tokens" + + def test_watsonx_repetition_penalty_kept(self): + assert SpanAttributes.GEN_AI_WATSONX_REPETITION_PENALTY == "llm.watsonx.repetition_penalty" + + +# --------------------------------------------------------------------------- +# GenAISystem enum — values must match OTel GenAiSystemValues where possible +# --------------------------------------------------------------------------- + + +class TestGenAISystemOtelAligned: + """Enum members that have a counterpart in OTel GenAiSystemValues.""" + + def test_openai(self): + assert GenAISystem.OPENAI.value == "openai" + + def test_anthropic_lowercase(self): + # Was "Anthropic" — must now match OTel GenAiSystemValues.ANTHROPIC + from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues + + assert GenAISystem.ANTHROPIC.value == GenAiSystemValues.ANTHROPIC.value + assert GenAISystem.ANTHROPIC.value == "anthropic" + + def test_cohere_lowercase(self): + from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues + + assert GenAISystem.COHERE.value == GenAiSystemValues.COHERE.value + assert GenAISystem.COHERE.value == "cohere" + + def test_mistralai_spec_format(self): + from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import 
GenAiSystemValues + + assert GenAISystem.MISTRALAI.value == GenAiSystemValues.MISTRAL_AI.value + assert GenAISystem.MISTRALAI.value == "mistral_ai" + + def test_groq_lowercase(self): + from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues + + assert GenAISystem.GROQ.value == GenAiSystemValues.GROQ.value + assert GenAISystem.GROQ.value == "groq" + + def test_watsonx_spec_format(self): + from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues + + assert GenAISystem.WATSONX.value == GenAiSystemValues.IBM_WATSONX_AI.value + assert GenAISystem.WATSONX.value == "ibm.watsonx.ai" + + def test_aws_spec_format(self): + from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues + + assert GenAISystem.AWS.value == GenAiSystemValues.AWS_BEDROCK.value + assert GenAISystem.AWS.value == "aws.bedrock" + + def test_azure_spec_format(self): + from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues + + assert GenAISystem.AZURE.value == GenAiSystemValues.AZ_AI_OPENAI.value + assert GenAISystem.AZURE.value == "az.ai.openai" + + def test_google_spec_format(self): + from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues + + assert GenAISystem.GOOGLE.value == GenAiSystemValues.GCP_GEN_AI.value + assert GenAISystem.GOOGLE.value == "gcp.gen_ai" + + +class TestGenAISystemProjectValues: + """Enum members without an OTel counterpart — project-defined lowercase values.""" + + def test_ollama(self): + assert GenAISystem.OLLAMA.value == "ollama" + + def test_aleph_alpha(self): + assert GenAISystem.ALEPH_ALPHA.value == "aleph_alpha" + + def test_replicate(self): + assert GenAISystem.REPLICATE.value == "replicate" + + def test_together_ai(self): + assert GenAISystem.TOGETHER_AI.value == "together_ai" + + def test_huggingface(self): + assert GenAISystem.HUGGINGFACE.value == "hugging_face" + + def test_fireworks(self): 
+ assert GenAISystem.FIREWORKS.value == "fireworks" + + def test_openrouter(self): + assert GenAISystem.OPENROUTER.value == "openrouter" + + def test_langchain(self): + assert GenAISystem.LANGCHAIN.value == "langchain" + + def test_crewai(self): + assert GenAISystem.CREWAI.value == "crewai" + + +class TestGenAISystemNoCaps: + """All GenAISystem values must be lowercase (no PascalCase or camelCase).""" + + def test_all_values_lowercase(self): + non_lowercase = [ + member.name + for member in GenAISystem + if member.value != member.value.lower() + ] + assert non_lowercase == [], ( + f"GenAISystem members have non-lowercase values: {non_lowercase}. " + "Values should use lowercase with dots or underscores." + ) + + +# --------------------------------------------------------------------------- +# Meters — core metric names use the gen_ai.* namespace +# --------------------------------------------------------------------------- + + +class TestMetersGenAiNamespace: + """Core gen_ai.client.* metrics use the gen_ai namespace; the streaming + time-to-generate metric intentionally keeps its legacy llm.* name until the + instrumentation packages rename it.""" + + def test_streaming_time_to_generate(self): + assert Meters.LLM_STREAMING_TIME_TO_GENERATE == "llm.chat_completions.streaming_time_to_generate" + + def test_core_metrics_unchanged(self): + """Core gen_ai.client.* metrics already had the correct namespace.""" + assert Meters.LLM_GENERATION_CHOICES == "gen_ai.client.generation.choices" + assert Meters.LLM_TOKEN_USAGE == "gen_ai.client.token.usage" + assert Meters.LLM_OPERATION_DURATION == "gen_ai.client.operation.duration" + + +class TestMetersVendorNamespacesKept: + """ + Vendor-qualified metric names (llm.openai.*, llm.anthropic.*, llm.watsonx.*) + are intentionally kept. The llm. prefix is a vendor identifier, not the + generic llm.* attribute namespace being migrated. These will be renamed in the + respective package PRs if/when those vendors adopt the gen_ai namespace.
+ """ + + def test_openai_completions_exceptions_kept(self): + assert Meters.LLM_COMPLETIONS_EXCEPTIONS == "llm.openai.chat_completions.exceptions" + + def test_openai_embeddings_exceptions_kept(self): + assert Meters.LLM_EMBEDDINGS_EXCEPTIONS == "llm.openai.embeddings.exceptions" + + def test_openai_embeddings_vector_size_kept(self): + assert Meters.LLM_EMBEDDINGS_VECTOR_SIZE == "llm.openai.embeddings.vector_size" + + def test_openai_image_generations_exceptions_kept(self): + assert Meters.LLM_IMAGE_GENERATIONS_EXCEPTIONS == "llm.openai.image_generations.exceptions" + + def test_anthropic_completion_exceptions_kept(self): + assert Meters.LLM_ANTHROPIC_COMPLETION_EXCEPTIONS == "llm.anthropic.completion.exceptions" + + def test_watsonx_metrics_kept(self): + assert Meters.LLM_WATSONX_COMPLETIONS_DURATION == "llm.watsonx.completions.duration" + assert Meters.LLM_WATSONX_COMPLETIONS_EXCEPTIONS == "llm.watsonx.completions.exceptions" + assert Meters.LLM_WATSONX_COMPLETIONS_RESPONSES == "llm.watsonx.completions.responses" + assert Meters.LLM_WATSONX_COMPLETIONS_TOKENS == "llm.watsonx.completions.tokens" diff --git a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/version.py b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/version.py index 6ff6db180c..3d187266f1 100644 --- a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/version.py +++ b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/version.py @@ -1 +1 @@ -__version__ = "0.4.16" +__version__ = "0.5.0" diff --git a/packages/opentelemetry-semantic-conventions-ai/pyproject.toml b/packages/opentelemetry-semantic-conventions-ai/pyproject.toml index 414f0e296a..581b3b41d1 100644 --- a/packages/opentelemetry-semantic-conventions-ai/pyproject.toml +++ b/packages/opentelemetry-semantic-conventions-ai/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.16" +version = "0.5.0" 
description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" authors = [ { name = "Gal Kleinman", email = "gal@traceloop.com" }, diff --git a/packages/opentelemetry-semantic-conventions-ai/tests/test_semconv_compliance.py b/packages/opentelemetry-semantic-conventions-ai/tests/test_semconv_compliance.py new file mode 100644 index 0000000000..de81d3aa6b --- /dev/null +++ b/packages/opentelemetry-semantic-conventions-ai/tests/test_semconv_compliance.py @@ -0,0 +1,6 @@ +# ruff: noqa: F401, F403 +""" +Semconv compliance tests re-used from opentelemetry-semantic-conventions-ai. +To add more compliance checks, update _testing.py in that package — not here. +""" +from opentelemetry.semconv_ai._testing import * diff --git a/packages/opentelemetry-semantic-conventions-ai/tests/test_span_attributes.py b/packages/opentelemetry-semantic-conventions-ai/tests/test_span_attributes.py new file mode 100644 index 0000000000..eb6f56b70f --- /dev/null +++ b/packages/opentelemetry-semantic-conventions-ai/tests/test_span_attributes.py @@ -0,0 +1,7 @@ +# ruff: noqa: F401, F403 +""" +Semconv compliance tests — authoritative source lives in +opentelemetry/semconv_ai/_testing.py so any instrumentation package +can reuse them with a single import. 
+""" +from opentelemetry.semconv_ai._testing import * diff --git a/packages/opentelemetry-semantic-conventions-ai/uv.lock b/packages/opentelemetry-semantic-conventions-ai/uv.lock index 4283fafde2..c755090c04 100644 --- a/packages/opentelemetry-semantic-conventions-ai/uv.lock +++ b/packages/opentelemetry-semantic-conventions-ai/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 1 +revision = 3 requires-python = ">=3.9, <4" resolution-markers = [ "python_full_version >= '3.10'", @@ -14,18 +14,18 @@ dependencies = [ { name = "pycodestyle" }, { name = "tomli", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/50/d8/30873d2b7b57dee9263e53d142da044c4600a46f2d28374b3e38b023df16/autopep8-2.3.2.tar.gz", hash = "sha256:89440a4f969197b69a995e4ce0661b031f455a9f776d2c5ba3dbd83466931758", size = 92210 } +sdist = { url = "https://files.pythonhosted.org/packages/50/d8/30873d2b7b57dee9263e53d142da044c4600a46f2d28374b3e38b023df16/autopep8-2.3.2.tar.gz", hash = "sha256:89440a4f969197b69a995e4ce0661b031f455a9f776d2c5ba3dbd83466931758", size = 92210, upload-time = "2025-01-14T14:46:18.454Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9e/43/53afb8ba17218f19b77c7834128566c5bbb100a0ad9ba2e8e89d089d7079/autopep8-2.3.2-py2.py3-none-any.whl", hash = "sha256:ce8ad498672c845a0c3de2629c15b635ec2b05ef8177a6e7c91c74f3e9b51128", size = 45807 }, + { url = "https://files.pythonhosted.org/packages/9e/43/53afb8ba17218f19b77c7834128566c5bbb100a0ad9ba2e8e89d089d7079/autopep8-2.3.2-py2.py3-none-any.whl", hash = "sha256:ce8ad498672c845a0c3de2629c15b635ec2b05ef8177a6e7c91c74f3e9b51128", size = 45807, upload-time = "2025-01-14T14:46:15.466Z" }, ] [[package]] name = "colorama" version = "0.4.6" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = 
"sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, ] [[package]] @@ -35,9 +35,9 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371 } +sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740 }, + { url = 
"https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, ] [[package]] @@ -47,9 +47,9 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "zipp" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107 } +sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865 }, + { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, upload-time = "2025-12-21T10:00:18.329Z" }, ] [[package]] @@ -59,9 +59,9 @@ source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version < '3.10'", ] -sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793 } +sdist = { url = 
"https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050 }, + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, ] [[package]] @@ -71,9 +71,9 @@ source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.10'", ] -sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503 } +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484 }, + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", 
size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, ] [[package]] @@ -84,9 +84,9 @@ dependencies = [ { name = "importlib-metadata" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/97/b9/3161be15bb8e3ad01be8be5a968a9237c3027c5be504362ff800fca3e442/opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c", size = 65767 } +sdist = { url = "https://files.pythonhosted.org/packages/97/b9/3161be15bb8e3ad01be8be5a968a9237c3027c5be504362ff800fca3e442/opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c", size = 65767, upload-time = "2025-12-11T13:32:39.182Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cf/df/d3f1ddf4bb4cb50ed9b1139cc7b1c54c34a1e7ce8fd1b9a37c0d1551a6bd/opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950", size = 66356 }, + { url = "https://files.pythonhosted.org/packages/cf/df/d3f1ddf4bb4cb50ed9b1139cc7b1c54c34a1e7ce8fd1b9a37c0d1551a6bd/opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950", size = 66356, upload-time = "2025-12-11T13:32:17.304Z" }, ] [[package]] @@ -98,9 +98,9 @@ dependencies = [ { name = "opentelemetry-semantic-conventions" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/eb/fb/c76080c9ba07e1e8235d24cdcc4d125ef7aa3edf23eb4e497c2e50889adc/opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6", size = 171460 } +sdist = { url = "https://files.pythonhosted.org/packages/eb/fb/c76080c9ba07e1e8235d24cdcc4d125ef7aa3edf23eb4e497c2e50889adc/opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6", size = 171460, upload-time = "2025-12-11T13:32:49.369Z" } wheels = [ 
- { url = "https://files.pythonhosted.org/packages/7c/98/e91cf858f203d86f4eccdf763dcf01cf03f1dae80c3750f7e635bfa206b6/opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c", size = 132565 }, + { url = "https://files.pythonhosted.org/packages/7c/98/e91cf858f203d86f4eccdf763dcf01cf03f1dae80c3750f7e635bfa206b6/opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c", size = 132565, upload-time = "2025-12-11T13:32:35.069Z" }, ] [[package]] @@ -111,14 +111,14 @@ dependencies = [ { name = "opentelemetry-api" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/91/df/553f93ed38bf22f4b999d9be9c185adb558982214f33eae539d3b5cd0858/opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953", size = 137935 } +sdist = { url = "https://files.pythonhosted.org/packages/91/df/553f93ed38bf22f4b999d9be9c185adb558982214f33eae539d3b5cd0858/opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953", size = 137935, upload-time = "2025-12-11T13:32:50.487Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7a/5e/5958555e09635d09b75de3c4f8b9cae7335ca545d77392ffe7331534c402/opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb", size = 219982 }, + { url = "https://files.pythonhosted.org/packages/7a/5e/5958555e09635d09b75de3c4f8b9cae7335ca545d77392ffe7331534c402/opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb", size = 219982, upload-time = "2025-12-11T13:32:36.955Z" }, ] [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.13" +version = "0.5.0" source = { 
editable = "." } dependencies = [ { name = "opentelemetry-sdk" }, @@ -151,36 +151,36 @@ dev = [ name = "packaging" version = "25.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727 } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469 }, + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, ] [[package]] name = "pluggy" version = "1.6.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412 } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538 }, + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, ] [[package]] name = "pycodestyle" version = "2.14.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/11/e0/abfd2a0d2efe47670df87f3e3a0e2edda42f055053c85361f19c0e2c1ca8/pycodestyle-2.14.0.tar.gz", hash = "sha256:c4b5b517d278089ff9d0abdec919cd97262a3367449ea1c8b49b91529167b783", size = 39472 } +sdist = { url = "https://files.pythonhosted.org/packages/11/e0/abfd2a0d2efe47670df87f3e3a0e2edda42f055053c85361f19c0e2c1ca8/pycodestyle-2.14.0.tar.gz", hash = "sha256:c4b5b517d278089ff9d0abdec919cd97262a3367449ea1c8b49b91529167b783", size = 39472, upload-time = "2025-06-20T18:49:48.75Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d7/27/a58ddaf8c588a3ef080db9d0b7e0b97215cee3a45df74f3a94dbbf5c893a/pycodestyle-2.14.0-py2.py3-none-any.whl", hash = "sha256:dd6bf7cb4ee77f8e016f9c8e74a35ddd9f67e1d5fd4184d86c3b98e07099f42d", size = 31594 }, + { url = "https://files.pythonhosted.org/packages/d7/27/a58ddaf8c588a3ef080db9d0b7e0b97215cee3a45df74f3a94dbbf5c893a/pycodestyle-2.14.0-py2.py3-none-any.whl", hash = "sha256:dd6bf7cb4ee77f8e016f9c8e74a35ddd9f67e1d5fd4184d86c3b98e07099f42d", size = 31594, upload-time = "2025-06-20T18:49:47.491Z" }, ] [[package]] name = "pygments" version = "2.19.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = 
"sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631 } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217 }, + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, ] [[package]] @@ -197,9 +197,9 @@ dependencies = [ { name = "pygments" }, { name = "tomli", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618 } +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750 }, + { url = 
"https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, ] [[package]] @@ -212,35 +212,35 @@ dependencies = [ { name = "termcolor", version = "3.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, { name = "termcolor", version = "3.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f5/ac/5754f5edd6d508bc6493bc37d74b928f102a5fff82d9a80347e180998f08/pytest-sugar-1.0.0.tar.gz", hash = "sha256:6422e83258f5b0c04ce7c632176c7732cab5fdb909cb39cca5c9139f81276c0a", size = 14992 } +sdist = { url = "https://files.pythonhosted.org/packages/f5/ac/5754f5edd6d508bc6493bc37d74b928f102a5fff82d9a80347e180998f08/pytest-sugar-1.0.0.tar.gz", hash = "sha256:6422e83258f5b0c04ce7c632176c7732cab5fdb909cb39cca5c9139f81276c0a", size = 14992, upload-time = "2024-02-01T18:30:36.735Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/92/fb/889f1b69da2f13691de09a111c16c4766a433382d44aa0ecf221deded44a/pytest_sugar-1.0.0-py3-none-any.whl", hash = "sha256:70ebcd8fc5795dc457ff8b69d266a4e2e8a74ae0c3edc749381c64b5246c8dfd", size = 10171 }, + { url = "https://files.pythonhosted.org/packages/92/fb/889f1b69da2f13691de09a111c16c4766a433382d44aa0ecf221deded44a/pytest_sugar-1.0.0-py3-none-any.whl", hash = "sha256:70ebcd8fc5795dc457ff8b69d266a4e2e8a74ae0c3edc749381c64b5246c8dfd", size = 10171, upload-time = "2024-02-01T18:30:29.395Z" }, ] [[package]] name = "ruff" version = "0.14.11" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d4/77/9a7fe084d268f8855d493e5031ea03fa0af8cc05887f638bf1c4e3363eb8/ruff-0.14.11.tar.gz", hash = 
"sha256:f6dc463bfa5c07a59b1ff2c3b9767373e541346ea105503b4c0369c520a66958", size = 5993417 } +sdist = { url = "https://files.pythonhosted.org/packages/d4/77/9a7fe084d268f8855d493e5031ea03fa0af8cc05887f638bf1c4e3363eb8/ruff-0.14.11.tar.gz", hash = "sha256:f6dc463bfa5c07a59b1ff2c3b9767373e541346ea105503b4c0369c520a66958", size = 5993417, upload-time = "2026-01-08T19:11:58.322Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f0/a6/a4c40a5aaa7e331f245d2dc1ac8ece306681f52b636b40ef87c88b9f7afd/ruff-0.14.11-py3-none-linux_armv6l.whl", hash = "sha256:f6ff2d95cbd335841a7217bdfd9c1d2e44eac2c584197ab1385579d55ff8830e", size = 12951208 }, - { url = "https://files.pythonhosted.org/packages/5c/5c/360a35cb7204b328b685d3129c08aca24765ff92b5a7efedbdd6c150d555/ruff-0.14.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f6eb5c1c8033680f4172ea9c8d3706c156223010b8b97b05e82c59bdc774ee6", size = 13330075 }, - { url = "https://files.pythonhosted.org/packages/1b/9e/0cc2f1be7a7d33cae541824cf3f95b4ff40d03557b575912b5b70273c9ec/ruff-0.14.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f2fc34cc896f90080fca01259f96c566f74069a04b25b6205d55379d12a6855e", size = 12257809 }, - { url = "https://files.pythonhosted.org/packages/a7/e5/5faab97c15bb75228d9f74637e775d26ac703cc2b4898564c01ab3637c02/ruff-0.14.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53386375001773ae812b43205d6064dae49ff0968774e6befe16a994fc233caa", size = 12678447 }, - { url = "https://files.pythonhosted.org/packages/1b/33/e9767f60a2bef779fb5855cab0af76c488e0ce90f7bb7b8a45c8a2ba4178/ruff-0.14.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a697737dce1ca97a0a55b5ff0434ee7205943d4874d638fe3ae66166ff46edbe", size = 12758560 }, - { url = "https://files.pythonhosted.org/packages/eb/84/4c6cf627a21462bb5102f7be2a320b084228ff26e105510cd2255ea868e5/ruff-0.14.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:6845ca1da8ab81ab1dce755a32ad13f1db72e7fba27c486d5d90d65e04d17b8f", size = 13599296 }, - { url = "https://files.pythonhosted.org/packages/88/e1/92b5ed7ea66d849f6157e695dc23d5d6d982bd6aa8d077895652c38a7cae/ruff-0.14.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e36ce2fd31b54065ec6f76cb08d60159e1b32bdf08507862e32f47e6dde8bcbf", size = 15048981 }, - { url = "https://files.pythonhosted.org/packages/61/df/c1bd30992615ac17c2fb64b8a7376ca22c04a70555b5d05b8f717163cf9f/ruff-0.14.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:590bcc0e2097ecf74e62a5c10a6b71f008ad82eb97b0a0079e85defe19fe74d9", size = 14633183 }, - { url = "https://files.pythonhosted.org/packages/04/e9/fe552902f25013dd28a5428a42347d9ad20c4b534834a325a28305747d64/ruff-0.14.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:53fe71125fc158210d57fe4da26e622c9c294022988d08d9347ec1cf782adafe", size = 14050453 }, - { url = "https://files.pythonhosted.org/packages/ae/93/f36d89fa021543187f98991609ce6e47e24f35f008dfe1af01379d248a41/ruff-0.14.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a35c9da08562f1598ded8470fcfef2afb5cf881996e6c0a502ceb61f4bc9c8a3", size = 13757889 }, - { url = "https://files.pythonhosted.org/packages/b7/9f/c7fb6ecf554f28709a6a1f2a7f74750d400979e8cd47ed29feeaa1bd4db8/ruff-0.14.11-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:0f3727189a52179393ecf92ec7057c2210203e6af2676f08d92140d3e1ee72c1", size = 13955832 }, - { url = "https://files.pythonhosted.org/packages/db/a0/153315310f250f76900a98278cf878c64dfb6d044e184491dd3289796734/ruff-0.14.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:eb09f849bd37147a789b85995ff734a6c4a095bed5fd1608c4f56afc3634cde2", size = 12586522 }, - { url = "https://files.pythonhosted.org/packages/2f/2b/a73a2b6e6d2df1d74bf2b78098be1572191e54bec0e59e29382d13c3adc5/ruff-0.14.11-py3-none-musllinux_1_2_armv7l.whl", hash = 
"sha256:c61782543c1231bf71041461c1f28c64b961d457d0f238ac388e2ab173d7ecb7", size = 12724637 }, - { url = "https://files.pythonhosted.org/packages/f0/41/09100590320394401cd3c48fc718a8ba71c7ddb1ffd07e0ad6576b3a3df2/ruff-0.14.11-py3-none-musllinux_1_2_i686.whl", hash = "sha256:82ff352ea68fb6766140381748e1f67f83c39860b6446966cff48a315c3e2491", size = 13145837 }, - { url = "https://files.pythonhosted.org/packages/3b/d8/e035db859d1d3edf909381eb8ff3e89a672d6572e9454093538fe6f164b0/ruff-0.14.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:728e56879df4ca5b62a9dde2dd0eb0edda2a55160c0ea28c4025f18c03f86984", size = 13850469 }, - { url = "https://files.pythonhosted.org/packages/4e/02/bb3ff8b6e6d02ce9e3740f4c17dfbbfb55f34c789c139e9cd91985f356c7/ruff-0.14.11-py3-none-win32.whl", hash = "sha256:337c5dd11f16ee52ae217757d9b82a26400be7efac883e9e852646f1557ed841", size = 12851094 }, - { url = "https://files.pythonhosted.org/packages/58/f1/90ddc533918d3a2ad628bc3044cdfc094949e6d4b929220c3f0eb8a1c998/ruff-0.14.11-py3-none-win_amd64.whl", hash = "sha256:f981cea63d08456b2c070e64b79cb62f951aa1305282974d4d5216e6e0178ae6", size = 14001379 }, - { url = "https://files.pythonhosted.org/packages/c4/1c/1dbe51782c0e1e9cfce1d1004752672d2d4629ea46945d19d731ad772b3b/ruff-0.14.11-py3-none-win_arm64.whl", hash = "sha256:649fb6c9edd7f751db276ef42df1f3df41c38d67d199570ae2a7bd6cbc3590f0", size = 12938644 }, + { url = "https://files.pythonhosted.org/packages/f0/a6/a4c40a5aaa7e331f245d2dc1ac8ece306681f52b636b40ef87c88b9f7afd/ruff-0.14.11-py3-none-linux_armv6l.whl", hash = "sha256:f6ff2d95cbd335841a7217bdfd9c1d2e44eac2c584197ab1385579d55ff8830e", size = 12951208, upload-time = "2026-01-08T19:12:09.218Z" }, + { url = "https://files.pythonhosted.org/packages/5c/5c/360a35cb7204b328b685d3129c08aca24765ff92b5a7efedbdd6c150d555/ruff-0.14.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f6eb5c1c8033680f4172ea9c8d3706c156223010b8b97b05e82c59bdc774ee6", size = 13330075, upload-time = 
"2026-01-08T19:12:02.549Z" }, + { url = "https://files.pythonhosted.org/packages/1b/9e/0cc2f1be7a7d33cae541824cf3f95b4ff40d03557b575912b5b70273c9ec/ruff-0.14.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f2fc34cc896f90080fca01259f96c566f74069a04b25b6205d55379d12a6855e", size = 12257809, upload-time = "2026-01-08T19:12:00.366Z" }, + { url = "https://files.pythonhosted.org/packages/a7/e5/5faab97c15bb75228d9f74637e775d26ac703cc2b4898564c01ab3637c02/ruff-0.14.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53386375001773ae812b43205d6064dae49ff0968774e6befe16a994fc233caa", size = 12678447, upload-time = "2026-01-08T19:12:13.899Z" }, + { url = "https://files.pythonhosted.org/packages/1b/33/e9767f60a2bef779fb5855cab0af76c488e0ce90f7bb7b8a45c8a2ba4178/ruff-0.14.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a697737dce1ca97a0a55b5ff0434ee7205943d4874d638fe3ae66166ff46edbe", size = 12758560, upload-time = "2026-01-08T19:11:42.55Z" }, + { url = "https://files.pythonhosted.org/packages/eb/84/4c6cf627a21462bb5102f7be2a320b084228ff26e105510cd2255ea868e5/ruff-0.14.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6845ca1da8ab81ab1dce755a32ad13f1db72e7fba27c486d5d90d65e04d17b8f", size = 13599296, upload-time = "2026-01-08T19:11:30.371Z" }, + { url = "https://files.pythonhosted.org/packages/88/e1/92b5ed7ea66d849f6157e695dc23d5d6d982bd6aa8d077895652c38a7cae/ruff-0.14.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e36ce2fd31b54065ec6f76cb08d60159e1b32bdf08507862e32f47e6dde8bcbf", size = 15048981, upload-time = "2026-01-08T19:12:04.742Z" }, + { url = "https://files.pythonhosted.org/packages/61/df/c1bd30992615ac17c2fb64b8a7376ca22c04a70555b5d05b8f717163cf9f/ruff-0.14.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:590bcc0e2097ecf74e62a5c10a6b71f008ad82eb97b0a0079e85defe19fe74d9", size = 14633183, upload-time = "2026-01-08T19:11:40.069Z" }, + { 
url = "https://files.pythonhosted.org/packages/04/e9/fe552902f25013dd28a5428a42347d9ad20c4b534834a325a28305747d64/ruff-0.14.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:53fe71125fc158210d57fe4da26e622c9c294022988d08d9347ec1cf782adafe", size = 14050453, upload-time = "2026-01-08T19:11:37.555Z" }, + { url = "https://files.pythonhosted.org/packages/ae/93/f36d89fa021543187f98991609ce6e47e24f35f008dfe1af01379d248a41/ruff-0.14.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a35c9da08562f1598ded8470fcfef2afb5cf881996e6c0a502ceb61f4bc9c8a3", size = 13757889, upload-time = "2026-01-08T19:12:07.094Z" }, + { url = "https://files.pythonhosted.org/packages/b7/9f/c7fb6ecf554f28709a6a1f2a7f74750d400979e8cd47ed29feeaa1bd4db8/ruff-0.14.11-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:0f3727189a52179393ecf92ec7057c2210203e6af2676f08d92140d3e1ee72c1", size = 13955832, upload-time = "2026-01-08T19:11:55.064Z" }, + { url = "https://files.pythonhosted.org/packages/db/a0/153315310f250f76900a98278cf878c64dfb6d044e184491dd3289796734/ruff-0.14.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:eb09f849bd37147a789b85995ff734a6c4a095bed5fd1608c4f56afc3634cde2", size = 12586522, upload-time = "2026-01-08T19:11:35.356Z" }, + { url = "https://files.pythonhosted.org/packages/2f/2b/a73a2b6e6d2df1d74bf2b78098be1572191e54bec0e59e29382d13c3adc5/ruff-0.14.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:c61782543c1231bf71041461c1f28c64b961d457d0f238ac388e2ab173d7ecb7", size = 12724637, upload-time = "2026-01-08T19:11:47.796Z" }, + { url = "https://files.pythonhosted.org/packages/f0/41/09100590320394401cd3c48fc718a8ba71c7ddb1ffd07e0ad6576b3a3df2/ruff-0.14.11-py3-none-musllinux_1_2_i686.whl", hash = "sha256:82ff352ea68fb6766140381748e1f67f83c39860b6446966cff48a315c3e2491", size = 13145837, upload-time = "2026-01-08T19:11:32.87Z" }, + { url = 
"https://files.pythonhosted.org/packages/3b/d8/e035db859d1d3edf909381eb8ff3e89a672d6572e9454093538fe6f164b0/ruff-0.14.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:728e56879df4ca5b62a9dde2dd0eb0edda2a55160c0ea28c4025f18c03f86984", size = 13850469, upload-time = "2026-01-08T19:12:11.694Z" }, + { url = "https://files.pythonhosted.org/packages/4e/02/bb3ff8b6e6d02ce9e3740f4c17dfbbfb55f34c789c139e9cd91985f356c7/ruff-0.14.11-py3-none-win32.whl", hash = "sha256:337c5dd11f16ee52ae217757d9b82a26400be7efac883e9e852646f1557ed841", size = 12851094, upload-time = "2026-01-08T19:11:45.163Z" }, + { url = "https://files.pythonhosted.org/packages/58/f1/90ddc533918d3a2ad628bc3044cdfc094949e6d4b929220c3f0eb8a1c998/ruff-0.14.11-py3-none-win_amd64.whl", hash = "sha256:f981cea63d08456b2c070e64b79cb62f951aa1305282974d4d5216e6e0178ae6", size = 14001379, upload-time = "2026-01-08T19:11:52.591Z" }, + { url = "https://files.pythonhosted.org/packages/c4/1c/1dbe51782c0e1e9cfce1d1004752672d2d4629ea46945d19d731ad772b3b/ruff-0.14.11-py3-none-win_arm64.whl", hash = "sha256:649fb6c9edd7f751db276ef42df1f3df41c38d67d199570ae2a7bd6cbc3590f0", size = 12938644, upload-time = "2026-01-08T19:11:50.027Z" }, ] [[package]] @@ -250,9 +250,9 @@ source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version < '3.10'", ] -sdist = { url = "https://files.pythonhosted.org/packages/ca/6c/3d75c196ac07ac8749600b60b03f4f6094d54e132c4d94ebac6ee0e0add0/termcolor-3.1.0.tar.gz", hash = "sha256:6a6dd7fbee581909eeec6a756cff1d7f7c376063b14e4a298dc4980309e55970", size = 14324 } +sdist = { url = "https://files.pythonhosted.org/packages/ca/6c/3d75c196ac07ac8749600b60b03f4f6094d54e132c4d94ebac6ee0e0add0/termcolor-3.1.0.tar.gz", hash = "sha256:6a6dd7fbee581909eeec6a756cff1d7f7c376063b14e4a298dc4980309e55970", size = 14324, upload-time = "2025-04-30T11:37:53.791Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/4f/bd/de8d508070629b6d84a30d01d57e4a65c69aa7f5abe7560b8fad3b50ea59/termcolor-3.1.0-py3-none-any.whl", hash = "sha256:591dd26b5c2ce03b9e43f391264626557873ce1d379019786f99b0c2bee140aa", size = 7684 }, + { url = "https://files.pythonhosted.org/packages/4f/bd/de8d508070629b6d84a30d01d57e4a65c69aa7f5abe7560b8fad3b50ea59/termcolor-3.1.0-py3-none-any.whl", hash = "sha256:591dd26b5c2ce03b9e43f391264626557873ce1d379019786f99b0c2bee140aa", size = 7684, upload-time = "2025-04-30T11:37:52.382Z" }, ] [[package]] @@ -262,79 +262,79 @@ source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.10'", ] -sdist = { url = "https://files.pythonhosted.org/packages/46/79/cf31d7a93a8fdc6aa0fbb665be84426a8c5a557d9240b6239e9e11e35fc5/termcolor-3.3.0.tar.gz", hash = "sha256:348871ca648ec6a9a983a13ab626c0acce02f515b9e1983332b17af7979521c5", size = 14434 } +sdist = { url = "https://files.pythonhosted.org/packages/46/79/cf31d7a93a8fdc6aa0fbb665be84426a8c5a557d9240b6239e9e11e35fc5/termcolor-3.3.0.tar.gz", hash = "sha256:348871ca648ec6a9a983a13ab626c0acce02f515b9e1983332b17af7979521c5", size = 14434, upload-time = "2025-12-29T12:55:21.882Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/33/d1/8bb87d21e9aeb323cc03034f5eaf2c8f69841e40e4853c2627edf8111ed3/termcolor-3.3.0-py3-none-any.whl", hash = "sha256:cf642efadaf0a8ebbbf4bc7a31cec2f9b5f21a9f726f4ccbb08192c9c26f43a5", size = 7734 }, + { url = "https://files.pythonhosted.org/packages/33/d1/8bb87d21e9aeb323cc03034f5eaf2c8f69841e40e4853c2627edf8111ed3/termcolor-3.3.0-py3-none-any.whl", hash = "sha256:cf642efadaf0a8ebbbf4bc7a31cec2f9b5f21a9f726f4ccbb08192c9c26f43a5", size = 7734, upload-time = "2025-12-29T12:55:20.718Z" }, ] [[package]] name = "tomli" version = "2.4.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/82/30/31573e9457673ab10aa432461bee537ce6cef177667deca369efb79df071/tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c", size = 17477 } +sdist = { url = "https://files.pythonhosted.org/packages/82/30/31573e9457673ab10aa432461bee537ce6cef177667deca369efb79df071/tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c", size = 17477, upload-time = "2026-01-11T11:22:38.165Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/d9/3dc2289e1f3b32eb19b9785b6a006b28ee99acb37d1d47f78d4c10e28bf8/tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867", size = 153663 }, - { url = "https://files.pythonhosted.org/packages/51/32/ef9f6845e6b9ca392cd3f64f9ec185cc6f09f0a2df3db08cbe8809d1d435/tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9", size = 148469 }, - { url = "https://files.pythonhosted.org/packages/d6/c2/506e44cce89a8b1b1e047d64bd495c22c9f71f21e05f380f1a950dd9c217/tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95", size = 236039 }, - { url = "https://files.pythonhosted.org/packages/b3/40/e1b65986dbc861b7e986e8ec394598187fa8aee85b1650b01dd925ca0be8/tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76", size = 243007 }, - { url = "https://files.pythonhosted.org/packages/9c/6f/6e39ce66b58a5b7ae572a0f4352ff40c71e8573633deda43f6a379d56b3e/tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d", size = 240875 }, - { url = 
"https://files.pythonhosted.org/packages/aa/ad/cb089cb190487caa80204d503c7fd0f4d443f90b95cf4ef5cf5aa0f439b0/tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576", size = 246271 }, - { url = "https://files.pythonhosted.org/packages/0b/63/69125220e47fd7a3a27fd0de0c6398c89432fec41bc739823bcc66506af6/tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a", size = 96770 }, - { url = "https://files.pythonhosted.org/packages/1e/0d/a22bb6c83f83386b0008425a6cd1fa1c14b5f3dd4bad05e98cf3dbbf4a64/tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa", size = 107626 }, - { url = "https://files.pythonhosted.org/packages/2f/6d/77be674a3485e75cacbf2ddba2b146911477bd887dda9d8c9dfb2f15e871/tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614", size = 94842 }, - { url = "https://files.pythonhosted.org/packages/3c/43/7389a1869f2f26dba52404e1ef13b4784b6b37dac93bac53457e3ff24ca3/tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1", size = 154894 }, - { url = "https://files.pythonhosted.org/packages/e9/05/2f9bf110b5294132b2edf13fe6ca6ae456204f3d749f623307cbb7a946f2/tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8", size = 149053 }, - { url = "https://files.pythonhosted.org/packages/e8/41/1eda3ca1abc6f6154a8db4d714a4d35c4ad90adc0bcf700657291593fbf3/tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a", size = 243481 }, - { url = 
"https://files.pythonhosted.org/packages/d2/6d/02ff5ab6c8868b41e7d4b987ce2b5f6a51d3335a70aa144edd999e055a01/tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1", size = 251720 }, - { url = "https://files.pythonhosted.org/packages/7b/57/0405c59a909c45d5b6f146107c6d997825aa87568b042042f7a9c0afed34/tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b", size = 247014 }, - { url = "https://files.pythonhosted.org/packages/2c/0e/2e37568edd944b4165735687cbaf2fe3648129e440c26d02223672ee0630/tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51", size = 251820 }, - { url = "https://files.pythonhosted.org/packages/5a/1c/ee3b707fdac82aeeb92d1a113f803cf6d0f37bdca0849cb489553e1f417a/tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729", size = 97712 }, - { url = "https://files.pythonhosted.org/packages/69/13/c07a9177d0b3bab7913299b9278845fc6eaaca14a02667c6be0b0a2270c8/tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da", size = 108296 }, - { url = "https://files.pythonhosted.org/packages/18/27/e267a60bbeeee343bcc279bb9e8fbed0cbe224bc7b2a3dc2975f22809a09/tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3", size = 94553 }, - { url = "https://files.pythonhosted.org/packages/34/91/7f65f9809f2936e1f4ce6268ae1903074563603b2a2bd969ebbda802744f/tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0", size = 154915 }, - { url = 
"https://files.pythonhosted.org/packages/20/aa/64dd73a5a849c2e8f216b755599c511badde80e91e9bc2271baa7b2cdbb1/tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e", size = 149038 }, - { url = "https://files.pythonhosted.org/packages/9e/8a/6d38870bd3d52c8d1505ce054469a73f73a0fe62c0eaf5dddf61447e32fa/tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4", size = 242245 }, - { url = "https://files.pythonhosted.org/packages/59/bb/8002fadefb64ab2669e5b977df3f5e444febea60e717e755b38bb7c41029/tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e", size = 250335 }, - { url = "https://files.pythonhosted.org/packages/a5/3d/4cdb6f791682b2ea916af2de96121b3cb1284d7c203d97d92d6003e91c8d/tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c", size = 245962 }, - { url = "https://files.pythonhosted.org/packages/f2/4a/5f25789f9a460bd858ba9756ff52d0830d825b458e13f754952dd15fb7bb/tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f", size = 250396 }, - { url = "https://files.pythonhosted.org/packages/aa/2f/b73a36fea58dfa08e8b3a268750e6853a6aac2a349241a905ebd86f3047a/tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86", size = 97530 }, - { url = "https://files.pythonhosted.org/packages/3b/af/ca18c134b5d75de7e8dc551c5234eaba2e8e951f6b30139599b53de9c187/tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87", size = 108227 }, - { url = 
"https://files.pythonhosted.org/packages/22/c3/b386b832f209fee8073c8138ec50f27b4460db2fdae9ffe022df89a57f9b/tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132", size = 94748 }, - { url = "https://files.pythonhosted.org/packages/f3/c4/84047a97eb1004418bc10bdbcfebda209fca6338002eba2dc27cc6d13563/tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6", size = 154725 }, - { url = "https://files.pythonhosted.org/packages/a8/5d/d39038e646060b9d76274078cddf146ced86dc2b9e8bbf737ad5983609a0/tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc", size = 148901 }, - { url = "https://files.pythonhosted.org/packages/73/e5/383be1724cb30f4ce44983d249645684a48c435e1cd4f8b5cded8a816d3c/tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66", size = 243375 }, - { url = "https://files.pythonhosted.org/packages/31/f0/bea80c17971c8d16d3cc109dc3585b0f2ce1036b5f4a8a183789023574f2/tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d", size = 250639 }, - { url = "https://files.pythonhosted.org/packages/2c/8f/2853c36abbb7608e3f945d8a74e32ed3a74ee3a1f468f1ffc7d1cb3abba6/tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702", size = 246897 }, - { url = "https://files.pythonhosted.org/packages/49/f0/6c05e3196ed5337b9fe7ea003e95fd3819a840b7a0f2bf5a408ef1dad8ed/tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8", size = 254697 }, - { url = 
"https://files.pythonhosted.org/packages/f3/f5/2922ef29c9f2951883525def7429967fc4d8208494e5ab524234f06b688b/tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776", size = 98567 }, - { url = "https://files.pythonhosted.org/packages/7b/31/22b52e2e06dd2a5fdbc3ee73226d763b184ff21fc24e20316a44ccc4d96b/tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475", size = 108556 }, - { url = "https://files.pythonhosted.org/packages/48/3d/5058dff3255a3d01b705413f64f4306a141a8fd7a251e5a495e3f192a998/tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2", size = 96014 }, - { url = "https://files.pythonhosted.org/packages/b8/4e/75dab8586e268424202d3a1997ef6014919c941b50642a1682df43204c22/tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9", size = 163339 }, - { url = "https://files.pythonhosted.org/packages/06/e3/b904d9ab1016829a776d97f163f183a48be6a4deb87304d1e0116a349519/tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0", size = 159490 }, - { url = "https://files.pythonhosted.org/packages/e3/5a/fc3622c8b1ad823e8ea98a35e3c632ee316d48f66f80f9708ceb4f2a0322/tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df", size = 269398 }, - { url = "https://files.pythonhosted.org/packages/fd/33/62bd6152c8bdd4c305ad9faca48f51d3acb2df1f8791b1477d46ff86e7f8/tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d", size = 276515 }, - { url = 
"https://files.pythonhosted.org/packages/4b/ff/ae53619499f5235ee4211e62a8d7982ba9e439a0fb4f2f351a93d67c1dd2/tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f", size = 273806 }, - { url = "https://files.pythonhosted.org/packages/47/71/cbca7787fa68d4d0a9f7072821980b39fbb1b6faeb5f5cf02f4a5559fa28/tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b", size = 281340 }, - { url = "https://files.pythonhosted.org/packages/f5/00/d595c120963ad42474cf6ee7771ad0d0e8a49d0f01e29576ee9195d9ecdf/tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087", size = 108106 }, - { url = "https://files.pythonhosted.org/packages/de/69/9aa0c6a505c2f80e519b43764f8b4ba93b5a0bbd2d9a9de6e2b24271b9a5/tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd", size = 120504 }, - { url = "https://files.pythonhosted.org/packages/b3/9f/f1668c281c58cfae01482f7114a4b88d345e4c140386241a1a24dcc9e7bc/tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4", size = 99561 }, - { url = "https://files.pythonhosted.org/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size = 14477 }, + { url = "https://files.pythonhosted.org/packages/3c/d9/3dc2289e1f3b32eb19b9785b6a006b28ee99acb37d1d47f78d4c10e28bf8/tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867", size = 153663, upload-time = "2026-01-11T11:21:45.27Z" }, + { url = 
"https://files.pythonhosted.org/packages/51/32/ef9f6845e6b9ca392cd3f64f9ec185cc6f09f0a2df3db08cbe8809d1d435/tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9", size = 148469, upload-time = "2026-01-11T11:21:46.873Z" }, + { url = "https://files.pythonhosted.org/packages/d6/c2/506e44cce89a8b1b1e047d64bd495c22c9f71f21e05f380f1a950dd9c217/tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95", size = 236039, upload-time = "2026-01-11T11:21:48.503Z" }, + { url = "https://files.pythonhosted.org/packages/b3/40/e1b65986dbc861b7e986e8ec394598187fa8aee85b1650b01dd925ca0be8/tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76", size = 243007, upload-time = "2026-01-11T11:21:49.456Z" }, + { url = "https://files.pythonhosted.org/packages/9c/6f/6e39ce66b58a5b7ae572a0f4352ff40c71e8573633deda43f6a379d56b3e/tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d", size = 240875, upload-time = "2026-01-11T11:21:50.755Z" }, + { url = "https://files.pythonhosted.org/packages/aa/ad/cb089cb190487caa80204d503c7fd0f4d443f90b95cf4ef5cf5aa0f439b0/tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576", size = 246271, upload-time = "2026-01-11T11:21:51.81Z" }, + { url = "https://files.pythonhosted.org/packages/0b/63/69125220e47fd7a3a27fd0de0c6398c89432fec41bc739823bcc66506af6/tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a", size = 96770, upload-time = "2026-01-11T11:21:52.647Z" }, + { url = 
"https://files.pythonhosted.org/packages/1e/0d/a22bb6c83f83386b0008425a6cd1fa1c14b5f3dd4bad05e98cf3dbbf4a64/tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa", size = 107626, upload-time = "2026-01-11T11:21:53.459Z" }, + { url = "https://files.pythonhosted.org/packages/2f/6d/77be674a3485e75cacbf2ddba2b146911477bd887dda9d8c9dfb2f15e871/tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614", size = 94842, upload-time = "2026-01-11T11:21:54.831Z" }, + { url = "https://files.pythonhosted.org/packages/3c/43/7389a1869f2f26dba52404e1ef13b4784b6b37dac93bac53457e3ff24ca3/tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1", size = 154894, upload-time = "2026-01-11T11:21:56.07Z" }, + { url = "https://files.pythonhosted.org/packages/e9/05/2f9bf110b5294132b2edf13fe6ca6ae456204f3d749f623307cbb7a946f2/tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8", size = 149053, upload-time = "2026-01-11T11:21:57.467Z" }, + { url = "https://files.pythonhosted.org/packages/e8/41/1eda3ca1abc6f6154a8db4d714a4d35c4ad90adc0bcf700657291593fbf3/tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a", size = 243481, upload-time = "2026-01-11T11:21:58.661Z" }, + { url = "https://files.pythonhosted.org/packages/d2/6d/02ff5ab6c8868b41e7d4b987ce2b5f6a51d3335a70aa144edd999e055a01/tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1", size = 251720, upload-time = "2026-01-11T11:22:00.178Z" }, + { url = 
"https://files.pythonhosted.org/packages/7b/57/0405c59a909c45d5b6f146107c6d997825aa87568b042042f7a9c0afed34/tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b", size = 247014, upload-time = "2026-01-11T11:22:01.238Z" }, + { url = "https://files.pythonhosted.org/packages/2c/0e/2e37568edd944b4165735687cbaf2fe3648129e440c26d02223672ee0630/tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51", size = 251820, upload-time = "2026-01-11T11:22:02.727Z" }, + { url = "https://files.pythonhosted.org/packages/5a/1c/ee3b707fdac82aeeb92d1a113f803cf6d0f37bdca0849cb489553e1f417a/tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729", size = 97712, upload-time = "2026-01-11T11:22:03.777Z" }, + { url = "https://files.pythonhosted.org/packages/69/13/c07a9177d0b3bab7913299b9278845fc6eaaca14a02667c6be0b0a2270c8/tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da", size = 108296, upload-time = "2026-01-11T11:22:04.86Z" }, + { url = "https://files.pythonhosted.org/packages/18/27/e267a60bbeeee343bcc279bb9e8fbed0cbe224bc7b2a3dc2975f22809a09/tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3", size = 94553, upload-time = "2026-01-11T11:22:05.854Z" }, + { url = "https://files.pythonhosted.org/packages/34/91/7f65f9809f2936e1f4ce6268ae1903074563603b2a2bd969ebbda802744f/tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0", size = 154915, upload-time = "2026-01-11T11:22:06.703Z" }, + { url = "https://files.pythonhosted.org/packages/20/aa/64dd73a5a849c2e8f216b755599c511badde80e91e9bc2271baa7b2cdbb1/tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e", size = 149038, upload-time = "2026-01-11T11:22:07.56Z" }, + { url = "https://files.pythonhosted.org/packages/9e/8a/6d38870bd3d52c8d1505ce054469a73f73a0fe62c0eaf5dddf61447e32fa/tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4", size = 242245, upload-time = "2026-01-11T11:22:08.344Z" }, + { url = "https://files.pythonhosted.org/packages/59/bb/8002fadefb64ab2669e5b977df3f5e444febea60e717e755b38bb7c41029/tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e", size = 250335, upload-time = "2026-01-11T11:22:09.951Z" }, + { url = "https://files.pythonhosted.org/packages/a5/3d/4cdb6f791682b2ea916af2de96121b3cb1284d7c203d97d92d6003e91c8d/tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c", size = 245962, upload-time = "2026-01-11T11:22:11.27Z" }, + { url = "https://files.pythonhosted.org/packages/f2/4a/5f25789f9a460bd858ba9756ff52d0830d825b458e13f754952dd15fb7bb/tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f", size = 250396, upload-time = "2026-01-11T11:22:12.325Z" }, + { url = "https://files.pythonhosted.org/packages/aa/2f/b73a36fea58dfa08e8b3a268750e6853a6aac2a349241a905ebd86f3047a/tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86", size = 97530, upload-time = "2026-01-11T11:22:13.865Z" }, + { url = "https://files.pythonhosted.org/packages/3b/af/ca18c134b5d75de7e8dc551c5234eaba2e8e951f6b30139599b53de9c187/tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87", 
size = 108227, upload-time = "2026-01-11T11:22:15.224Z" }, + { url = "https://files.pythonhosted.org/packages/22/c3/b386b832f209fee8073c8138ec50f27b4460db2fdae9ffe022df89a57f9b/tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132", size = 94748, upload-time = "2026-01-11T11:22:16.009Z" }, + { url = "https://files.pythonhosted.org/packages/f3/c4/84047a97eb1004418bc10bdbcfebda209fca6338002eba2dc27cc6d13563/tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6", size = 154725, upload-time = "2026-01-11T11:22:17.269Z" }, + { url = "https://files.pythonhosted.org/packages/a8/5d/d39038e646060b9d76274078cddf146ced86dc2b9e8bbf737ad5983609a0/tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc", size = 148901, upload-time = "2026-01-11T11:22:18.287Z" }, + { url = "https://files.pythonhosted.org/packages/73/e5/383be1724cb30f4ce44983d249645684a48c435e1cd4f8b5cded8a816d3c/tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66", size = 243375, upload-time = "2026-01-11T11:22:19.154Z" }, + { url = "https://files.pythonhosted.org/packages/31/f0/bea80c17971c8d16d3cc109dc3585b0f2ce1036b5f4a8a183789023574f2/tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d", size = 250639, upload-time = "2026-01-11T11:22:20.168Z" }, + { url = "https://files.pythonhosted.org/packages/2c/8f/2853c36abbb7608e3f945d8a74e32ed3a74ee3a1f468f1ffc7d1cb3abba6/tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702", size = 246897, upload-time = "2026-01-11T11:22:21.544Z" }, + { 
url = "https://files.pythonhosted.org/packages/49/f0/6c05e3196ed5337b9fe7ea003e95fd3819a840b7a0f2bf5a408ef1dad8ed/tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8", size = 254697, upload-time = "2026-01-11T11:22:23.058Z" }, + { url = "https://files.pythonhosted.org/packages/f3/f5/2922ef29c9f2951883525def7429967fc4d8208494e5ab524234f06b688b/tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776", size = 98567, upload-time = "2026-01-11T11:22:24.033Z" }, + { url = "https://files.pythonhosted.org/packages/7b/31/22b52e2e06dd2a5fdbc3ee73226d763b184ff21fc24e20316a44ccc4d96b/tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475", size = 108556, upload-time = "2026-01-11T11:22:25.378Z" }, + { url = "https://files.pythonhosted.org/packages/48/3d/5058dff3255a3d01b705413f64f4306a141a8fd7a251e5a495e3f192a998/tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2", size = 96014, upload-time = "2026-01-11T11:22:26.138Z" }, + { url = "https://files.pythonhosted.org/packages/b8/4e/75dab8586e268424202d3a1997ef6014919c941b50642a1682df43204c22/tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9", size = 163339, upload-time = "2026-01-11T11:22:27.143Z" }, + { url = "https://files.pythonhosted.org/packages/06/e3/b904d9ab1016829a776d97f163f183a48be6a4deb87304d1e0116a349519/tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0", size = 159490, upload-time = "2026-01-11T11:22:28.399Z" }, + { url = 
"https://files.pythonhosted.org/packages/e3/5a/fc3622c8b1ad823e8ea98a35e3c632ee316d48f66f80f9708ceb4f2a0322/tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df", size = 269398, upload-time = "2026-01-11T11:22:29.345Z" }, + { url = "https://files.pythonhosted.org/packages/fd/33/62bd6152c8bdd4c305ad9faca48f51d3acb2df1f8791b1477d46ff86e7f8/tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d", size = 276515, upload-time = "2026-01-11T11:22:30.327Z" }, + { url = "https://files.pythonhosted.org/packages/4b/ff/ae53619499f5235ee4211e62a8d7982ba9e439a0fb4f2f351a93d67c1dd2/tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f", size = 273806, upload-time = "2026-01-11T11:22:32.56Z" }, + { url = "https://files.pythonhosted.org/packages/47/71/cbca7787fa68d4d0a9f7072821980b39fbb1b6faeb5f5cf02f4a5559fa28/tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b", size = 281340, upload-time = "2026-01-11T11:22:33.505Z" }, + { url = "https://files.pythonhosted.org/packages/f5/00/d595c120963ad42474cf6ee7771ad0d0e8a49d0f01e29576ee9195d9ecdf/tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087", size = 108106, upload-time = "2026-01-11T11:22:34.451Z" }, + { url = "https://files.pythonhosted.org/packages/de/69/9aa0c6a505c2f80e519b43764f8b4ba93b5a0bbd2d9a9de6e2b24271b9a5/tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd", size = 120504, upload-time = "2026-01-11T11:22:35.764Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/9f/f1668c281c58cfae01482f7114a4b88d345e4c140386241a1a24dcc9e7bc/tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4", size = 99561, upload-time = "2026-01-11T11:22:36.624Z" }, + { url = "https://files.pythonhosted.org/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size = 14477, upload-time = "2026-01-11T11:22:37.446Z" }, ] [[package]] name = "typing-extensions" version = "4.15.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391 } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614 }, + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, ] [[package]] name = "zipp" version = "3.23.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547 } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276 }, + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, ]