diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/__init__.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/__init__.py
index 15df002e29..fa36d55587 100644
--- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/__init__.py
+++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/__init__.py
@@ -39,9 +39,12 @@ from opentelemetry.semconv._incubating.attributes import (
     gen_ai_attributes as GenAIAttributes,
 )
+from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
+    GenAiOperationNameValues,
+    GenAiSystemValues,
+)
 from opentelemetry.semconv_ai import (
     SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
-    LLMRequestTypeValues,
     Meters,
     SpanAttributes,
 )
@@ -278,7 +281,7 @@ async def _aset_token_usage(
         choices,
         attributes={
             **metric_attributes,
-            SpanAttributes.LLM_RESPONSE_STOP_REASON: getattr(response, "stop_reason", None),
+            SpanAttributes.GEN_AI_RESPONSE_STOP_REASON: getattr(response, "stop_reason", None),
         },
     )
@@ -286,7 +289,7 @@ async def _aset_token_usage(
     set_span_attribute(
         span, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, completion_tokens
     )
-    set_span_attribute(span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens)
+    set_span_attribute(span, SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens)
     set_span_attribute(
         span, SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens
@@ -392,7 +395,7 @@ def _set_token_usage(
         choices,
         attributes={
             **metric_attributes,
-            SpanAttributes.LLM_RESPONSE_STOP_REASON: getattr(response, "stop_reason", None),
+            SpanAttributes.GEN_AI_RESPONSE_STOP_REASON: getattr(response, "stop_reason", None),
         },
     )
@@ -400,7 +403,7 @@ def _set_token_usage(
     set_span_attribute(
         span, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, completion_tokens
     )
-    set_span_attribute(span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens)
+    set_span_attribute(span, SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens)
     set_span_attribute(
         span, SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens
@@ -537,12 +540,17 @@ def _wrap(
         return wrapped(*args, **kwargs)

     name = to_wrap.get("span_name")
+    operation_name = (
+        GenAiOperationNameValues.TEXT_COMPLETION.value
+        if name == "anthropic.completion"
+        else GenAiOperationNameValues.CHAT.value
+    )
     span = tracer.start_span(
         name,
         kind=SpanKind.CLIENT,
         attributes={
-            GenAIAttributes.GEN_AI_SYSTEM: "Anthropic",
-            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
+            GenAIAttributes.GEN_AI_PROVIDER_NAME: GenAiSystemValues.ANTHROPIC.value,
+            GenAIAttributes.GEN_AI_OPERATION_NAME: operation_name,
         },
     )
@@ -661,12 +669,17 @@ async def _awrap(
         return await wrapped(*args, **kwargs)

     name = to_wrap.get("span_name")
+    operation_name = (
+        GenAiOperationNameValues.TEXT_COMPLETION.value
+        if name == "anthropic.completion"
+        else GenAiOperationNameValues.CHAT.value
+    )
     span = tracer.start_span(
         name,
         kind=SpanKind.CLIENT,
         attributes={
-            GenAIAttributes.GEN_AI_SYSTEM: "Anthropic",
-            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
+            GenAIAttributes.GEN_AI_PROVIDER_NAME: GenAiSystemValues.ANTHROPIC.value,
+            GenAIAttributes.GEN_AI_OPERATION_NAME: operation_name,
         },
     )
     await _ahandle_input(span, event_logger, kwargs)
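Reviewer note: the two hunks above replace the legacy span-start pair (gen_ai.system = "Anthropic", llm.request.type = "completion") with a provider name plus a per-endpoint operation name. A minimal sketch of the resulting attributes, assuming the incubating enum members resolve to the usual lowercase strings ("anthropic", "chat", "text_completion") — start_attributes is a hypothetical helper, not part of the patch:

from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
    GenAiOperationNameValues,
    GenAiSystemValues,
)


def start_attributes(span_name: str) -> dict:
    # Hypothetical helper mirroring the operation_name selection in _wrap/_awrap:
    # "anthropic.completion" spans map to text_completion, everything else to chat.
    operation_name = (
        GenAiOperationNameValues.TEXT_COMPLETION.value
        if span_name == "anthropic.completion"
        else GenAiOperationNameValues.CHAT.value
    )
    return {
        "gen_ai.provider.name": GenAiSystemValues.ANTHROPIC.value,
        "gen_ai.operation.name": operation_name,
    }


assert start_attributes("anthropic.chat") == {
    "gen_ai.provider.name": "anthropic",
    "gen_ai.operation.name": "chat",
}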
diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/span_utils.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/span_utils.py
index 599da108d6..ee141a14fb 100644
--- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/span_utils.py
+++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/span_utils.py
@@ -76,96 +76,92 @@ async def aset_input_attributes(span, kwargs):
     set_span_attribute(span, GenAIAttributes.GEN_AI_REQUEST_MODEL, kwargs.get("model"))
     set_span_attribute(
-        span, GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS, kwargs.get("max_tokens_to_sample")
+        span, GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS, kwargs.get("max_tokens_to_sample") or kwargs.get("max_tokens")
     )
     set_span_attribute(
         span, GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE, kwargs.get("temperature")
     )
     set_span_attribute(span, GenAIAttributes.GEN_AI_REQUEST_TOP_P, kwargs.get("top_p"))
     set_span_attribute(
-        span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
+        span, GenAIAttributes.GEN_AI_REQUEST_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
     )
     set_span_attribute(
-        span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")
+        span, GenAIAttributes.GEN_AI_REQUEST_PRESENCE_PENALTY, kwargs.get("presence_penalty")
     )
-    set_span_attribute(span, SpanAttributes.LLM_IS_STREAMING, kwargs.get("stream"))
+    set_span_attribute(span, SpanAttributes.GEN_AI_IS_STREAMING, kwargs.get("stream"))

     if should_send_prompts():
         if kwargs.get("prompt") is not None:
             set_span_attribute(
-                span, f"{GenAIAttributes.GEN_AI_PROMPT}.0.user", kwargs.get("prompt")
+                span,
+                GenAIAttributes.GEN_AI_INPUT_MESSAGES,
+                json.dumps([{"role": "user", "content": kwargs.get("prompt")}]),
             )
         elif kwargs.get("messages") is not None:
-            has_system_message = False
             if kwargs.get("system"):
-                has_system_message = True
                 set_span_attribute(
                     span,
-                    f"{GenAIAttributes.GEN_AI_PROMPT}.0.content",
+                    GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS,
                     await _dump_content(
                         message_index=0, span=span, content=kwargs.get("system")
                     ),
                 )
-                set_span_attribute(
-                    span,
-                    f"{GenAIAttributes.GEN_AI_PROMPT}.0.role",
-                    "system",
-                )
+
+            input_messages = []
             for i, message in enumerate(kwargs.get("messages")):
-                prompt_index = i + (1 if has_system_message else 0)
                 content = message.get("content")
                 tool_use_blocks = []
-                other_blocks = []
+                non_tool_use_content = content
                 if isinstance(content, list):
-                    for block in content:
-                        if dict(block).get("type") == "tool_use":
-                            tool_use_blocks.append(dict(block))
-                        else:
-                            other_blocks.append(block)
-                    content = other_blocks
-                set_span_attribute(
-                    span,
-                    f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.content",
-                    await _dump_content(
-                        message_index=i, span=span, content=message.get("content")
+                    tool_use_blocks = [
+                        dict(block)
+                        for block in content
+                        if dict(block).get("type") == "tool_use"
+                    ]
+                    non_tool_use_content = [
+                        block
+                        for block in content
+                        if dict(block).get("type") != "tool_use"
+                    ] or None
+
+                msg_obj = {
+                    "role": message.get("role"),
+                    "content": await _dump_content(
+                        message_index=i, span=span, content=non_tool_use_content
                     ),
-                )
-                set_span_attribute(
-                    span,
-                    f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.role",
-                    message.get("role"),
-                )
+                }
                 if tool_use_blocks:
-                    for tool_num, tool_use_block in enumerate(tool_use_blocks):
-                        set_span_attribute(
-                            span,
-                            f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.tool_calls.{tool_num}.id",
-                            tool_use_block.get("id"),
-                        )
-                        set_span_attribute(
-                            span,
-                            f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.tool_calls.{tool_num}.name",
-                            tool_use_block.get("name"),
-                        )
-                        set_span_attribute(
-                            span,
-                            f"{GenAIAttributes.GEN_AI_PROMPT}.{prompt_index}.tool_calls.{tool_num}.arguments",
-                            json.dumps(tool_use_block.get("input")),
-                        )
+                    msg_obj["tool_calls"] = [
+                        {
+                            "id": block.get("id"),
+                            "name": block.get("name"),
+                            "arguments": json.dumps(block.get("input")),
+                        }
+                        for block in tool_use_blocks
+                    ]
+                input_messages.append(msg_obj)
+
+            set_span_attribute(
+                span,
+                GenAIAttributes.GEN_AI_INPUT_MESSAGES,
+                json.dumps(input_messages, cls=JSONEncoder),
+            )

     if kwargs.get("tools") is not None:
-        for i, tool in enumerate(kwargs.get("tools")):
-            prefix = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}"
-            set_span_attribute(span, f"{prefix}.name", tool.get("name"))
-            set_span_attribute(
-                span, f"{prefix}.description", tool.get("description")
-            )
-            input_schema = tool.get("input_schema")
-            if input_schema is not None:
-                set_span_attribute(
-                    span, f"{prefix}.input_schema", json.dumps(input_schema)
-                )
+        tool_defs = []
+        for tool in kwargs.get("tools"):
+            tool_def = {"name": tool.get("name")}
+            if tool.get("description"):
+                tool_def["description"] = tool.get("description")
+            if tool.get("input_schema") is not None:
+                tool_def["input_schema"] = tool.get("input_schema")
+            tool_defs.append(tool_def)
+        set_span_attribute(
+            span,
+            GenAIAttributes.GEN_AI_TOOL_DEFINITIONS,
+            json.dumps(tool_defs, cls=JSONEncoder),
+        )

     output_format = kwargs.get("output_format")
     if output_format and isinstance(output_format, dict):
@@ -174,146 +170,120 @@ async def aset_input_attributes(span, kwargs):
         if schema:
             set_span_attribute(
                 span,
-                "gen_ai.request.structured_output_schema",
+                SpanAttributes.GEN_AI_REQUEST_STRUCTURED_OUTPUT_SCHEMA,
                 json.dumps(schema),
             )


 async def _aset_span_completions(span, response):
-    if not should_send_prompts():
-        return
     from opentelemetry.instrumentation.anthropic import set_span_attribute
     from opentelemetry.instrumentation.anthropic.utils import _aextract_response_data

     response = await _aextract_response_data(response)
-    index = 0
-    prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.{index}"
-    set_span_attribute(span, f"{prefix}.finish_reason", response.get("stop_reason"))
-    if response.get("role"):
-        set_span_attribute(span, f"{prefix}.role", response.get("role"))
+    stop_reason = response.get("stop_reason")
+
+    if stop_reason:
+        span.set_attribute(GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS, [stop_reason])
+
+    if not should_send_prompts():
+        return
+
+    output_messages = []
     if response.get("completion"):
-        set_span_attribute(span, f"{prefix}.content", response.get("completion"))
+        output_messages.append({
+            "role": response.get("role", "assistant"),
+            "content": response.get("completion"),
+        })
     elif response.get("content"):
-        tool_call_index = 0
+        tool_calls = []
         text = ""
+        thinking_messages = []
         for content in response.get("content"):
             content_block_type = content.type
-            # usually, Antrhopic responds with just one text block,
-            # but the API allows for multiple text blocks, so concatenate them
             if content_block_type == "text" and hasattr(content, "text"):
                 text += content.text
             elif content_block_type == "thinking":
-                content = dict(content)
-                # override the role to thinking
-                set_span_attribute(
-                    span,
-                    f"{prefix}.role",
-                    "thinking",
-                )
-                set_span_attribute(
-                    span,
-                    f"{prefix}.content",
-                    content.get("thinking"),
-                )
-                # increment the index for subsequent content blocks
-                index += 1
-                prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.{index}"
-                # set the role to the original role on the next completions
-                set_span_attribute(
-                    span,
-                    f"{prefix}.role",
-                    response.get("role"),
-                )
+                thinking_messages.append({
+                    "role": "thinking",
+                    "content": getattr(content, "thinking", None),
+                })
             elif content_block_type == "tool_use":
-                content = dict(content)
-                set_span_attribute(
-                    span,
-                    f"{prefix}.tool_calls.{tool_call_index}.id",
-                    content.get("id"),
-                )
-                set_span_attribute(
-                    span,
-                    f"{prefix}.tool_calls.{tool_call_index}.name",
-                    content.get("name"),
-                )
-                tool_arguments = content.get("input")
-                if tool_arguments is not None:
-                    set_span_attribute(
-                        span,
-                        f"{prefix}.tool_calls.{tool_call_index}.arguments",
-                        json.dumps(tool_arguments),
-                    )
-                tool_call_index += 1
-        set_span_attribute(span, f"{prefix}.content", text)
+                tool_arguments = getattr(content, "input", None)
+                tool_calls.append({
+                    "id": getattr(content, "id", None),
+                    "name": getattr(content, "name", None),
+                    "arguments": json.dumps(tool_arguments) if tool_arguments is not None else None,
+                })
+
+        output_messages.extend(thinking_messages)
+        msg = {"role": response.get("role", "assistant"), "content": text}
+        if tool_calls:
+            msg["tool_calls"] = tool_calls
+        output_messages.append(msg)
+
+    if output_messages:
+        set_span_attribute(
+            span,
+            GenAIAttributes.GEN_AI_OUTPUT_MESSAGES,
+            json.dumps(output_messages, cls=JSONEncoder),
+        )


 def _set_span_completions(span, response):
-    if not should_send_prompts():
-        return
     from opentelemetry.instrumentation.anthropic import set_span_attribute

     response = _extract_response_data(response)
-    index = 0
-    prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.{index}"
-    set_span_attribute(span, f"{prefix}.finish_reason", response.get("stop_reason"))
-    if response.get("role"):
-        set_span_attribute(span, f"{prefix}.role", response.get("role"))
+    stop_reason = response.get("stop_reason")
+
+    if stop_reason:
+        span.set_attribute(GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS, [stop_reason])
+
+    if not should_send_prompts():
+        return
+
+    output_messages = []
     if response.get("completion"):
-        set_span_attribute(span, f"{prefix}.content", response.get("completion"))
+        output_messages.append({
+            "role": response.get("role", "assistant"),
+            "content": response.get("completion"),
+        })
     elif response.get("content"):
-        tool_call_index = 0
+        tool_calls = []
         text = ""
+        thinking_messages = []
         for content in response.get("content"):
             content_block_type = content.type
-            # usually, Antrhopic responds with just one text block,
+            # usually, Anthropic responds with just one text block,
             # but the API allows for multiple text blocks, so concatenate them
             if content_block_type == "text" and hasattr(content, "text"):
                 text += content.text
             elif content_block_type == "thinking":
-                content = dict(content)
-                # override the role to thinking
-                set_span_attribute(
-                    span,
-                    f"{prefix}.role",
-                    "thinking",
-                )
-                set_span_attribute(
-                    span,
-                    f"{prefix}.content",
-                    content.get("thinking"),
-                )
-                # increment the index for subsequent content blocks
-                index += 1
-                prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.{index}"
-                # set the role to the original role on the next completions
-                set_span_attribute(
-                    span,
-                    f"{prefix}.role",
-                    response.get("role"),
-                )
+                thinking_messages.append({
+                    "role": "thinking",
+                    "content": getattr(content, "thinking", None),
+                })
             elif content_block_type == "tool_use":
-                content = dict(content)
-                set_span_attribute(
-                    span,
-                    f"{prefix}.tool_calls.{tool_call_index}.id",
-                    content.get("id"),
-                )
-                set_span_attribute(
-                    span,
-                    f"{prefix}.tool_calls.{tool_call_index}.name",
-                    content.get("name"),
-                )
-                tool_arguments = content.get("input")
-                if tool_arguments is not None:
-                    set_span_attribute(
-                        span,
-                        f"{prefix}.tool_calls.{tool_call_index}.arguments",
-                        json.dumps(tool_arguments),
-                    )
-                tool_call_index += 1
-        set_span_attribute(span, f"{prefix}.content", text)
+                tool_arguments = getattr(content, "input", None)
+                tool_calls.append({
+                    "id": getattr(content, "id", None),
+                    "name": getattr(content, "name", None),
+                    "arguments": json.dumps(tool_arguments) if tool_arguments is not None else None,
+                })
+
+        output_messages.extend(thinking_messages)
+        msg = {"role": response.get("role", "assistant"), "content": text}
+        if tool_calls:
+            msg["tool_calls"] = tool_calls
+        output_messages.append(msg)
+
+    if output_messages:
+        set_span_attribute(
+            span,
+            GenAIAttributes.GEN_AI_OUTPUT_MESSAGES,
+            json.dumps(output_messages, cls=JSONEncoder),
+        )


 @dont_throw
@@ -334,7 +304,7 @@ async def aset_response_attributes(span, response):
     )
     set_span_attribute(
         span,
-        SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
+        SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS,
         prompt_tokens + completion_tokens,
     )
@@ -358,7 +328,7 @@ def set_response_attributes(span, response):
     )
     set_span_attribute(
         span,
-        SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
+        SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS,
        prompt_tokens + completion_tokens,
     )
@@ -367,40 +337,55 @@
 @dont_throw
 def set_streaming_response_attributes(span, complete_response_events):
-    if not should_send_prompts():
-        return
-
     from opentelemetry.instrumentation.anthropic import set_span_attribute

     if not span.is_recording() or not complete_response_events:
         return

-    index = 0
+    output_messages = []
+    finish_reasons = []
+
     for event in complete_response_events:
-        prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.{index}"
-        set_span_attribute(span, f"{prefix}.finish_reason", event.get("finish_reason"))
-        role = "thinking" if event.get("type") == "thinking" else "assistant"
-        # Thinking is added as a separate completion, so we need to increment the index
-        if event.get("type") == "thinking":
-            index += 1
-        set_span_attribute(span, f"{prefix}.role", role)
-        if event.get("type") == "tool_use":
-            set_span_attribute(
-                span,
-                f"{prefix}.tool_calls.0.id",
-                event.get("id"),
-            )
-            set_span_attribute(
-                span,
-                f"{prefix}.tool_calls.0.name",
-                event.get("name"),
-            )
-            tool_arguments = event.get("input")
-            if tool_arguments is not None:
-                set_span_attribute(
-                    span,
-                    f"{prefix}.tool_calls.0.arguments",
-                    tool_arguments,
-                )
-        else:
-            set_span_attribute(span, f"{prefix}.content", event.get("text"))
+        finish_reason = event.get("finish_reason")
+        if finish_reason and finish_reason not in finish_reasons:
+            finish_reasons.append(finish_reason)
+
+        if should_send_prompts():
+            if event.get("type") == "thinking":
+                output_messages.append({
+                    "role": "thinking",
+                    "content": event.get("text"),
+                })
+            elif event.get("type") == "tool_use":
+                tool_arguments = event.get("input")
+                # Streaming accumulates input as a JSON string via input_json_delta;
+                # non-streaming may pass a dict. Normalise to a JSON string either way.
+                if isinstance(tool_arguments, str):
+                    arguments = tool_arguments or None
+                elif tool_arguments is not None:
+                    arguments = json.dumps(tool_arguments)
+                else:
+                    arguments = None
+                output_messages.append({
+                    "role": "assistant",
+                    "tool_calls": [{
+                        "id": event.get("id"),
+                        "name": event.get("name"),
+                        "arguments": arguments,
+                    }],
+                })
+            else:
+                output_messages.append({
+                    "role": "assistant",
+                    "content": event.get("text"),
+                })
+
+    if finish_reasons:
+        span.set_attribute(GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons)
+
+    if output_messages:
+        set_span_attribute(
+            span,
+            GenAIAttributes.GEN_AI_OUTPUT_MESSAGES,
+            json.dumps(output_messages, cls=JSONEncoder),
+        )
diff --git a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/streaming.py b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/streaming.py
index 79cd28f043..361b71e1c8 100644
--- a/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/streaming.py
+++ b/packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/streaming.py
@@ -92,7 +92,7 @@ def _set_token_usage(
     set_span_attribute(
         span, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, completion_tokens
     )
-    set_span_attribute(span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens)
+    set_span_attribute(span, SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens)
     set_span_attribute(
         span, SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens
@@ -129,7 +129,7 @@ def _set_token_usage(
         1,
         attributes={
             **metric_attributes,
-            SpanAttributes.LLM_RESPONSE_FINISH_REASON: event.get(
+            SpanAttributes.GEN_AI_RESPONSE_FINISH_REASON: event.get(
                 "finish_reason"
             ),
         },
diff --git a/packages/opentelemetry-instrumentation-anthropic/pyproject.toml b/packages/opentelemetry-instrumentation-anthropic/pyproject.toml
index 9cdae8833a..216e77699a 100644
--- a/packages/opentelemetry-instrumentation-anthropic/pyproject.toml
+++ b/packages/opentelemetry-instrumentation-anthropic/pyproject.toml
@@ -13,7 +13,7 @@ requires-python = ">=3.10,<4"
 dependencies = [
     "opentelemetry-api>=1.38.0,<2",
     "opentelemetry-instrumentation>=0.59b0",
-    "opentelemetry-semantic-conventions-ai>=0.4.14,<0.5.0",
+    "opentelemetry-semantic-conventions-ai>=0.5.0,<0.6.0",
     "opentelemetry-semantic-conventions>=0.59b0",
 ]
@@ -74,3 +74,6 @@ select = ["E", "F", "W"]

 [tool.uv]
 constraint-dependencies = ["urllib3>=2.6.3", "pip>=25.3"]
+
+[tool.uv.sources]
+opentelemetry-semantic-conventions-ai = { path = "../opentelemetry-semantic-conventions-ai", editable = true }
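Reviewer note: the test changes below all follow one pattern, so it is worth stating once. Prompts and completions move from indexed attributes (gen_ai.prompt.N.role/content, gen_ai.completion.N.*) to two JSON-encoded message lists, and the finish reason becomes a sequence. A minimal sketch of the shapes the tests decode — the attribute names are the ones written by span_utils.py above, the message values are invented for illustration:

import json

# Hypothetical span attributes, shaped the way span_utils.py now writes them.
span_attributes = {
    # was gen_ai.prompt.0.role / gen_ai.prompt.0.content
    "gen_ai.input.messages": json.dumps(
        [{"role": "user", "content": "Tell me a joke about OpenTelemetry"}]
    ),
    # was gen_ai.completion.0.role / .content / .tool_calls.N.id/name/arguments
    "gen_ai.output.messages": json.dumps(
        [
            {
                "role": "assistant",
                "content": "Sure - here is a joke.",  # invented
                "tool_calls": [
                    {"id": "toolu_123", "name": "get_weather", "arguments": "{}"}  # invented
                ],
            }
        ]
    ),
    # was gen_ai.completion.0.finish_reason (a single string); OTel stores
    # sequence-valued span attributes as tuples.
    "gen_ai.response.finish_reasons": ("end_turn",),
}

output_messages = json.loads(span_attributes["gen_ai.output.messages"])
assert output_messages[-1]["role"] == "assistant"
assert output_messages[-1]["tool_calls"][0]["name"] == "get_weather"
assert "end_turn" in span_attributes["gen_ai.response.finish_reasons"]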
diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/test_bedrock_with_raw_response.py b/packages/opentelemetry-instrumentation-anthropic/tests/test_bedrock_with_raw_response.py
index 6d87f51093..f219dba577 100644
--- a/packages/opentelemetry-instrumentation-anthropic/tests/test_bedrock_with_raw_response.py
+++ b/packages/opentelemetry-instrumentation-anthropic/tests/test_bedrock_with_raw_response.py
@@ -1,3 +1,4 @@
+import json
 import os
 import pytest
 from opentelemetry.semconv._incubating.attributes import (
@@ -55,31 +56,24 @@ async def test_async_anthropic_bedrock_with_raw_response(
     assert all(span.name == "anthropic.chat" for span in spans)

     anthropic_span = spans[0]
-    assert (
-        anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"]
-        == "Tell me a joke about OpenTelemetry"
-    )
-    assert (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "user"
+    input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert input_messages[0]["content"] == "Tell me a joke about OpenTelemetry"
+    assert input_messages[0]["role"] == "user"

     # For raw response, content is accessed differently
     response_content = (
         response.parse().content[0].text
         if hasattr(response, "parse")
         else response.content[0].text
     )
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content")
-        == response_content
-    )
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role")
-        == "assistant"
-    )
+    output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert output_messages[-1]["content"] == response_content
+    assert output_messages[-1]["role"] == "assistant"
     assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] > 0
     assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] > 0
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )
@@ -110,25 +104,18 @@ async def test_async_anthropic_bedrock_regular_create(
     assert all(span.name == "anthropic.chat" for span in spans)

     anthropic_span = spans[0]
-    assert (
-        anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"]
-        == "Tell me a joke about OpenTelemetry"
-    )
-    assert (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "user"
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content")
-        == response.content[0].text
-    )
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role")
-        == "assistant"
-    )
+    input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert input_messages[0]["content"] == "Tell me a joke about OpenTelemetry"
+    assert input_messages[0]["role"] == "user"
+    output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert output_messages[-1]["content"] == response.content[0].text
+    assert output_messages[-1]["role"] == "assistant"
     assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] > 0
     assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] > 0
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )
@@ -161,29 +148,22 @@ async def test_async_anthropic_bedrock_beta_with_raw_response(
     assert all(span.name == "anthropic.chat" for span in spans)

     anthropic_span = spans[0]
-    assert (
-        anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"]
-        == "Tell me a joke about OpenTelemetry"
-    )
-    assert (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "user"
+    input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert input_messages[0]["content"] == "Tell me a joke about OpenTelemetry"
+    assert input_messages[0]["role"] == "user"

     # For raw response, content is accessed differently
     response_content = (
         response.parse().content[0].text
         if hasattr(response, "parse")
         else response.content[0].text
     )
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content")
-        == response_content
-    )
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role")
-        == "assistant"
-    )
+    output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert output_messages[-1]["content"] == response_content
+    assert output_messages[-1]["role"] == "assistant"
     assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] > 0
     assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] > 0
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )
diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/test_completion.py b/packages/opentelemetry-instrumentation-anthropic/tests/test_completion.py
index 4f61b82110..5525ab6e31 100644
--- a/packages/opentelemetry-instrumentation-anthropic/tests/test_completion.py
+++ b/packages/opentelemetry-instrumentation-anthropic/tests/test_completion.py
@@ -1,3 +1,4 @@
+import json
 import pytest
 from anthropic import AI_PROMPT, HUMAN_PROMPT
 from opentelemetry.sdk._logs import ReadableLogRecord
@@ -29,11 +30,11 @@ def test_anthropic_completion_legacy(
     assert all(span.name == "anthropic.completion" for span in spans)

     anthropic_span = spans[0]
-    assert (
-        anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.user"]
-        == f"{HUMAN_PROMPT}\nHello world\n{AI_PROMPT}"
-    )
-    assert anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content")
+    input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert input_messages[0]["content"] == f"{HUMAN_PROMPT}\nHello world\n{AI_PROMPT}"
+    assert input_messages[0]["role"] == "user"
+    output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert output_messages[-1]["content"]
     assert (
         anthropic_span.attributes.get("gen_ai.response.id")
         == "compl_01EjfrPvPEsRDRUKD6VoBxtK"
diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/test_messages.py b/packages/opentelemetry-instrumentation-anthropic/tests/test_messages.py
index 9abad4c93c..34bba1284f 100644
--- a/packages/opentelemetry-instrumentation-anthropic/tests/test_messages.py
+++ b/packages/opentelemetry-instrumentation-anthropic/tests/test_messages.py
@@ -89,24 +89,17 @@ def test_anthropic_message_create_legacy(
     assert all(span.name == "anthropic.chat" for span in spans)

     anthropic_span = spans[0]
-    assert (
-        anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"]
-        == "Tell me a joke about OpenTelemetry"
-    )
-    assert (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "user"
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content")
-        == response.content[0].text
-    )
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role")
-        == "assistant"
-    )
+    input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert input_messages[0]["content"] == "Tell me a joke about OpenTelemetry"
+    assert input_messages[0]["role"] == "user"
+    output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert output_messages[-1]["content"] == response.content[0].text
+    assert output_messages[-1]["role"] == "assistant"
     assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 17
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )
     assert (
         anthropic_span.attributes.get("gen_ai.response.id")
@@ -153,7 +146,7 @@ def test_anthropic_message_create_with_events_with_content(
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )

     metrics_data = reader.get_metrics_data()
@@ -218,7 +211,7 @@ def test_anthropic_message_create_with_events_with_no_content(
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )

     metrics_data = reader.get_metrics_data()
@@ -268,28 +261,20 @@ def test_anthropic_multi_modal_legacy(
         "anthropic.chat",
     ]
     anthropic_span = spans[0]
-    assert anthropic_span.attributes[
-        f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"
-    ] == json.dumps(
-        [
-            {"type": "text", "text": "What do you see?"},
-            {"type": "image_url", "image_url": {"url": "/some/url"}},
-        ]
-    )
-    assert (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "user"
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content")
-        == response.content[0].text
-    )
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role")
-        == "assistant"
-    )
+    input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert json.loads(input_messages[0]["content"]) == [
+        {"type": "text", "text": "What do you see?"},
+        {"type": "image_url", "image_url": {"url": "/some/url"}},
+    ]
+    assert input_messages[0]["role"] == "user"
+    output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert output_messages[-1]["content"] == response.content[0].text
+    assert output_messages[-1]["role"] == "assistant"
     assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 1381
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )
     assert (
         anthropic_span.attributes.get("gen_ai.response.id")
@@ -343,7 +328,7 @@ def test_anthropic_multi_modal_with_events_with_content(
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )

     logs = log_exporter.get_finished_logs()
@@ -404,7 +389,7 @@ def test_anthropic_multi_modal_with_events_with_no_content(
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )

     logs = log_exporter.get_finished_logs()
@@ -463,55 +448,34 @@ def test_anthropic_image_with_history(
     spans = span_exporter.get_finished_spans()
     assert all(span.name == "anthropic.chat" for span in spans)
-    assert (
-        spans[0].attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"] == system_message
-    )
-    assert spans[0].attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"] == "system"
-    assert (
-        spans[0].attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.content"]
-        == "Are you capable of describing an image?"
-    )
-    assert spans[0].attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.role"] == "user"
-    assert (
-        spans[0].attributes[f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content"]
-        == response1.content[0].text
-    )
-    assert (
-        spans[0].attributes[f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role"] == "assistant"
-    )
+
+    # span 0: system + first user message
+    assert spans[0].attributes[GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS] == system_message
+    span0_input = json.loads(spans[0].attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert span0_input[0]["content"] == "Are you capable of describing an image?"
+    assert span0_input[0]["role"] == "user"
+    span0_output = json.loads(spans[0].attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert span0_output[-1]["content"] == response1.content[0].text
+    assert span0_output[-1]["role"] == "assistant"
     assert (
         spans[0].attributes.get("gen_ai.response.id") == "msg_01Ctc62hUPvikvYASXZqTo9q"
     )

-    assert (
-        spans[1].attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"] == system_message
-    )
-    assert spans[1].attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"] == "system"
-    assert (
-        spans[1].attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.content"]
-        == "Are you capable of describing an image?"
-    )
-    assert spans[1].attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.1.role"] == "user"
-    assert (
-        spans[1].attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.2.content"]
-        == response1.content[0].text
-    )
-    assert spans[1].attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.2.role"] == "assistant"
-    assert json.loads(
-        spans[1].attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.3.content"]
-    ) == [
+    # span 1: system + multi-turn
+    assert spans[1].attributes[GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS] == system_message
+    span1_input = json.loads(spans[1].attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert span1_input[0]["content"] == "Are you capable of describing an image?"
+    assert span1_input[0]["role"] == "user"
+    assert span1_input[1]["content"] == response1.content[0].text
+    assert span1_input[1]["role"] == "assistant"
+    assert json.loads(span1_input[2]["content"]) == [
         {"type": "text", "text": "What do you see?"},
         {"type": "image_url", "image_url": {"url": "/some/url"}},
     ]
-    assert spans[1].attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.3.role"] == "user"
-
-    assert (
-        spans[1].attributes[f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content"]
-        == response2.content[0].text
-    )
-    assert (
-        spans[1].attributes[f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role"] == "assistant"
-    )
+    assert span1_input[2]["role"] == "user"
+    span1_output = json.loads(spans[1].attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert span1_output[-1]["content"] == response2.content[0].text
+    assert span1_output[-1]["role"] == "assistant"
     assert (
         spans[1].attributes.get("gen_ai.response.id") == "msg_01EtAvxHCWn5jjdUCnG4wEAd"
     )
@@ -549,28 +513,20 @@ async def test_anthropic_async_multi_modal_legacy(
         "anthropic.chat",
     ]
     anthropic_span = spans[0]
-    assert anthropic_span.attributes[
-        f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"
-    ] == json.dumps(
-        [
-            {"type": "text", "text": "What do you see?"},
-            {"type": "image_url", "image_url": {"url": "/some/url"}},
-        ]
-    )
-    assert (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "user"
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content")
-        == response.content[0].text
-    )
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role")
-        == "assistant"
-    )
+    input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert json.loads(input_messages[0]["content"]) == [
+        {"type": "text", "text": "What do you see?"},
+        {"type": "image_url", "image_url": {"url": "/some/url"}},
+    ]
+    assert input_messages[0]["role"] == "user"
+    output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert output_messages[-1]["content"] == response.content[0].text
+    assert output_messages[-1]["role"] == "assistant"
     assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 1311
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )
     assert (
         anthropic_span.attributes.get("gen_ai.response.id")
@@ -625,7 +581,7 @@ async def test_anthropic_async_multi_modal_with_events_with_content(
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )

     logs = log_exporter.get_finished_logs()
@@ -687,7 +643,7 @@ async def test_anthropic_async_multi_modal_with_events_with_no_content(
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )

     logs = log_exporter.get_finished_logs()
@@ -738,24 +694,17 @@ def test_anthropic_message_streaming_legacy(
         "anthropic.chat",
     ]
     anthropic_span = spans[0]
-    assert (
-        anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"]
-        == "Tell me a joke about OpenTelemetry"
-    )
-    assert (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "user"
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content")
-        == response_content
-    )
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role")
-        == "assistant"
-    )
+    input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert input_messages[0]["content"] == "Tell me a joke about OpenTelemetry"
+    assert input_messages[0]["role"] == "user"
+    output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert output_messages[-1]["content"] == response_content
+    assert output_messages[-1]["role"] == "assistant"
     assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 17
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )
     assert (
         anthropic_span.attributes.get("gen_ai.response.id")
@@ -809,7 +758,7 @@ def test_anthropic_message_streaming_with_events_with_content(
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )

     metrics_data = reader.get_metrics_data()
@@ -874,7 +823,7 @@ def test_anthropic_message_streaming_with_events_with_no_content(
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )

     metrics_data = reader.get_metrics_data()
@@ -925,24 +874,17 @@ async def test_async_anthropic_message_create_legacy(
         "anthropic.chat",
     ]
     anthropic_span = spans[0]
-    assert (
-        anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"]
-        == "Tell me a joke about OpenTelemetry"
-    )
-    assert (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "user"
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content")
-        == response.content[0].text
-    )
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role")
-        == "assistant"
-    )
+    input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert input_messages[0]["content"] == "Tell me a joke about OpenTelemetry"
+    assert input_messages[0]["role"] == "user"
+    output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert output_messages[-1]["content"] == response.content[0].text
+    assert output_messages[-1]["role"] == "assistant"
     assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 17
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )
     assert (
         anthropic_span.attributes.get("gen_ai.response.id")
@@ -989,7 +931,7 @@ async def test_async_anthropic_message_create_with_events_with_content(
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )

     metrics_data = reader.get_metrics_data()
@@ -1047,7 +989,7 @@ async def test_async_anthropic_message_create_with_events_with_no_content(
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )

     metrics_data = reader.get_metrics_data()
@@ -1103,24 +1045,17 @@ async def test_async_anthropic_message_streaming_legacy(
         "anthropic.chat",
     ]
     anthropic_span = spans[0]
-    assert (
-        anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"]
-        == "Tell me a joke about OpenTelemetry"
-    )
-    assert (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "user"
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content")
-        == response_content
-    )
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role")
-        == "assistant"
-    )
+    input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert input_messages[0]["content"] == "Tell me a joke about OpenTelemetry"
+    assert input_messages[0]["role"] == "user"
+    output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert output_messages[-1]["content"] == response_content
+    assert output_messages[-1]["role"] == "assistant"
     assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 17
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )
     assert (
         anthropic_span.attributes.get("gen_ai.response.id")
@@ -1174,7 +1109,7 @@ async def test_async_anthropic_message_streaming_with_events_with_content(
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )

     metrics_data = reader.get_metrics_data()
@@ -1238,7 +1173,7 @@ async def test_async_anthropic_message_streaming_with_events_with_no_content(
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )

     metrics_data = reader.get_metrics_data()
@@ -1295,97 +1230,59 @@ def test_anthropic_tools_legacy(
     assert (
         anthropic_span.attributes["gen_ai.usage.output_tokens"]
         + anthropic_span.attributes["gen_ai.usage.input_tokens"]
-        == anthropic_span.attributes["llm.usage.total_tokens"]
+        == anthropic_span.attributes["gen_ai.usage.total_tokens"]
     )

     # verify request and inputs
-    assert (
-        anthropic_span.attributes["gen_ai.prompt.0.content"]
-        == "What is the weather like right now in New York? Also what time is it there now?"
-    )
-    assert anthropic_span.attributes["gen_ai.prompt.0.role"] == "user"
-    assert anthropic_span.attributes["llm.request.functions.0.name"] == "get_weather"
-    assert (
-        anthropic_span.attributes["llm.request.functions.0.description"]
-        == "Get the current weather in a given location"
-    )
-    assert anthropic_span.attributes[
-        "llm.request.functions.0.input_schema"
-    ] == json.dumps(
-        {
-            "type": "object",
-            "properties": {
-                "location": {
-                    "type": "string",
-                    "description": "The city and state, e.g. San Francisco, CA",
-                },
-                "unit": {
-                    "type": "string",
-                    "enum": ["celsius", "fahrenheit"],
-                    "description": "The unit of temperature, either 'celsius' or 'fahrenheit'",
-                },
-            },
-            "required": ["location"],
-        }
-    )
-    assert anthropic_span.attributes["llm.request.functions.1.name"] == "get_time"
-    assert (
-        anthropic_span.attributes["llm.request.functions.1.description"]
-        == "Get the current time in a given time zone"
-    )
-    assert anthropic_span.attributes[
-        "llm.request.functions.1.input_schema"
-    ] == json.dumps(
-        {
-            "type": "object",
-            "properties": {
-                "timezone": {
-                    "type": "string",
-                    "description": "The IANA time zone name, e.g. America/Los_Angeles",
-                }
-            },
-            "required": ["timezone"],
-        }
-    )
+    input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert input_messages[0]["content"] == (
+        "What is the weather like right now in New York? Also what time is it there now?"
+    )
+    assert input_messages[0]["role"] == "user"
+    tool_defs = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_TOOL_DEFINITIONS])
+    assert tool_defs[0]["name"] == "get_weather"
+    assert tool_defs[0]["description"] == "Get the current weather in a given location"
+    assert tool_defs[0]["input_schema"] == {
+        "type": "object",
+        "properties": {
+            "location": {
+                "type": "string",
+                "description": "The city and state, e.g. San Francisco, CA",
+            },
+            "unit": {
+                "type": "string",
+                "enum": ["celsius", "fahrenheit"],
+                "description": "The unit of temperature, either 'celsius' or 'fahrenheit'",
+            },
+        },
+        "required": ["location"],
+    }
+    assert tool_defs[1]["name"] == "get_time"
+    assert tool_defs[1]["description"] == "Get the current time in a given time zone"
+    assert tool_defs[1]["input_schema"] == {
+        "type": "object",
+        "properties": {
+            "timezone": {
+                "type": "string",
+                "description": "The IANA time zone name, e.g. America/Los_Angeles",
+            }
+        },
+        "required": ["timezone"],
+    }

     # verify response and output
-    assert (
-        anthropic_span.attributes["gen_ai.completion.0.finish_reason"]
-        == response.stop_reason
-    )
-    assert (
-        anthropic_span.attributes["gen_ai.completion.0.content"]
-        == response.content[0].text
-    )
-    assert anthropic_span.attributes["gen_ai.completion.0.role"] == "assistant"
-
-    assert (
-        (anthropic_span.attributes["gen_ai.completion.0.tool_calls.0.id"])
-        == response.content[1].id
-    )
-    assert (
-        (anthropic_span.attributes["gen_ai.completion.0.tool_calls.0.name"])
-        == response.content[1].name
-    )
-    response_input = json.dumps(response.content[1].input)
-    assert (
-        anthropic_span.attributes["gen_ai.completion.0.tool_calls.0.arguments"]
-        == response_input
-    )
-
-    assert (
-        (anthropic_span.attributes["gen_ai.completion.0.tool_calls.1.id"])
-        == response.content[2].id
-    )
-    assert (
-        (anthropic_span.attributes["gen_ai.completion.0.tool_calls.1.name"])
-        == response.content[2].name
-    )
-    response_input = json.dumps(response.content[2].input)
-    assert (
-        anthropic_span.attributes["gen_ai.completion.0.tool_calls.1.arguments"]
-        == response_input
-    )
+    finish_reasons = anthropic_span.attributes[GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS]
+    assert response.stop_reason in finish_reasons
+    output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert output_messages[-1]["content"] == response.content[0].text
+    assert output_messages[-1]["role"] == "assistant"
+    tool_calls = output_messages[-1]["tool_calls"]
+    assert tool_calls[0]["id"] == response.content[1].id
+    assert tool_calls[0]["name"] == response.content[1].name
+    assert json.loads(tool_calls[0]["arguments"]) == response.content[1].input
+    assert tool_calls[1]["id"] == response.content[2].id
+    assert tool_calls[1]["name"] == response.content[2].name
+    assert json.loads(tool_calls[1]["arguments"]) == response.content[2].input

     assert (
         anthropic_span.attributes.get("gen_ai.response.id")
         == "msg_01RBkXFe9TmDNNWThMz2HmGt"
@@ -1435,7 +1332,7 @@ def test_anthropic_tools_with_events_with_content(
     assert (
         anthropic_span.attributes["gen_ai.usage.output_tokens"]
         + anthropic_span.attributes["gen_ai.usage.input_tokens"]
-        == anthropic_span.attributes["llm.usage.total_tokens"]
+        == anthropic_span.attributes["gen_ai.usage.total_tokens"]
     )

     # verify metrics
@@ -1537,7 +1434,7 @@ def test_anthropic_tools_with_events_with_no_content(
     assert (
         anthropic_span.attributes["gen_ai.usage.output_tokens"]
         + anthropic_span.attributes["gen_ai.usage.input_tokens"]
-        == anthropic_span.attributes["llm.usage.total_tokens"]
+        == anthropic_span.attributes["gen_ai.usage.total_tokens"]
    )

     # verify metrics
@@ -1658,7 +1555,7 @@ def test_anthropic_tools_history_legacy(
     assert (
         anthropic_span.attributes["gen_ai.usage.output_tokens"]
         + anthropic_span.attributes["gen_ai.usage.input_tokens"]
-        == anthropic_span.attributes["llm.usage.total_tokens"]
+        == anthropic_span.attributes["gen_ai.usage.total_tokens"]
     )

     # verify metrics
@@ -1667,84 +1564,60 @@ def test_anthropic_tools_history_legacy(
     verify_metrics(resource_metrics, "claude-3-5-haiku-20241022")

     # verify request and inputs
-    assert (
-        anthropic_span.attributes["gen_ai.prompt.0.content"]
-        == "What is the weather and current time in San Francisco?"
-    )
-    assert anthropic_span.attributes["gen_ai.prompt.0.role"] == "user"
-    prompt_1_content = json.loads(anthropic_span.attributes["gen_ai.prompt.1.content"])
-    assert prompt_1_content[0]["text"] == "I'll help you get the weather and current time in San Francisco."
-    assert anthropic_span.attributes["gen_ai.prompt.1.role"] == "assistant"
-    assert json.loads(anthropic_span.attributes["gen_ai.prompt.2.content"]) == [
+    input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert input_messages[0]["content"] == "What is the weather and current time in San Francisco?"
+    assert input_messages[0]["role"] == "user"
+    assert input_messages[1]["content"] == "I'll help you get the weather and current time in San Francisco."
+    assert input_messages[1]["role"] == "assistant"
+    assert json.loads(input_messages[2]["content"]) == [
         {
             "type": "tool_result",
             "content": "Sunny and 65 degrees Fahrenheit",
             "tool_use_id": "call_1",
         }
     ]
-    assert anthropic_span.attributes["gen_ai.prompt.2.role"] == "user"
-    assert anthropic_span.attributes["llm.request.functions.0.name"] == "get_weather"
-    assert (
-        anthropic_span.attributes["llm.request.functions.0.description"]
-        == "Get the current weather in a given location"
-    )
-    assert anthropic_span.attributes[
-        "llm.request.functions.0.input_schema"
-    ] == json.dumps(
-        {
-            "type": "object",
-            "properties": {
-                "location": {
-                    "type": "string",
-                    "description": "The city and state, e.g. San Francisco, CA",
-                },
-                "unit": {
-                    "type": "string",
-                    "enum": ["celsius", "fahrenheit"],
-                    "description": "The unit of temperature, either 'celsius' or 'fahrenheit'",
-                },
-            },
-            "required": ["location"],
-        }
-    )
-    assert anthropic_span.attributes["llm.request.functions.1.name"] == "get_time"
-    assert (
-        anthropic_span.attributes["llm.request.functions.1.description"]
-        == "Get the current time in a given time zone"
-    )
-    assert anthropic_span.attributes[
-        "llm.request.functions.1.input_schema"
-    ] == json.dumps(
-        {
-            "type": "object",
-            "properties": {
-                "timezone": {
-                    "type": "string",
-                    "description": "The IANA time zone name, e.g. America/Los_Angeles",
-                }
-            },
-            "required": ["timezone"],
-        }
-    )
+    assert input_messages[2]["role"] == "user"
+
+    tool_defs = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_TOOL_DEFINITIONS])
+    assert tool_defs[0]["name"] == "get_weather"
+    assert tool_defs[0]["description"] == "Get the current weather in a given location"
+    assert tool_defs[0]["input_schema"] == {
+        "type": "object",
+        "properties": {
+            "location": {
+                "type": "string",
+                "description": "The city and state, e.g. San Francisco, CA",
+            },
+            "unit": {
+                "type": "string",
+                "enum": ["celsius", "fahrenheit"],
+                "description": "The unit of temperature, either 'celsius' or 'fahrenheit'",
+            },
+        },
+        "required": ["location"],
+    }
+    assert tool_defs[1]["name"] == "get_time"
+    assert tool_defs[1]["description"] == "Get the current time in a given time zone"
+    assert tool_defs[1]["input_schema"] == {
+        "type": "object",
+        "properties": {
+            "timezone": {
+                "type": "string",
+                "description": "The IANA time zone name, e.g. America/Los_Angeles",
+            }
+        },
+        "required": ["timezone"],
+    }

     # verify response and output
-    assert (
-        anthropic_span.attributes["gen_ai.completion.0.finish_reason"]
-        == response.stop_reason
-    )
-    assert anthropic_span.attributes["gen_ai.completion.0.role"] == "assistant"
-
-    assert (
-        anthropic_span.attributes["gen_ai.completion.0.tool_calls.0.id"]
-    ) == response.content[0].id
-    assert (
-        anthropic_span.attributes["gen_ai.completion.0.tool_calls.0.name"]
-    ) == response.content[0].name
-    response_input = json.dumps(response.content[0].input)
-    assert (
-        anthropic_span.attributes["gen_ai.completion.0.tool_calls.0.arguments"]
-        == response_input
-    )
+    finish_reasons = anthropic_span.attributes[GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS]
+    assert response.stop_reason in finish_reasons
+    output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert output_messages[-1]["role"] == "assistant"
+    tool_calls = output_messages[-1]["tool_calls"]
+    assert tool_calls[0]["id"] == response.content[0].id
+    assert tool_calls[0]["name"] == response.content[0].name
+    assert json.loads(tool_calls[0]["arguments"]) == response.content[0].input

     assert (
         anthropic_span.attributes.get("gen_ai.response.id")
@@ -1814,7 +1687,7 @@ def test_anthropic_tools_history_with_events_with_content(
     assert (
         anthropic_span.attributes["gen_ai.usage.output_tokens"]
         + anthropic_span.attributes["gen_ai.usage.input_tokens"]
-        == anthropic_span.attributes["llm.usage.total_tokens"]
+        == anthropic_span.attributes["gen_ai.usage.total_tokens"]
     )

     # verify metrics
@@ -1916,7 +1789,7 @@ def test_anthropic_tools_history_with_events_with_no_content(
     assert (
         anthropic_span.attributes["gen_ai.usage.output_tokens"]
         + anthropic_span.attributes["gen_ai.usage.input_tokens"]
-        == anthropic_span.attributes["llm.usage.total_tokens"]
+        == anthropic_span.attributes["gen_ai.usage.total_tokens"]
     )

     # verify metrics
@@ -1998,7 +1871,7 @@ def test_anthropic_tools_streaming_legacy(
     assert (
         anthropic_span.attributes["gen_ai.usage.output_tokens"]
         + anthropic_span.attributes["gen_ai.usage.input_tokens"]
-        == anthropic_span.attributes["llm.usage.total_tokens"]
+        == anthropic_span.attributes["gen_ai.usage.total_tokens"]
     )

     # verify metrics
@@ -2007,73 +1880,56 @@ def test_anthropic_tools_streaming_legacy(
     verify_metrics(resource_metrics, "claude-3-5-sonnet-20240620")

     # verify request and inputs
-    assert (
-        anthropic_span.attributes["gen_ai.prompt.0.content"]
-        == "What is the weather and current time in San Francisco?"
-    )
-    assert anthropic_span.attributes["gen_ai.prompt.0.role"] == "user"
-    assert anthropic_span.attributes["llm.request.functions.0.name"] == "get_weather"
-    assert (
-        anthropic_span.attributes["llm.request.functions.0.description"]
-        == "Get the current weather in a given location"
-    )
-    assert anthropic_span.attributes[
-        "llm.request.functions.0.input_schema"
-    ] == json.dumps(
-        {
-            "type": "object",
-            "properties": {
-                "location": {
-                    "type": "string",
-                    "description": "The city and state, e.g. San Francisco, CA",
-                },
-                "unit": {
-                    "type": "string",
-                    "enum": ["celsius", "fahrenheit"],
-                    "description": "The unit of temperature, either 'celsius' or 'fahrenheit'",
-                },
-            },
-            "required": ["location"],
-        }
-    )
-    assert anthropic_span.attributes["llm.request.functions.1.name"] == "get_time"
-    assert (
-        anthropic_span.attributes["llm.request.functions.1.description"]
-        == "Get the current time in a given time zone"
-    )
-    assert anthropic_span.attributes[
-        "llm.request.functions.1.input_schema"
-    ] == json.dumps(
-        {
-            "type": "object",
-            "properties": {
-                "timezone": {
-                    "type": "string",
-                    "description": "The IANA time zone name, e.g. America/Los_Angeles",
-                }
-            },
-            "required": ["timezone"],
-        }
-    )
+    input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert input_messages[0]["content"] == "What is the weather and current time in San Francisco?"
+    assert input_messages[0]["role"] == "user"
+    tool_defs = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_TOOL_DEFINITIONS])
+    assert tool_defs[0]["name"] == "get_weather"
+    assert tool_defs[0]["description"] == "Get the current weather in a given location"
+    assert tool_defs[0]["input_schema"] == {
+        "type": "object",
+        "properties": {
+            "location": {
+                "type": "string",
+                "description": "The city and state, e.g. San Francisco, CA",
+            },
+            "unit": {
+                "type": "string",
+                "enum": ["celsius", "fahrenheit"],
+                "description": "The unit of temperature, either 'celsius' or 'fahrenheit'",
+            },
+        },
+        "required": ["location"],
+    }
+    assert tool_defs[1]["name"] == "get_time"
+    assert tool_defs[1]["description"] == "Get the current time in a given time zone"
+    assert tool_defs[1]["input_schema"] == {
+        "type": "object",
+        "properties": {
+            "timezone": {
+                "type": "string",
+                "description": "The IANA time zone name, e.g. America/Los_Angeles",
+            }
+        },
+        "required": ["timezone"],
+    }

     # verify response and output
-    assert (
-        anthropic_span.attributes["gen_ai.completion.0.content"]
-        == "Certainly! I can help you with that information. "
+    output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    text_msg = next(m for m in output_messages if m.get("content"))
+    assert text_msg["content"] == (
+        "Certainly! I can help you with that information. "
         "To get the weather and current time in San Francisco, I'll need to use "
         "two separate functions. Let me fetch that data for you."
     )
-    assert anthropic_span.attributes["gen_ai.completion.0.role"] == "assistant"
-
-    assert (
-        anthropic_span.attributes["gen_ai.completion.0.tool_calls.0.id"]
-    ) == "toolu_0121kXsENLvoDZ72LCuAnCCz"
-    assert (
-        anthropic_span.attributes["gen_ai.completion.0.tool_calls.0.name"]
-    ) == "get_time"
-    assert json.loads(
-        anthropic_span.attributes["gen_ai.completion.0.tool_calls.0.arguments"]
-    ) == {"timezone": "America/Los_Angeles"}
+    assert text_msg["role"] == "assistant"
+    tool_msgs = [m for m in output_messages if m.get("tool_calls")]
+    assert len(tool_msgs) == 2
+    assert tool_msgs[0]["tool_calls"][0]["id"] == "toolu_014x5X91kx3fvdhpLvwXZWE2"
+    assert tool_msgs[0]["tool_calls"][0]["name"] == "get_weather"
+    assert tool_msgs[1]["tool_calls"][0]["id"] == "toolu_0121kXsENLvoDZ72LCuAnCCz"
+    assert tool_msgs[1]["tool_calls"][0]["name"] == "get_time"
+    assert json.loads(tool_msgs[1]["tool_calls"][0]["arguments"]) == {"timezone": "America/Los_Angeles"}

     assert (
         anthropic_span.attributes.get("gen_ai.response.id")
@@ -2124,7 +1980,7 @@ def test_anthropic_tools_streaming_with_events_with_content(
     assert (
         anthropic_span.attributes["gen_ai.usage.output_tokens"]
         + anthropic_span.attributes["gen_ai.usage.input_tokens"]
-        == anthropic_span.attributes["llm.usage.total_tokens"]
+        == anthropic_span.attributes["gen_ai.usage.total_tokens"]
     )

     # verify metrics
@@ -2236,7 +2092,7 @@ def test_anthropic_tools_streaming_with_events_with_no_content(
     assert (
         anthropic_span.attributes["gen_ai.usage.output_tokens"]
         + anthropic_span.attributes["gen_ai.usage.input_tokens"]
-        == anthropic_span.attributes["llm.usage.total_tokens"]
+        == anthropic_span.attributes["gen_ai.usage.total_tokens"]
     )

     # verify metrics
@@ -2484,24 +2340,17 @@ def test_anthropic_message_stream_manager_legacy(
         "anthropic.chat",
     ]
     anthropic_span = spans[0]
-    assert (
-        anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"]
-        == "Tell me a joke about OpenTelemetry"
-    )
-    assert (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "user"
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content")
-        == response_content
-    )
-    assert (
-        anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role")
-        == "assistant"
-    )
+    input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert input_messages[0]["content"] == "Tell me a joke about OpenTelemetry"
+    assert input_messages[0]["role"] == "user"
+    output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])
+    assert output_messages[-1]["content"] == response_content
+    assert output_messages[-1]["role"] == "assistant"
     assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 17
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )
     assert (
         anthropic_span.attributes.get("gen_ai.response.id")
@@ -2554,7 +2403,7 @@ def test_anthropic_message_stream_manager_with_events_with_content(
     assert (
         anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS]
         + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS]
-        == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]
+        == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS]
     )

     metrics_data =
reader.get_metrics_data() @@ -2618,7 +2467,7 @@ def test_anthropic_message_stream_manager_with_events_with_no_content( assert ( anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] - == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] + == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] ) metrics_data = reader.get_metrics_data() @@ -2674,24 +2523,17 @@ async def test_async_anthropic_message_stream_manager_legacy( "anthropic.chat", ] anthropic_span = spans[0] - assert ( - anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"] - == "Tell me a joke about OpenTelemetry" - ) - assert (anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"]) == "user" - assert ( - anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content") - == response_content - ) - assert ( - anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role") - == "assistant" - ) + input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert input_messages[0]["content"] == "Tell me a joke about OpenTelemetry" + assert input_messages[0]["role"] == "user" + output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + assert output_messages[-1]["content"] == response_content + assert output_messages[-1]["role"] == "assistant" assert anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] == 17 assert ( anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] - == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] + == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] ) assert ( anthropic_span.attributes.get("gen_ai.response.id") @@ -2744,7 +2586,7 @@ async def test_async_anthropic_message_stream_manager_with_events_with_content( assert ( anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] - == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] + == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] ) metrics_data = reader.get_metrics_data() @@ -2807,7 +2649,7 @@ async def test_async_anthropic_message_stream_manager_with_events_with_no_conten assert ( anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS] + anthropic_span.attributes[GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS] - == anthropic_span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] + == anthropic_span.attributes[SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS] ) metrics_data = reader.get_metrics_data() @@ -2941,11 +2783,9 @@ async def test_async_anthropic_beta_message_stream_manager_legacy( "anthropic.chat", ] anthropic_span = spans[0] - assert ( - anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"] - == "Tell me a joke about OpenTelemetry" - ) - assert anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"] == "user" + input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert input_messages[0]["content"] == "Tell me a joke about OpenTelemetry" + assert input_messages[0]["role"] == "user" logs = log_exporter.get_finished_logs() assert len(logs) == 0, ( diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/test_prompt_caching.py 
b/packages/opentelemetry-instrumentation-anthropic/tests/test_prompt_caching.py
index 67988867b1..775abc4048 100644
--- a/packages/opentelemetry-instrumentation-anthropic/tests/test_prompt_caching.py
+++ b/packages/opentelemetry-instrumentation-anthropic/tests/test_prompt_caching.py
@@ -1,4 +1,5 @@
 from pathlib import Path
+import json
 
 import pytest
 from opentelemetry.sdk._logs import ReadableLogRecord
@@ -19,30 +20,30 @@ def _verify_caching_attributes(
     cached_tokens: int,
 ):
     assert (
-        cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"]
-        == cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"]
+        cache_creation_span.attributes["gen_ai.usage.cache_creation.input_tokens"]
+        == cache_read_span.attributes["gen_ai.usage.cache_read.input_tokens"]
     )
     # first check that cache_creation_span only wrote to the cache and did not read from it,
-    assert cache_creation_span.attributes["gen_ai.usage.cache_read_input_tokens"] == 0
+    assert cache_creation_span.attributes["gen_ai.usage.cache_read.input_tokens"] == 0
     assert (
-        cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] != 0
+        cache_creation_span.attributes["gen_ai.usage.cache_creation.input_tokens"] != 0
     )
     # then check for exact figures for the fixture/cassette
     assert (
-        cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"]
+        cache_creation_span.attributes["gen_ai.usage.cache_creation.input_tokens"]
         == cached_tokens
     )
     assert cache_creation_span.attributes["gen_ai.usage.input_tokens"] == input_tokens
     assert cache_creation_span.attributes["gen_ai.usage.output_tokens"] == cache_creation_span_output_tokens
 
     # first check that cache_read_span only read from the cache and did not write to it,
-    assert cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] != 0
-    assert cache_read_span.attributes["gen_ai.usage.cache_creation_input_tokens"] == 0
+    assert cache_read_span.attributes["gen_ai.usage.cache_read.input_tokens"] != 0
+    assert cache_read_span.attributes["gen_ai.usage.cache_creation.input_tokens"] == 0
     # then check for exact figures for the fixture/cassette
-    assert cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] == cached_tokens
+    assert cache_read_span.attributes["gen_ai.usage.cache_read.input_tokens"] == cached_tokens
     assert cache_read_span.attributes["gen_ai.usage.input_tokens"] == input_tokens
     assert cache_read_span.attributes["gen_ai.usage.output_tokens"] == cache_read_span_output_tokens
@@ -98,19 +99,19 @@ def test_anthropic_prompt_caching_legacy(
     cache_creation_span = spans[0]
     cache_read_span = spans[1]
 
-    assert cache_creation_span.attributes["gen_ai.prompt.0.role"] == "system"
-    assert system_message == cache_creation_span.attributes["gen_ai.prompt.0.content"]
-    assert cache_read_span.attributes["gen_ai.prompt.0.role"] == "system"
-    assert system_message == cache_read_span.attributes["gen_ai.prompt.0.content"]
+    assert cache_creation_span.attributes[GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS] == system_message
+    assert cache_read_span.attributes[GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS] == system_message
 
-    assert cache_creation_span.attributes["gen_ai.prompt.1.role"] == "user"
-    assert text == cache_creation_span.attributes["gen_ai.prompt.1.content"]
-    assert cache_read_span.attributes["gen_ai.prompt.1.role"] == "user"
-    assert text == cache_read_span.attributes["gen_ai.prompt.1.content"]
+    cc_input = json.loads(cache_creation_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES])
+    assert cc_input[0]["role"] == "user"
+    assert text ==
cc_input[0]["content"] + cr_input = json.loads(cache_read_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert cr_input[0]["role"] == "user" + assert text == cr_input[0]["content"] assert ( - cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] - == cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] + cache_creation_span.attributes["gen_ai.usage.cache_creation.input_tokens"] + == cache_read_span.attributes["gen_ai.usage.cache_read.input_tokens"] ) assert ( @@ -122,8 +123,8 @@ def test_anthropic_prompt_caching_legacy( == "msg_01YGB3PuEANUSkLuzemhtNVF" ) - assert cache_creation_span.attributes["gen_ai.completion.0.role"] == "assistant" - assert cache_read_span.attributes["gen_ai.completion.0.role"] == "assistant" + assert json.loads(cache_creation_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])[-1]["role"] == "assistant" + assert json.loads(cache_read_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])[-1]["role"] == "assistant" _verify_caching_attributes(cache_creation_span, cache_read_span, 1167, 187, 202, 1163) @@ -435,15 +436,15 @@ async def test_anthropic_prompt_caching_async_legacy( cache_creation_span = spans[0] cache_read_span = spans[1] - assert cache_creation_span.attributes["gen_ai.prompt.0.role"] == "system" - assert system_message == cache_creation_span.attributes["gen_ai.prompt.0.content"] - assert cache_read_span.attributes["gen_ai.prompt.0.role"] == "system" - assert system_message == cache_read_span.attributes["gen_ai.prompt.0.content"] + assert cache_creation_span.attributes[GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS] == system_message + assert cache_read_span.attributes[GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS] == system_message - assert cache_creation_span.attributes["gen_ai.prompt.1.role"] == "user" - assert text == cache_creation_span.attributes["gen_ai.prompt.1.content"] - assert cache_read_span.attributes["gen_ai.prompt.1.role"] == "user" - assert text == cache_read_span.attributes["gen_ai.prompt.1.content"] + cc_input = json.loads(cache_creation_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert cc_input[0]["role"] == "user" + assert text == cc_input[0]["content"] + cr_input = json.loads(cache_read_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert cr_input[0]["role"] == "user" + assert text == cr_input[0]["content"] assert ( cache_creation_span.attributes.get("gen_ai.response.id") @@ -454,8 +455,8 @@ async def test_anthropic_prompt_caching_async_legacy( == "msg_01Q8hYZvCMAQKC4n8X3zFnrX" ) - assert cache_creation_span.attributes["gen_ai.completion.0.role"] == "assistant" - assert cache_read_span.attributes["gen_ai.completion.0.role"] == "assistant" + assert json.loads(cache_creation_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])[-1]["role"] == "assistant" + assert json.loads(cache_read_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])[-1]["role"] == "assistant" _verify_caching_attributes(cache_creation_span, cache_read_span, 1169, 207, 224, 1165) @@ -691,8 +692,8 @@ async def test_anthropic_prompt_caching_async_with_events_with_no_content( cache_read_span = spans[1] assert ( - cache_creation_span.attributes["gen_ai.usage.cache_creation_input_tokens"] - == cache_read_span.attributes["gen_ai.usage.cache_read_input_tokens"] + cache_creation_span.attributes["gen_ai.usage.cache_creation.input_tokens"] + == cache_read_span.attributes["gen_ai.usage.cache_read.input_tokens"] ) _verify_caching_attributes(cache_creation_span, cache_read_span, 1169, 207, 224, 1165) @@ 
-782,15 +783,15 @@ def test_anthropic_prompt_caching_stream_legacy( cache_creation_span = spans[0] cache_read_span = spans[1] - assert cache_creation_span.attributes["gen_ai.prompt.0.role"] == "system" - assert system_message == cache_creation_span.attributes["gen_ai.prompt.0.content"] - assert cache_read_span.attributes["gen_ai.prompt.0.role"] == "system" - assert system_message == cache_read_span.attributes["gen_ai.prompt.0.content"] + assert cache_creation_span.attributes[GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS] == system_message + assert cache_read_span.attributes[GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS] == system_message - assert cache_creation_span.attributes["gen_ai.prompt.1.role"] == "user" - assert text == cache_creation_span.attributes["gen_ai.prompt.1.content"] - assert cache_read_span.attributes["gen_ai.prompt.1.role"] == "user" - assert text == cache_read_span.attributes["gen_ai.prompt.1.content"] + cc_input = json.loads(cache_creation_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert cc_input[0]["role"] == "user" + assert text == cc_input[0]["content"] + cr_input = json.loads(cache_read_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert cr_input[0]["role"] == "user" + assert text == cr_input[0]["content"] assert ( cache_creation_span.attributes.get("gen_ai.response.id") @@ -801,8 +802,8 @@ def test_anthropic_prompt_caching_stream_legacy( == "msg_01XQRA3bs4SB4yTBMwD3dbUi" ) - assert cache_creation_span.attributes["gen_ai.completion.0.role"] == "assistant" - assert cache_read_span.attributes["gen_ai.completion.0.role"] == "assistant" + assert json.loads(cache_creation_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])[-1]["role"] == "assistant" + assert json.loads(cache_read_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])[-1]["role"] == "assistant" _verify_caching_attributes(cache_creation_span, cache_read_span, 1169, 202, 222, 1165) @@ -1133,10 +1134,8 @@ async def test_anthropic_prompt_caching_async_stream_legacy( cache_creation_span = spans[0] cache_read_span = spans[1] - assert cache_creation_span.attributes["gen_ai.prompt.0.role"] == "system" - assert system_message == cache_creation_span.attributes["gen_ai.prompt.0.content"] - assert cache_read_span.attributes["gen_ai.prompt.0.role"] == "system" - assert system_message == cache_read_span.attributes["gen_ai.prompt.0.content"] + assert cache_creation_span.attributes[GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS] == system_message + assert cache_read_span.attributes[GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS] == system_message assert ( cache_creation_span.attributes.get("gen_ai.response.id") == "msg_01KQCu5jXyou55u6YFNk6uqu" @@ -1146,13 +1145,15 @@ async def test_anthropic_prompt_caching_async_stream_legacy( == "msg_01GZo7EAMfEuzRqTKrFANNpA" ) - assert cache_creation_span.attributes["gen_ai.completion.0.role"] == "assistant" - assert cache_read_span.attributes["gen_ai.completion.0.role"] == "assistant" + assert json.loads(cache_creation_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])[-1]["role"] == "assistant" + assert json.loads(cache_read_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES])[-1]["role"] == "assistant" - assert cache_creation_span.attributes["gen_ai.prompt.1.role"] == "user" - assert text == cache_creation_span.attributes["gen_ai.prompt.1.content"] - assert cache_read_span.attributes["gen_ai.prompt.1.role"] == "user" - assert text == cache_read_span.attributes["gen_ai.prompt.1.content"] + cc_input = 
json.loads(cache_creation_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert cc_input[0]["role"] == "user" + assert text == cc_input[0]["content"] + cr_input = json.loads(cache_read_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert cr_input[0]["role"] == "user" + assert text == cr_input[0]["content"] _verify_caching_attributes(cache_creation_span, cache_read_span, 1171, 290, 257, 1167) diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/test_semconv_compliance.py b/packages/opentelemetry-instrumentation-anthropic/tests/test_semconv_compliance.py new file mode 100644 index 0000000000..35a01e3380 --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/tests/test_semconv_compliance.py @@ -0,0 +1,8 @@ +# ruff: noqa: F401, F403 +""" +Semconv compliance tests re-used from opentelemetry-semantic-conventions-ai. + +Ensures the installed semconv package has the expected constant values. +To add more compliance checks, update _testing.py in that package — not here. +""" +from opentelemetry.semconv_ai._testing import * diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/test_semconv_span_attrs.py b/packages/opentelemetry-instrumentation-anthropic/tests/test_semconv_span_attrs.py new file mode 100644 index 0000000000..8579f15edf --- /dev/null +++ b/packages/opentelemetry-instrumentation-anthropic/tests/test_semconv_span_attrs.py @@ -0,0 +1,736 @@ +""" +Unit tests for OTel GenAI semantic conventions compliance in span attributes. + +These tests verify that span_utils.py emits the new OTel GenAI spec attributes: + - gen_ai.input.messages (replaces gen_ai.prompt.{i}.*) + - gen_ai.output.messages (replaces gen_ai.completion.{i}.*) + - gen_ai.system_instructions (replaces gen_ai.prompt.0 with role=system) + - gen_ai.tool.definitions (replaces llm.request.functions.{i}.*) + - gen_ai.response.finish_reasons (array, replaces per-completion finish_reason) +""" + +import asyncio +import json +import os +from types import SimpleNamespace +from unittest.mock import MagicMock + +import pytest +from opentelemetry.semconv._incubating.attributes import ( + gen_ai_attributes as GenAIAttributes, +) +from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import ( + GenAiOperationNameValues, + GenAiSystemValues, +) + +from opentelemetry.instrumentation.anthropic.span_utils import ( + aset_input_attributes, + set_response_attributes, + set_streaming_response_attributes, +) +from opentelemetry.instrumentation.anthropic.utils import TRACELOOP_TRACE_CONTENT + + +@pytest.fixture(autouse=True) +def enable_content_tracing(): + """Ensure content tracing is enabled for all tests.""" + os.environ[TRACELOOP_TRACE_CONTENT] = "true" + yield + os.environ.pop(TRACELOOP_TRACE_CONTENT, None) + + +def make_span(): + """Create a mock span that collects set_attribute calls.""" + attributes = {} + span = MagicMock() + span.attributes = attributes + span.set_attribute = lambda k, v: attributes.update({k: v}) + span.context.trace_id = 1234567890 + span.context.span_id = 9876543210 + return span + + +# --------------------------------------------------------------------------- +# Input attribute tests +# --------------------------------------------------------------------------- + +def test_input_messages_simple_user_message(): + """gen_ai.input.messages should be a JSON array with role+content.""" + span = make_span() + kwargs = { + "model": "claude-3-opus-20240229", + "messages": [{"role": "user", "content": "Tell me a joke"}], + "max_tokens": 1024, + } + 
asyncio.run(aset_input_attributes(span, kwargs)) + + assert GenAIAttributes.GEN_AI_INPUT_MESSAGES in span.attributes, ( + "gen_ai.input.messages must be set" + ) + messages = json.loads(span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert messages == [{"role": "user", "content": "Tell me a joke"}] + + # Old attribute must NOT be set + assert f"{GenAIAttributes.GEN_AI_PROMPT}.0.content" not in span.attributes + assert f"{GenAIAttributes.GEN_AI_PROMPT}.0.role" not in span.attributes + + +def test_input_messages_multi_turn(): + """gen_ai.input.messages should include all turns.""" + span = make_span() + kwargs = { + "model": "claude-3-opus-20240229", + "messages": [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi there!"}, + {"role": "user", "content": "How are you?"}, + ], + "max_tokens": 1024, + } + asyncio.run(aset_input_attributes(span, kwargs)) + + messages = json.loads(span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert len(messages) == 3 + assert messages[0] == {"role": "user", "content": "Hello"} + assert messages[1] == {"role": "assistant", "content": "Hi there!"} + assert messages[2] == {"role": "user", "content": "How are you?"} + + +def test_system_instructions_attribute(): + """System prompt should be in gen_ai.system_instructions, NOT as a message.""" + span = make_span() + kwargs = { + "model": "claude-3-opus-20240229", + "system": "You are a helpful assistant.", + "messages": [{"role": "user", "content": "Hello"}], + "max_tokens": 1024, + } + asyncio.run(aset_input_attributes(span, kwargs)) + + # System instructions should be a standalone attribute + assert GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS in span.attributes, ( + "gen_ai.system_instructions must be set" + ) + assert span.attributes[GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS] == ( + "You are a helpful assistant." 
+ ) + + # System should NOT appear as part of gen_ai.input.messages + messages = json.loads(span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + roles = [m["role"] for m in messages] + assert "system" not in roles, ( + "System message must not appear inside gen_ai.input.messages" + ) + + # Old attribute must NOT be set + assert f"{GenAIAttributes.GEN_AI_PROMPT}.0.role" not in span.attributes + + +def test_tool_definitions_attribute(): + """Tools should be serialised into gen_ai.tool.definitions JSON array.""" + span = make_span() + tools = [ + { + "name": "get_weather", + "description": "Get the current weather", + "input_schema": { + "type": "object", + "properties": {"location": {"type": "string"}}, + "required": ["location"], + }, + } + ] + kwargs = { + "model": "claude-3-opus-20240229", + "messages": [{"role": "user", "content": "What's the weather?"}], + "tools": tools, + "max_tokens": 1024, + } + asyncio.run(aset_input_attributes(span, kwargs)) + + assert GenAIAttributes.GEN_AI_TOOL_DEFINITIONS in span.attributes, ( + "gen_ai.tool.definitions must be set" + ) + defs = json.loads(span.attributes[GenAIAttributes.GEN_AI_TOOL_DEFINITIONS]) + assert len(defs) == 1 + assert defs[0]["name"] == "get_weather" + assert defs[0]["description"] == "Get the current weather" + assert "input_schema" in defs[0] + + # Old attribute must NOT be set + assert f"{GenAIAttributes.GEN_AI_TOOL_DEFINITIONS}.0.name" not in span.attributes + + +def test_input_messages_with_tool_calls_in_content(): + """Tool use blocks in assistant messages should be captured as tool_calls.""" + span = make_span() + kwargs = { + "model": "claude-3-opus-20240229", + "messages": [ + {"role": "user", "content": "What's the weather in SF?"}, + { + "role": "assistant", + "content": [ + { + "type": "tool_use", + "id": "tool_123", + "name": "get_weather", + "input": {"location": "San Francisco"}, + } + ], + }, + ], + "max_tokens": 1024, + } + asyncio.run(aset_input_attributes(span, kwargs)) + + messages = json.loads(span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assistant_msg = messages[1] + assert "tool_calls" in assistant_msg + assert assistant_msg["tool_calls"][0]["id"] == "tool_123" + assert assistant_msg["tool_calls"][0]["name"] == "get_weather" + + +def test_tool_use_blocks_not_duplicated_in_content(): + """Tool use blocks must appear only in tool_calls, not also in content.""" + span = make_span() + kwargs = { + "model": "claude-3-opus-20240229", + "messages": [ + {"role": "user", "content": "What's the weather in SF?"}, + { + "role": "assistant", + "content": [ + {"type": "text", "text": "Let me check that for you."}, + { + "type": "tool_use", + "id": "tool_123", + "name": "get_weather", + "input": {"location": "San Francisco"}, + }, + ], + }, + ], + "max_tokens": 1024, + } + asyncio.run(aset_input_attributes(span, kwargs)) + + messages = json.loads(span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assistant_msg = messages[1] + + # tool_use block must be in tool_calls + assert "tool_calls" in assistant_msg + assert len(assistant_msg["tool_calls"]) == 1 + assert assistant_msg["tool_calls"][0]["id"] == "tool_123" + + # tool_use block must NOT appear in content + content = assistant_msg.get("content", "") + if isinstance(content, str): + content_parsed = json.loads(content) if content.startswith("[") else content + else: + content_parsed = content + if isinstance(content_parsed, list): + types_in_content = [b.get("type") for b in content_parsed if isinstance(b, dict)] + assert "tool_use" not in 
types_in_content, ( + "tool_use block must not be duplicated inside content" + ) + + +def test_tool_use_only_content_has_no_content_field(): + """When content is exclusively tool_use blocks, content should be None/absent.""" + span = make_span() + kwargs = { + "model": "claude-3-opus-20240229", + "messages": [ + {"role": "user", "content": "What's the weather in SF?"}, + { + "role": "assistant", + "content": [ + { + "type": "tool_use", + "id": "tool_123", + "name": "get_weather", + "input": {"location": "San Francisco"}, + } + ], + }, + ], + "max_tokens": 1024, + } + asyncio.run(aset_input_attributes(span, kwargs)) + + messages = json.loads(span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assistant_msg = messages[1] + + assert "tool_calls" in assistant_msg + # content should be None (no non-tool-use blocks) + assert assistant_msg.get("content") is None + + +# --------------------------------------------------------------------------- +# Response / completion attribute tests +# --------------------------------------------------------------------------- + +def _make_text_block(text): + block = MagicMock() + block.type = "text" + block.text = text + return block + + +def _make_tool_use_block(tool_id, name, input_data): + block = MagicMock() + block.type = "tool_use" + block.id = tool_id + block.name = name + block.input = input_data + return block + + +def _make_response(content_blocks, stop_reason="end_turn", role="assistant"): + usage = SimpleNamespace(input_tokens=10, output_tokens=5) + response = { + "model": "claude-3-opus-20240229", + "id": "msg_abc123", + "role": role, + "stop_reason": stop_reason, + "content": content_blocks, + "usage": usage, + } + return response + + +def test_output_messages_text_response(): + """gen_ai.output.messages should be a JSON array with the assistant response.""" + span = make_span() + response = _make_response([_make_text_block("Why did the chicken cross the road?")]) + set_response_attributes(span, response) + + assert GenAIAttributes.GEN_AI_OUTPUT_MESSAGES in span.attributes, ( + "gen_ai.output.messages must be set" + ) + output = json.loads(span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + assert len(output) == 1 + assert output[0]["role"] == "assistant" + assert output[0]["content"] == "Why did the chicken cross the road?" 
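+    # Per the new spec shape, a plain text reply serialises to exactly one
+    # {"role": ..., "content": ...} entry in the JSON array.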
+ + # Old attribute must NOT be set + assert f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content" not in span.attributes + assert f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role" not in span.attributes + + +def test_response_finish_reasons_attribute(): + """gen_ai.response.finish_reasons should be a list on the span.""" + span = make_span() + response = _make_response([_make_text_block("Hello")], stop_reason="end_turn") + set_response_attributes(span, response) + + assert GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS in span.attributes, ( + "gen_ai.response.finish_reasons must be set" + ) + finish_reasons = span.attributes[GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS] + assert isinstance(finish_reasons, (list, tuple)) + assert "end_turn" in finish_reasons + + # Per-completion finish_reason must NOT be set as old attr + assert f"{GenAIAttributes.GEN_AI_COMPLETION}.0.finish_reason" not in span.attributes + + +def test_finish_reasons_set_when_content_tracing_disabled(): + """gen_ai.response.finish_reasons must be recorded even when TRACELOOP_TRACE_CONTENT=false.""" + os.environ[TRACELOOP_TRACE_CONTENT] = "false" + + span = make_span() + response = _make_response([_make_text_block("Secret content")], stop_reason="end_turn") + set_response_attributes(span, response) + + assert GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS in span.attributes, ( + "gen_ai.response.finish_reasons must be set regardless of content tracing" + ) + assert "end_turn" in span.attributes[GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS] + # Content must NOT be present + assert GenAIAttributes.GEN_AI_OUTPUT_MESSAGES not in span.attributes + + +def test_streaming_finish_reasons_set_when_content_tracing_disabled(): + """Streaming finish_reasons must be recorded even when TRACELOOP_TRACE_CONTENT=false.""" + os.environ[TRACELOOP_TRACE_CONTENT] = "false" + + span = make_span() + events = [{"type": "text", "text": "Secret content", "finish_reason": "end_turn", "index": 0}] + set_streaming_response_attributes(span, events) + + assert GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS in span.attributes, ( + "streaming finish_reasons must be set regardless of content tracing" + ) + assert "end_turn" in span.attributes[GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS] + # Content must NOT be present + assert GenAIAttributes.GEN_AI_OUTPUT_MESSAGES not in span.attributes + + +def test_output_messages_tool_use_response(): + """Tool use in the response should appear as tool_calls in gen_ai.output.messages.""" + span = make_span() + tool_block = _make_tool_use_block("tool_456", "get_weather", {"location": "NYC"}) + response = _make_response([tool_block], stop_reason="tool_use") + set_response_attributes(span, response) + + output = json.loads(span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + assert len(output) == 1 + assert "tool_calls" in output[0] + tc = output[0]["tool_calls"][0] + assert tc["id"] == "tool_456" + assert tc["name"] == "get_weather" + + +def test_output_messages_streaming(): + """set_streaming_response_attributes should also use gen_ai.output.messages.""" + span = make_span() + events = [ + { + "type": "text", + "text": "Streaming response", + "finish_reason": "end_turn", + "index": 0, + } + ] + set_streaming_response_attributes(span, events) + + assert GenAIAttributes.GEN_AI_OUTPUT_MESSAGES in span.attributes, ( + "gen_ai.output.messages must be set for streaming" + ) + output = json.loads(span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + assert output[0]["content"] == "Streaming response" + + assert 
GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS in span.attributes + assert "end_turn" in span.attributes[GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS] + + # Old attribute must NOT be set + assert f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content" not in span.attributes + + +def test_output_messages_streaming_tool_use(): + """Streaming tool use should appear in gen_ai.output.messages.""" + span = make_span() + events = [ + { + "type": "tool_use", + "id": "tool_789", + "name": "get_weather", + "input": '{"location": "Boston"}', + "finish_reason": "tool_use", + "index": 0, + } + ] + set_streaming_response_attributes(span, events) + + output = json.loads(span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + assert len(output) == 1 + assert "tool_calls" in output[0] + assert output[0]["tool_calls"][0]["id"] == "tool_789" + + +# --------------------------------------------------------------------------- +# Span identity attribute tests (#4, #5) +# --------------------------------------------------------------------------- + +def test_gen_ai_provider_name_is_set(): + """gen_ai.provider.name must be set on every span.""" + from opentelemetry.instrumentation.anthropic import _wrap + from unittest.mock import patch, MagicMock + + tracer = MagicMock() + captured = {} + + def fake_start_span(name, kind, attributes): + captured["attributes"] = attributes + span = MagicMock() + span.is_recording.return_value = False + return span + + tracer.start_span.side_effect = fake_start_span + + to_wrap = {"span_name": "anthropic.chat"} + wrapped_fn = MagicMock(return_value=None) + + with patch("opentelemetry.context.get_value", return_value=False): + fn = _wrap(tracer, None, None, None, None, None, to_wrap) + fn(wrapped_fn, MagicMock(), [], {"model": "claude-3-opus-20240229", "messages": [], "max_tokens": 10}) + + assert GenAIAttributes.GEN_AI_PROVIDER_NAME in captured["attributes"], ( + "gen_ai.provider.name must be set on every span" + ) + assert captured["attributes"][GenAIAttributes.GEN_AI_PROVIDER_NAME] == GenAiSystemValues.ANTHROPIC.value + + +def test_gen_ai_operation_name_chat(): + """gen_ai.operation.name must be 'chat' for Messages API spans.""" + from opentelemetry.instrumentation.anthropic import _wrap + from unittest.mock import patch, MagicMock + + tracer = MagicMock() + captured = {} + + def fake_start_span(name, kind, attributes): + captured["attributes"] = attributes + span = MagicMock() + span.is_recording.return_value = False + return span + + tracer.start_span.side_effect = fake_start_span + + to_wrap = {"span_name": "anthropic.chat"} + wrapped_fn = MagicMock(return_value=None) + + with patch("opentelemetry.context.get_value", return_value=False): + fn = _wrap(tracer, None, None, None, None, None, to_wrap) + fn(wrapped_fn, MagicMock(), [], {"model": "claude-3-opus-20240229", "messages": [], "max_tokens": 10}) + + assert GenAIAttributes.GEN_AI_OPERATION_NAME in captured["attributes"], ( + "gen_ai.operation.name must be set" + ) + assert captured["attributes"][GenAIAttributes.GEN_AI_OPERATION_NAME] == GenAiOperationNameValues.CHAT.value + + +def test_gen_ai_operation_name_completion(): + """gen_ai.operation.name must be 'text_completion' for Completions API spans.""" + from opentelemetry.instrumentation.anthropic import _wrap + from unittest.mock import patch, MagicMock + + tracer = MagicMock() + captured = {} + + def fake_start_span(name, kind, attributes): + captured["attributes"] = attributes + span = MagicMock() + span.is_recording.return_value = False + return span + + 
tracer.start_span.side_effect = fake_start_span + + to_wrap = {"span_name": "anthropic.completion"} + wrapped_fn = MagicMock(return_value=None) + + with patch("opentelemetry.context.get_value", return_value=False): + fn = _wrap(tracer, None, None, None, None, None, to_wrap) + fn(wrapped_fn, MagicMock(), [], {"model": "claude-2", "prompt": "Hello", "max_tokens_to_sample": 100}) + + assert ( + captured["attributes"][GenAIAttributes.GEN_AI_OPERATION_NAME] + == GenAiOperationNameValues.TEXT_COMPLETION.value + ) + + +# --------------------------------------------------------------------------- +# Streaming tool_calls.arguments JSON serialization test (#6) +# --------------------------------------------------------------------------- + +def test_streaming_tool_arguments_are_json_serialized(): + """Streaming tool_calls.arguments must be JSON-serialized like non-streaming paths.""" + span = make_span() + events = [ + { + "type": "tool_use", + "id": "tool_abc", + "name": "get_weather", + "input": {"location": "Boston", "unit": "celsius"}, + "finish_reason": "tool_use", + "index": 0, + } + ] + set_streaming_response_attributes(span, events) + + output = json.loads(span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + tc = output[0]["tool_calls"][0] + assert isinstance(tc["arguments"], str), ( + "tool_calls.arguments must be a JSON string, not a raw dict" + ) + parsed = json.loads(tc["arguments"]) + assert parsed == {"location": "Boston", "unit": "celsius"} + + +def test_streaming_tool_arguments_not_double_encoded_when_input_is_string(): + """Streaming input arrives as an accumulated JSON string (from partial_json deltas). + Calling json.dumps on it again would double-encode it — arguments must remain + parseable as a plain JSON object, not a JSON-encoded string. 
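+    A double-encoded value would survive one json.loads but come back as a
+    str rather than a dict.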
+ """ + span = make_span() + # input is a string here — exactly as streaming.py produces it after + # accumulating input_json_delta fragments + events = [ + { + "type": "tool_use", + "id": "tool_abc", + "name": "get_weather", + "input": '{"location": "Boston", "unit": "celsius"}', + "finish_reason": "tool_use", + "index": 0, + } + ] + set_streaming_response_attributes(span, events) + + output = json.loads(span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + tc = output[0]["tool_calls"][0] + assert isinstance(tc["arguments"], str), "arguments must be a string" + parsed = json.loads(tc["arguments"]) + assert parsed == {"location": "Boston", "unit": "celsius"}, ( + "arguments must parse to the original dict, not a double-encoded string" + ) + + +# --------------------------------------------------------------------------- +# max_tokens fallback test (#2) +# --------------------------------------------------------------------------- + +def test_max_tokens_set_for_messages_api(): + """gen_ai.request.max_tokens must be set from max_tokens (Messages API).""" + span = make_span() + kwargs = { + "model": "claude-3-opus-20240229", + "messages": [{"role": "user", "content": "Hello"}], + "max_tokens": 512, + } + asyncio.run(aset_input_attributes(span, kwargs)) + + assert GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS in span.attributes, ( + "gen_ai.request.max_tokens must be set for Messages API calls" + ) + assert span.attributes[GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS] == 512 + + +def test_max_tokens_set_for_completions_api(): + """gen_ai.request.max_tokens must be set from max_tokens_to_sample (legacy Completions API).""" + span = make_span() + kwargs = { + "model": "claude-2", + "prompt": "Hello", + "max_tokens_to_sample": 256, + } + asyncio.run(aset_input_attributes(span, kwargs)) + + assert GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS in span.attributes, ( + "gen_ai.request.max_tokens must be set for Completions API calls" + ) + assert span.attributes[GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS] == 256 + + +def test_max_tokens_to_sample_takes_precedence_when_both_provided(): + """When both max_tokens_to_sample and max_tokens are present, + max_tokens_to_sample (legacy) wins via `or` short-circuit. + This documents the current behaviour so a future change would be intentional. 
+ """ + span = make_span() + kwargs = { + "model": "claude-2", + "prompt": "Hello", + "max_tokens_to_sample": 100, + "max_tokens": 512, + } + asyncio.run(aset_input_attributes(span, kwargs)) + + assert span.attributes[GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS] == 100, ( + "max_tokens_to_sample takes precedence over max_tokens when both are set" + ) + + +# --------------------------------------------------------------------------- +# Async finish_reasons with content tracing disabled (#2 async path) +# --------------------------------------------------------------------------- + +def test_async_finish_reasons_set_when_content_tracing_disabled(): + """_aset_span_completions must record finish_reasons even when content tracing is off.""" + from opentelemetry.instrumentation.anthropic.span_utils import aset_response_attributes + + os.environ[TRACELOOP_TRACE_CONTENT] = "false" + + span = make_span() + response = _make_response([_make_text_block("Secret content")], stop_reason="end_turn") + asyncio.run(aset_response_attributes(span, response)) + + assert GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS in span.attributes, ( + "async path: finish_reasons must be set regardless of content tracing" + ) + assert "end_turn" in span.attributes[GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS] + assert GenAIAttributes.GEN_AI_OUTPUT_MESSAGES not in span.attributes + + +# --------------------------------------------------------------------------- +# _awrap span identity attributes (#4 async path) +# --------------------------------------------------------------------------- + +def test_awrap_gen_ai_provider_name_and_operation_name(): + """_awrap must set gen_ai.provider.name and gen_ai.operation.name same as _wrap.""" + import asyncio as _asyncio + from opentelemetry.instrumentation.anthropic import _awrap + from unittest.mock import patch, MagicMock, AsyncMock + + tracer = MagicMock() + captured = {} + + def fake_start_span(name, kind, attributes): + captured["attributes"] = attributes + span = MagicMock() + span.is_recording.return_value = False + return span + + tracer.start_span.side_effect = fake_start_span + + to_wrap = {"span_name": "anthropic.chat"} + wrapped_fn = AsyncMock(return_value=None) + + async def run(): + with patch("opentelemetry.context.get_value", return_value=False): + fn = _awrap(tracer, None, None, None, None, None, to_wrap) + await fn(wrapped_fn, MagicMock(), [], {"model": "claude-3-opus-20240229", "messages": [], "max_tokens": 10}) + + _asyncio.run(run()) + + assert captured["attributes"][GenAIAttributes.GEN_AI_PROVIDER_NAME] == GenAiSystemValues.ANTHROPIC.value + assert captured["attributes"][GenAIAttributes.GEN_AI_OPERATION_NAME] == GenAiOperationNameValues.CHAT.value + + +# --------------------------------------------------------------------------- +# Multiple tool_use blocks in one input message +# --------------------------------------------------------------------------- + +def test_multiple_tool_use_blocks_in_single_message(): + """Multiple tool_use blocks in one message must all appear in tool_calls, + none in content, and content must be None. 
+ """ + span = make_span() + kwargs = { + "model": "claude-3-opus-20240229", + "messages": [ + {"role": "user", "content": "What's the weather in Boston and NYC?"}, + { + "role": "assistant", + "content": [ + { + "type": "tool_use", + "id": "tool_1", + "name": "get_weather", + "input": {"location": "Boston"}, + }, + { + "type": "tool_use", + "id": "tool_2", + "name": "get_weather", + "input": {"location": "New York"}, + }, + ], + }, + ], + "max_tokens": 1024, + } + asyncio.run(aset_input_attributes(span, kwargs)) + + messages = json.loads(span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assistant_msg = messages[1] + + assert len(assistant_msg["tool_calls"]) == 2 + ids = {tc["id"] for tc in assistant_msg["tool_calls"]} + assert ids == {"tool_1", "tool_2"} + assert assistant_msg.get("content") is None, ( + "content must be None when all blocks are tool_use" + ) diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/test_structured_outputs.py b/packages/opentelemetry-instrumentation-anthropic/tests/test_structured_outputs.py index d1c70d6570..5171a747a0 100644 --- a/packages/opentelemetry-instrumentation-anthropic/tests/test_structured_outputs.py +++ b/packages/opentelemetry-instrumentation-anthropic/tests/test_structured_outputs.py @@ -50,19 +50,12 @@ def test_anthropic_structured_outputs_legacy( assert spans[0].name == "anthropic.chat" anthropic_span = spans[0] - assert ( - anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.content"] - == "Tell me a joke about OpenTelemetry and rate it from 1 to 10" - ) - assert anthropic_span.attributes[f"{GenAIAttributes.GEN_AI_PROMPT}.0.role"] == "user" - assert ( - anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content") - == response.content[0].text - ) - assert ( - anthropic_span.attributes.get(f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role") - == "assistant" - ) + input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert input_messages[0]["content"] == "Tell me a joke about OpenTelemetry and rate it from 1 to 10" + assert input_messages[0]["role"] == "user" + output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + assert output_messages[-1]["content"] == response.content[0].text + assert output_messages[-1]["role"] == "assistant" assert "gen_ai.request.structured_output_schema" in anthropic_span.attributes schema_attr = json.loads( diff --git a/packages/opentelemetry-instrumentation-anthropic/tests/test_thinking.py b/packages/opentelemetry-instrumentation-anthropic/tests/test_thinking.py index 611691a76c..aaf3df9d56 100644 --- a/packages/opentelemetry-instrumentation-anthropic/tests/test_thinking.py +++ b/packages/opentelemetry-instrumentation-anthropic/tests/test_thinking.py @@ -1,3 +1,4 @@ +import json import pytest from opentelemetry.sdk._logs import ReadableLogRecord from opentelemetry.semconv._incubating.attributes import ( @@ -44,20 +45,15 @@ def test_anthropic_thinking_legacy( anthropic_span = spans[0] assert anthropic_span.name == "anthropic.chat" - assert anthropic_span.attributes["gen_ai.prompt.0.role"] == "user" - assert anthropic_span.attributes["gen_ai.prompt.0.content"] == prompt + input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert input_messages[0]["role"] == "user" + assert input_messages[0]["content"] == prompt - assert anthropic_span.attributes["gen_ai.completion.0.role"] == "thinking" - assert ( - 
anthropic_span.attributes["gen_ai.completion.0.content"] - == response.content[0].thinking - ) - - assert anthropic_span.attributes["gen_ai.completion.1.role"] == "assistant" - assert ( - anthropic_span.attributes["gen_ai.completion.1.content"] - == response.content[1].text - ) + output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + thinking_msg = next(m for m in output_messages if m.get("role") == "thinking") + assert thinking_msg["content"] == response.content[0].thinking + assistant_msg = next(m for m in output_messages if m.get("role") == "assistant") + assert assistant_msg["content"] == response.content[1].text metrics_data = reader.get_metrics_data() resource_metrics = metrics_data.resource_metrics @@ -247,20 +243,15 @@ async def test_async_anthropic_thinking_legacy( anthropic_span = spans[0] assert anthropic_span.name == "anthropic.chat" - assert anthropic_span.attributes["gen_ai.prompt.0.role"] == "user" - assert anthropic_span.attributes["gen_ai.prompt.0.content"] == prompt + input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert input_messages[0]["role"] == "user" + assert input_messages[0]["content"] == prompt - assert anthropic_span.attributes["gen_ai.completion.0.role"] == "thinking" - assert ( - anthropic_span.attributes["gen_ai.completion.0.content"] - == response.content[0].thinking - ) - - assert anthropic_span.attributes["gen_ai.completion.1.role"] == "assistant" - assert ( - anthropic_span.attributes["gen_ai.completion.1.content"] - == response.content[1].text - ) + output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + thinking_msg = next(m for m in output_messages if m.get("role") == "thinking") + assert thinking_msg["content"] == response.content[0].thinking + assistant_msg = next(m for m in output_messages if m.get("role") == "assistant") + assert assistant_msg["content"] == response.content[1].text metrics_data = reader.get_metrics_data() resource_metrics = metrics_data.resource_metrics @@ -466,13 +457,14 @@ def test_anthropic_thinking_streaming_legacy( anthropic_span = spans[0] assert anthropic_span.name == "anthropic.chat" - assert anthropic_span.attributes["gen_ai.prompt.0.role"] == "user" - assert anthropic_span.attributes["gen_ai.prompt.0.content"] == prompt + input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert input_messages[0]["role"] == "user" + assert input_messages[0]["content"] == prompt - assert anthropic_span.attributes["gen_ai.completion.0.role"] == "thinking" - - assert anthropic_span.attributes["gen_ai.completion.1.role"] == "assistant" - assert anthropic_span.attributes["gen_ai.completion.1.content"] == text + output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + assert any(m.get("role") == "thinking" for m in output_messages) + assistant_msg = next(m for m in output_messages if m.get("role") == "assistant") + assert assistant_msg["content"] == text metrics_data = reader.get_metrics_data() resource_metrics = metrics_data.resource_metrics @@ -704,13 +696,14 @@ async def test_async_anthropic_thinking_streaming_legacy( anthropic_span = spans[0] assert anthropic_span.name == "anthropic.chat" - assert anthropic_span.attributes["gen_ai.prompt.0.role"] == "user" - assert anthropic_span.attributes["gen_ai.prompt.0.content"] == prompt - - assert anthropic_span.attributes["gen_ai.completion.0.role"] == "thinking" - - assert 
anthropic_span.attributes["gen_ai.completion.1.role"] == "assistant" - assert anthropic_span.attributes["gen_ai.completion.1.content"] == text + input_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_INPUT_MESSAGES]) + assert input_messages[0]["role"] == "user" + assert input_messages[0]["content"] == prompt + + output_messages = json.loads(anthropic_span.attributes[GenAIAttributes.GEN_AI_OUTPUT_MESSAGES]) + assert any(m.get("role") == "thinking" for m in output_messages) + assistant_msg = next(m for m in output_messages if m.get("role") == "assistant") + assert assistant_msg["content"] == text metrics_data = reader.get_metrics_data() resource_metrics = metrics_data.resource_metrics diff --git a/packages/opentelemetry-instrumentation-anthropic/uv.lock b/packages/opentelemetry-instrumentation-anthropic/uv.lock index b243246fdd..d83eb04e2c 100644 --- a/packages/opentelemetry-instrumentation-anthropic/uv.lock +++ b/packages/opentelemetry-instrumentation-anthropic/uv.lock @@ -348,7 +348,7 @@ wheels = [ [[package]] name = "opentelemetry-instrumentation-anthropic" -version = "0.52.6" +version = "0.53.3" source = { editable = "." } dependencies = [ { name = "opentelemetry-api" }, @@ -385,7 +385,7 @@ requires-dist = [ { name = "opentelemetry-api", specifier = ">=1.38.0,<2" }, { name = "opentelemetry-instrumentation", specifier = ">=0.59b0" }, { name = "opentelemetry-semantic-conventions", specifier = ">=0.59b0" }, - { name = "opentelemetry-semantic-conventions-ai", specifier = ">=0.4.14,<0.5.0" }, + { name = "opentelemetry-semantic-conventions-ai", editable = "../opentelemetry-semantic-conventions-ai" }, ] provides-extras = ["instruments"] @@ -435,15 +435,25 @@ wheels = [ [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.14" -source = { registry = "https://pypi.org/simple" } +version = "0.5.0" +source = { editable = "../opentelemetry-semantic-conventions-ai" } dependencies = [ { name = "opentelemetry-sdk" }, { name = "opentelemetry-semantic-conventions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/74/33/f77151a8c9bf93094074533ea751305e9f3fec1a4197b0f218d09cb8dce2/opentelemetry_semantic_conventions_ai-0.4.14.tar.gz", hash = "sha256:0495774011933010db7dbfa5111a2fa649edeedef922e39c898154c81eae89d8", size = 18418, upload-time = "2026-02-22T20:25:34.42Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/56/d5/cdc62ce0f7357cd91682bb1e31d7a68a3c0da5abdbd8c69ffa9aec555f1b/opentelemetry_semantic_conventions_ai-0.4.14-py3-none-any.whl", hash = "sha256:218e0bf656b1d459c5bc608e2a30272b7ab0a4a5b69c1bd5b659c3918f4ad144", size = 5824, upload-time = "2026-02-22T20:25:33.307Z" }, + +[package.metadata] +requires-dist = [ + { name = "opentelemetry-sdk", specifier = ">=1.38.0,<2" }, + { name = "opentelemetry-semantic-conventions", specifier = ">=0.59b0" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "autopep8", specifier = ">=2.2.0,<3" }, + { name = "pytest", specifier = ">=8.2.2,<9" }, + { name = "pytest-sugar", specifier = "==1.0.0" }, + { name = "ruff", specifier = ">=0.4.0" }, ] [[package]] diff --git a/packages/opentelemetry-instrumentation-writer/pyproject.toml b/packages/opentelemetry-instrumentation-writer/pyproject.toml index df9d3e8a7a..fe26396fcd 100644 --- a/packages/opentelemetry-instrumentation-writer/pyproject.toml +++ b/packages/opentelemetry-instrumentation-writer/pyproject.toml @@ -12,7 +12,7 @@ requires-python = ">=3.10,<4" dependencies = [ "opentelemetry-api>=1.38.0,<2", 
"opentelemetry-instrumentation>=0.59b0", - "opentelemetry-semantic-conventions-ai>=0.4.11", + "opentelemetry-semantic-conventions-ai>=0.4.11,<0.5.0", "opentelemetry-semantic-conventions>=0.59b0", ] diff --git a/packages/opentelemetry-semantic-conventions-ai/MIGRATION.md b/packages/opentelemetry-semantic-conventions-ai/MIGRATION.md new file mode 100644 index 0000000000..dd0426a315 --- /dev/null +++ b/packages/opentelemetry-semantic-conventions-ai/MIGRATION.md @@ -0,0 +1,189 @@ +# Migration Guide: opentelemetry-semantic-conventions-ai v0.4.x → v0.5.x + +This guide covers breaking changes introduced when aligning the `opentelemetry-semantic-conventions-ai` +package with the upstream [OTel GenAI semantic conventions](https://opentelemetry.io/docs/specs/semconv/gen-ai/). + +--- + +## 1. Removed constants (previously duplicated upstream) + +These `SpanAttributes` constants have been **removed**. They are now part of the official +`opentelemetry-semantic-conventions` package. Import them directly from upstream. + +```python +# Before +from opentelemetry.semconv_ai import SpanAttributes +span.set_attribute(SpanAttributes.LLM_SYSTEM, "openai") + +# After +from opentelemetry.semconv._incubating.attributes import gen_ai_attributes as GenAIAttributes +span.set_attribute(GenAIAttributes.GEN_AI_SYSTEM, "openai") +``` + +| Removed constant | Upstream replacement | +|---|---| +| `SpanAttributes.LLM_SYSTEM` | `GenAIAttributes.GEN_AI_SYSTEM` | +| `SpanAttributes.LLM_REQUEST_MODEL` | `GenAIAttributes.GEN_AI_REQUEST_MODEL` | +| `SpanAttributes.LLM_REQUEST_MAX_TOKENS` | `GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS` | +| `SpanAttributes.LLM_REQUEST_TEMPERATURE` | `GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE` | +| `SpanAttributes.LLM_REQUEST_TOP_P` | `GenAIAttributes.GEN_AI_REQUEST_TOP_P` | +| `SpanAttributes.LLM_TOP_K` | `GenAIAttributes.GEN_AI_REQUEST_TOP_K` | +| `SpanAttributes.LLM_CHAT_STOP_SEQUENCES` | `GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES` | +| `SpanAttributes.LLM_FREQUENCY_PENALTY` | `GenAIAttributes.GEN_AI_REQUEST_FREQUENCY_PENALTY` | +| `SpanAttributes.LLM_PRESENCE_PENALTY` | `GenAIAttributes.GEN_AI_REQUEST_PRESENCE_PENALTY` | +| `SpanAttributes.LLM_RESPONSE_MODEL` | `GenAIAttributes.GEN_AI_RESPONSE_MODEL` | +| `SpanAttributes.LLM_USAGE_COMPLETION_TOKENS` | `GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS` | +| `SpanAttributes.LLM_USAGE_PROMPT_TOKENS` | `GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS` | +| `SpanAttributes.LLM_TOKEN_TYPE` | `GenAIAttributes.GEN_AI_TOKEN_TYPE` | +| `SpanAttributes.LLM_REQUEST_FUNCTIONS` | `GenAIAttributes.GEN_AI_TOOL_DEFINITIONS` | +| `SpanAttributes.LLM_PROMPTS` | `GenAIAttributes.GEN_AI_PROMPT` | +| `SpanAttributes.LLM_COMPLETIONS` | `GenAIAttributes.GEN_AI_COMPLETION` | +| `SpanAttributes.LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT` | `GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT` | +| `SpanAttributes.LLM_REQUEST_TYPE` | `GenAIAttributes.GEN_AI_OPERATION_NAME` | + +> **Note on `LLM_REQUEST_TYPE`**: The old `LLMRequestTypeValues` enum is replaced by +> `GenAiOperationNameValues` from upstream, or by `GenAICustomOperationName` for +> project-specific operation names. + +--- + +## 2. Renamed constants (stay in `SpanAttributes`, new `GEN_AI_*` prefix) + +These constants remain in the `opentelemetry-semantic-conventions-ai` package but their +Python names have been renamed from `LLM_*` to `GEN_AI_*`. 
+
+---
+
+## 2. Renamed constants (stay in `SpanAttributes`, new `GEN_AI_*` prefix)
+
+These constants remain in the `opentelemetry-semantic-conventions-ai` package but their
+Python names have been renamed from `LLM_*` to `GEN_AI_*`.
+
+```python
+# Before
+from opentelemetry.semconv_ai import SpanAttributes
+span.set_attribute(SpanAttributes.LLM_IS_STREAMING, True)
+
+# After
+from opentelemetry.semconv_ai import SpanAttributes
+span.set_attribute(SpanAttributes.GEN_AI_IS_STREAMING, True)
+```
+
+| Old name | New name |
+|---|---|
+| `SpanAttributes.LLM_USAGE_TOTAL_TOKENS` | `SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS` |
+| `SpanAttributes.LLM_USER` | `SpanAttributes.GEN_AI_USER` |
+| `SpanAttributes.LLM_HEADERS` | `SpanAttributes.GEN_AI_HEADERS` |
+| `SpanAttributes.LLM_IS_STREAMING` | `SpanAttributes.GEN_AI_IS_STREAMING` |
+| `SpanAttributes.LLM_REQUEST_REPETITION_PENALTY` | `SpanAttributes.GEN_AI_REQUEST_REPETITION_PENALTY` |
+| `SpanAttributes.LLM_REQUEST_REASONING_EFFORT` | `SpanAttributes.GEN_AI_REQUEST_REASONING_EFFORT` |
+| `SpanAttributes.LLM_REQUEST_REASONING_SUMMARY` | `SpanAttributes.GEN_AI_REQUEST_REASONING_SUMMARY` |
+| `SpanAttributes.LLM_RESPONSE_REASONING_EFFORT` | `SpanAttributes.GEN_AI_RESPONSE_REASONING_EFFORT` |
+| `SpanAttributes.LLM_RESPONSE_FINISH_REASON` | `SpanAttributes.GEN_AI_RESPONSE_FINISH_REASON` |
+| `SpanAttributes.LLM_RESPONSE_STOP_REASON` | `SpanAttributes.GEN_AI_RESPONSE_STOP_REASON` |
+| `SpanAttributes.LLM_CONTENT_COMPLETION_CHUNK` | `SpanAttributes.GEN_AI_CONTENT_COMPLETION_CHUNK` |
+| `SpanAttributes.LLM_USAGE_REASONING_TOKENS` | `SpanAttributes.GEN_AI_USAGE_REASONING_TOKENS` |
+| `SpanAttributes.LLM_USAGE_TOKEN_TYPE` | `SpanAttributes.GEN_AI_USAGE_TOKEN_TYPE` |
+| `SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS` | `SpanAttributes.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS` ¹ |
+| `SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS` | `SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS` ¹ |
+| `SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA` | `SpanAttributes.GEN_AI_REQUEST_STRUCTURED_OUTPUT_SCHEMA` |
+| `SpanAttributes.LLM_OPENAI_API_BASE` | `SpanAttributes.GEN_AI_OPENAI_API_BASE` |
+| `SpanAttributes.LLM_OPENAI_API_VERSION` | `SpanAttributes.GEN_AI_OPENAI_API_VERSION` |
+| `SpanAttributes.LLM_OPENAI_API_TYPE` | `SpanAttributes.GEN_AI_OPENAI_API_TYPE` |
+| `SpanAttributes.LLM_DECODING_METHOD` | `SpanAttributes.GEN_AI_WATSONX_DECODING_METHOD` |
+| `SpanAttributes.LLM_RANDOM_SEED` | `SpanAttributes.GEN_AI_WATSONX_RANDOM_SEED` |
+| `SpanAttributes.LLM_MAX_NEW_TOKENS` | `SpanAttributes.GEN_AI_WATSONX_MAX_NEW_TOKENS` |
+| `SpanAttributes.LLM_MIN_NEW_TOKENS` | `SpanAttributes.GEN_AI_WATSONX_MIN_NEW_TOKENS` |
+| `SpanAttributes.LLM_REPETITION_PENALTY` | `SpanAttributes.GEN_AI_WATSONX_REPETITION_PENALTY` |
+
+> ¹ The string value of these two cache-token attributes **also changed** — see [section 3](#cache-token-attributes).
+
+---
+
+## 3. Changed string values
+
+Some constants kept their Python name but the underlying **string value** changed.
+
+### Cache token attributes
+
+| Python name | Old string value | New string value |
+|---|---|---|
+| `SpanAttributes.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS` | `gen_ai.usage.cache_creation_input_tokens` | `gen_ai.usage.cache_creation.input_tokens` |
+| `SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS` | `gen_ai.usage.cache_read_input_tokens` | `gen_ai.usage.cache_read.input_tokens` |
+
+> **Dashboard impact**: Update any Grafana queries, alerts, or OTLP processors that filter on
+> these attribute names.
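+
+When reading these attributes back out of span data (in exporters, processors, or tests),
+prefer the constant over a hard-coded string so that renames like this one are picked up
+automatically. A minimal sketch, assuming `span` is a finished SDK `ReadableSpan`:
+
+```python
+from opentelemetry.semconv_ai import SpanAttributes
+
+# Resolves to "gen_ai.usage.cache_read.input_tokens" in v0.5.x
+# (and to the old underscore-separated name in v0.4.x).
+cache_read_tokens = span.attributes.get(SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS)
+```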
+
+### `GenAISystem` values
+
+All `GenAISystem` enum values now use the OTel spec canonical form (lowercase / dot-separated).
+
+| Enum member | Old value | New value |
+|---|---|---|
+| `GenAISystem.ANTHROPIC` | `"Anthropic"` | `"anthropic"` |
+| `GenAISystem.COHERE` | `"Cohere"` | `"cohere"` |
+| `GenAISystem.MISTRALAI` | `"MistralAI"` | `"mistral_ai"` |
+| `GenAISystem.OLLAMA` | `"Ollama"` | `"ollama"` |
+| `GenAISystem.GROQ` | `"Groq"` | `"groq"` |
+| `GenAISystem.ALEPH_ALPHA` | `"AlephAlpha"` | `"aleph_alpha"` |
+| `GenAISystem.REPLICATE` | `"Replicate"` | `"replicate"` |
+| `GenAISystem.TOGETHER_AI` | `"TogetherAI"` | `"together_ai"` |
+| `GenAISystem.WATSONX` | `"Watsonx"` | `"ibm.watsonx.ai"` |
+| `GenAISystem.HUGGINGFACE` | `"HuggingFace"` | `"hugging_face"` |
+| `GenAISystem.FIREWORKS` | `"Fireworks"` | `"fireworks"` |
+| `GenAISystem.AZURE` | `"Azure"` | `"az.ai.openai"` |
+| `GenAISystem.AWS` | `"AWS"` | `"aws.bedrock"` |
+| `GenAISystem.GOOGLE` | `"Google"` | `"gcp.gen_ai"` |
+| `GenAISystem.OPENROUTER` | `"OpenRouter"` | `"openrouter"` |
+| `GenAISystem.LANGCHAIN` | `"Langchain"` | `"langchain"` |
+
+> `GenAISystem.OPENAI` (`"openai"`) is unchanged.
+
+> **Dashboard impact**: Update dashboards, alerts, and OTLP processors that filter on
+> `gen_ai.system` to use the new lowercase values shown above.
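+
+A typical consumer-side update, sketched here as a Python filter over exported spans
+(`spans` is assumed to be any sequence of finished spans); the same one-line change applies
+to dashboard queries and processor configs:
+
+```python
+from opentelemetry.semconv_ai import GenAISystem
+
+# Before — matched the v0.4.x PascalCase value
+anthropic_spans = [s for s in spans if s.attributes.get("gen_ai.system") == "Anthropic"]
+
+# After — compare against the enum (resolves to "anthropic") instead of a hard-coded literal
+anthropic_spans = [
+    s for s in spans if s.attributes.get("gen_ai.system") == GenAISystem.ANTHROPIC.value
+]
+```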
+
+---
+
+## 4. Tool definitions format change
+
+Tool definitions are now encoded as a **single JSON-array attribute** instead of per-field
+indexed sub-attributes.
+
+```python
+# Before — multiple flat attributes
+span.set_attribute("gen_ai.tool.definitions.0.name", "my_tool")
+span.set_attribute("gen_ai.tool.definitions.0.description", "Does something")
+span.set_attribute("gen_ai.tool.definitions.0.parameters", json.dumps({...}))
+
+# After — one JSON array attribute
+import json
+tool_defs = [
+    {
+        "name": "my_tool",
+        "description": "Does something",
+        "parameters": {...},
+    }
+]
+span.set_attribute(GenAIAttributes.GEN_AI_TOOL_DEFINITIONS, json.dumps(tool_defs))
+```
+
+> **Dashboard impact**: Dashboards that expand `gen_ai.tool.definitions.{i}.name` as individual
+> attributes will no longer find them. Parse the JSON value of `gen_ai.tool.definitions` instead.
+
+---
+
+## 5. Quickstart: minimal import update
+
+```python
+# Before
+from opentelemetry.semconv_ai import SpanAttributes
+
+SpanAttributes.LLM_SYSTEM # removed
+SpanAttributes.LLM_REQUEST_MODEL # removed
+SpanAttributes.LLM_REQUEST_TYPE # removed
+SpanAttributes.LLM_IS_STREAMING # renamed
+SpanAttributes.LLM_USAGE_TOTAL_TOKENS # renamed
+
+# After
+from opentelemetry.semconv_ai import SpanAttributes
+from opentelemetry.semconv._incubating.attributes import gen_ai_attributes as GenAIAttributes
+
+GenAIAttributes.GEN_AI_SYSTEM # upstream
+GenAIAttributes.GEN_AI_REQUEST_MODEL # upstream
+GenAIAttributes.GEN_AI_OPERATION_NAME # upstream
+SpanAttributes.GEN_AI_IS_STREAMING # project semconv (renamed)
+SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS # project semconv (renamed)
+```
diff --git a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py
index 8bb286d0e9..1bc3fec034 100644
--- a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py
+++ b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py
@@ -7,29 +7,31 @@ class GenAISystem(Enum):
     """
     Supported LLM vendor (System) names used across OpenLLMetry instrumentations.

-    These values match the actual strings used in span attributes (LLM_SYSTEM)
-    throughout the instrumentation packages.
+    Values that have a counterpart in the official OTel GenAI semantic conventions
+    (opentelemetry.semconv._incubating.attributes.gen_ai_attributes.GenAiSystemValues)
+    use the spec-defined lowercase string. Values without an OTel counterpart use
+    lowercase-with-underscores as a project convention.
     """

     OPENAI = "openai"
-    ANTHROPIC = "Anthropic"
-    COHERE = "Cohere"
-    MISTRALAI = "MistralAI"
-    OLLAMA = "Ollama"
-    GROQ = "Groq"
-    ALEPH_ALPHA = "AlephAlpha"
-    REPLICATE = "Replicate"
-    TOGETHER_AI = "TogetherAI"
-    WATSONX = "Watsonx"
-    HUGGINGFACE = "HuggingFace"
-    FIREWORKS = "Fireworks"
-
-    AZURE = "Azure"
-    AWS = "AWS"
-    GOOGLE = "Google"
-    OPENROUTER = "OpenRouter"
-
-    LANGCHAIN = "Langchain"
+    ANTHROPIC = "anthropic"
+    COHERE = "cohere"
+    MISTRALAI = "mistral_ai"
+    OLLAMA = "ollama"
+    GROQ = "groq"
+    ALEPH_ALPHA = "aleph_alpha"
+    REPLICATE = "replicate"
+    TOGETHER_AI = "together_ai"
+    WATSONX = "ibm.watsonx.ai"
+    HUGGINGFACE = "hugging_face"
+    FIREWORKS = "fireworks"
+
+    AZURE = "az.ai.openai"
+    AWS = "aws.bedrock"
+    GOOGLE = "gcp.gen_ai"
+    OPENROUTER = "openrouter"
+
+    LANGCHAIN = "langchain"
     CREWAI = "crewai"
@@ -62,52 +64,32 @@ class SpanAttributes:
-    # GenAI Usage Cache Attributes (missing from incubating semantic conventions)
-    GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS = "gen_ai.usage.cache_creation_input_tokens"
-    GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS = "gen_ai.usage.cache_read_input_tokens"
-
-    # LLM Cache Attributes (legacy naming - keeping for backward compatibility)
-    LLM_SYSTEM = "gen_ai.system"
-    LLM_REQUEST_MODEL = "gen_ai.request.model"
-    LLM_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens"
-    LLM_REQUEST_TEMPERATURE = "gen_ai.request.temperature"
-    LLM_REQUEST_TOP_P = "gen_ai.request.top_p"
-    LLM_PROMPTS = "gen_ai.prompt"
-    LLM_COMPLETIONS = "gen_ai.completion"
-    LLM_RESPONSE_MODEL = "gen_ai.response.model"
-    LLM_USAGE_COMPLETION_TOKENS = "gen_ai.usage.completion_tokens"
-    LLM_USAGE_PROMPT_TOKENS = "gen_ai.usage.prompt_tokens"
-    LLM_USAGE_CACHE_CREATION_INPUT_TOKENS = "gen_ai.usage.cache_creation_input_tokens"
-    LLM_USAGE_CACHE_READ_INPUT_TOKENS = "gen_ai.usage.cache_read_input_tokens"
-    LLM_TOKEN_TYPE = "gen_ai.token.type"
-    LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA = "gen_ai.request.structured_output_schema"
-    LLM_REQUEST_REASONING_SUMMARY = "gen_ai.request.reasoning_summary"
-    LLM_RESPONSE_REASONING_EFFORT = "gen_ai.response.reasoning_effort"
-
-    # LLM
-    LLM_REQUEST_TYPE = "llm.request.type"
-    LLM_USAGE_TOTAL_TOKENS = "llm.usage.total_tokens"
-    LLM_USAGE_TOKEN_TYPE = "llm.usage.token_type"
-    LLM_USER = "llm.user"
-    LLM_HEADERS = "llm.headers"
-    LLM_TOP_K = "llm.top_k"
-    LLM_IS_STREAMING = "llm.is_streaming"
-    LLM_FREQUENCY_PENALTY = "llm.frequency_penalty"
-    LLM_PRESENCE_PENALTY = "llm.presence_penalty"
-    LLM_CHAT_STOP_SEQUENCES = "llm.chat.stop_sequences"
-    LLM_REQUEST_FUNCTIONS = "llm.request.functions"
-    LLM_REQUEST_REPETITION_PENALTY = "llm.request.repetition_penalty"
-    LLM_RESPONSE_FINISH_REASON = "llm.response.finish_reason"
-    LLM_RESPONSE_STOP_REASON = "llm.response.stop_reason"
-    LLM_CONTENT_COMPLETION_CHUNK = "llm.content.completion.chunk"
-    LLM_REQUEST_REASONING_EFFORT = "llm.request.reasoning_effort"
-    LLM_USAGE_REASONING_TOKENS = "llm.usage.reasoning_tokens"
+    # GenAI Usage Cache Attributes (not yet in upstream OTel incubating semconv)
+    GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS = "gen_ai.usage.cache_creation.input_tokens"
+    GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS = "gen_ai.usage.cache_read.input_tokens"
+
+    # LLM — project-policy attributes (not in upstream OTel spec)
+    GEN_AI_USAGE_TOTAL_TOKENS = "gen_ai.usage.total_tokens"
+    GEN_AI_USAGE_TOKEN_TYPE = "gen_ai.usage.token_type"
+    GEN_AI_USER = "gen_ai.user"
+    GEN_AI_HEADERS = "gen_ai.headers"
+    GEN_AI_IS_STREAMING = "gen_ai.is_streaming"
+    GEN_AI_REQUEST_REPETITION_PENALTY = "gen_ai.request.repetition_penalty"
+    GEN_AI_RESPONSE_FINISH_REASON = "gen_ai.response.finish_reason"
+    GEN_AI_RESPONSE_STOP_REASON = "gen_ai.response.stop_reason"
+    GEN_AI_CONTENT_COMPLETION_CHUNK = "gen_ai.content.completion.chunk"
+    GEN_AI_REQUEST_REASONING_EFFORT = "gen_ai.request.reasoning_effort"
+    GEN_AI_USAGE_REASONING_TOKENS = "gen_ai.usage.reasoning_tokens"
+    GEN_AI_REQUEST_N = "gen_ai.request.n"
+    GEN_AI_REQUEST_MAX_COMPLETION_TOKENS = "gen_ai.request.max_completion_tokens"
+    GEN_AI_REQUEST_STRUCTURED_OUTPUT_SCHEMA = "gen_ai.request.structured_output_schema"
+    GEN_AI_REQUEST_REASONING_SUMMARY = "gen_ai.request.reasoning_summary"
+    GEN_AI_RESPONSE_REASONING_EFFORT = "gen_ai.response.reasoning_effort"

     # OpenAI
-    LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT = "gen_ai.openai.system_fingerprint"
-    LLM_OPENAI_API_BASE = "gen_ai.openai.api_base"
-    LLM_OPENAI_API_VERSION = "gen_ai.openai.api_version"
-    LLM_OPENAI_API_TYPE = "gen_ai.openai.api_type"
+    GEN_AI_OPENAI_API_BASE = "gen_ai.openai.api_base"
+    GEN_AI_OPENAI_API_VERSION = "gen_ai.openai.api_version"
+    GEN_AI_OPENAI_API_TYPE = "gen_ai.openai.api_type"

     # Haystack
     HAYSTACK_OPENAI_CHAT = "haystack.openai.chat"
@@ -152,11 +134,11 @@ class SpanAttributes:
     TRACELOOP_CORRELATION_ID = "traceloop.correlation.id"

     # Watson/genai LLM
-    LLM_DECODING_METHOD = "llm.watsonx.decoding_method"
-    LLM_RANDOM_SEED = "llm.watsonx.random_seed"
-    LLM_MAX_NEW_TOKENS = "llm.watsonx.max_new_tokens"
-    LLM_MIN_NEW_TOKENS = "llm.watsonx.min_new_tokens"
-    LLM_REPETITION_PENALTY = "llm.watsonx.repetition_penalty"
+    GEN_AI_WATSONX_DECODING_METHOD = "llm.watsonx.decoding_method"
+    GEN_AI_WATSONX_RANDOM_SEED = "llm.watsonx.random_seed"
+    GEN_AI_WATSONX_MAX_NEW_TOKENS = "llm.watsonx.max_new_tokens"
+    GEN_AI_WATSONX_MIN_NEW_TOKENS = "llm.watsonx.min_new_tokens"
+    GEN_AI_WATSONX_REPETITION_PENALTY = "llm.watsonx.repetition_penalty"

     # Chroma db
     CHROMADB_ADD_IDS_COUNT = "db.chroma.add.ids_count"
diff --git a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/_testing.py b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/_testing.py
new file mode 100644
index 0000000000..bf203ab819
--- /dev/null
+++ b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/_testing.py
@@ -0,0 +1,383 @@
+"""
+Shared compliance test classes for opentelemetry-semantic-conventions-ai.
+
+Import these classes in any instrumentation package's test suite to verify
+that the installed semconv constants have the expected values:
+
+    from opentelemetry.semconv_ai._testing import * # noqa: F401, F403
+
+pytest will discover and run all Test* classes that end up in the module
+namespace, so a single import line is enough.
+""" + +import pytest +from opentelemetry.semconv_ai import GenAISystem, Meters, SpanAttributes + + +# --------------------------------------------------------------------------- +# SpanAttributes — renamed constants (LLM_* → GEN_AI_*) +# --------------------------------------------------------------------------- + + +class TestSpanAttributesGENAIRenamed: + """Verify all renamed LLM_* → GEN_AI_* constants have the correct string values.""" + + def test_gen_ai_usage_total_tokens(self): + assert SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS == "gen_ai.usage.total_tokens" + + def test_gen_ai_usage_token_type(self): + assert SpanAttributes.GEN_AI_USAGE_TOKEN_TYPE == "gen_ai.usage.token_type" + + def test_gen_ai_user(self): + assert SpanAttributes.GEN_AI_USER == "gen_ai.user" + + def test_gen_ai_headers(self): + assert SpanAttributes.GEN_AI_HEADERS == "gen_ai.headers" + + def test_gen_ai_is_streaming(self): + assert SpanAttributes.GEN_AI_IS_STREAMING == "gen_ai.is_streaming" + + def test_gen_ai_request_repetition_penalty(self): + assert SpanAttributes.GEN_AI_REQUEST_REPETITION_PENALTY == "gen_ai.request.repetition_penalty" + + def test_gen_ai_response_finish_reason(self): + assert SpanAttributes.GEN_AI_RESPONSE_FINISH_REASON == "gen_ai.response.finish_reason" + + def test_gen_ai_response_stop_reason(self): + assert SpanAttributes.GEN_AI_RESPONSE_STOP_REASON == "gen_ai.response.stop_reason" + + def test_gen_ai_content_completion_chunk(self): + assert SpanAttributes.GEN_AI_CONTENT_COMPLETION_CHUNK == "gen_ai.content.completion.chunk" + + def test_gen_ai_request_reasoning_effort(self): + assert SpanAttributes.GEN_AI_REQUEST_REASONING_EFFORT == "gen_ai.request.reasoning_effort" + + def test_gen_ai_usage_reasoning_tokens(self): + assert SpanAttributes.GEN_AI_USAGE_REASONING_TOKENS == "gen_ai.usage.reasoning_tokens" + + def test_gen_ai_request_n(self): + assert SpanAttributes.GEN_AI_REQUEST_N == "gen_ai.request.n" + + def test_gen_ai_request_max_completion_tokens(self): + assert SpanAttributes.GEN_AI_REQUEST_MAX_COMPLETION_TOKENS == "gen_ai.request.max_completion_tokens" + + def test_gen_ai_request_structured_output_schema(self): + assert SpanAttributes.GEN_AI_REQUEST_STRUCTURED_OUTPUT_SCHEMA == "gen_ai.request.structured_output_schema" + + def test_gen_ai_request_reasoning_summary(self): + assert SpanAttributes.GEN_AI_REQUEST_REASONING_SUMMARY == "gen_ai.request.reasoning_summary" + + def test_gen_ai_response_reasoning_effort(self): + assert SpanAttributes.GEN_AI_RESPONSE_REASONING_EFFORT == "gen_ai.response.reasoning_effort" + + def test_gen_ai_openai_api_base(self): + assert SpanAttributes.GEN_AI_OPENAI_API_BASE == "gen_ai.openai.api_base" + + def test_gen_ai_openai_api_version(self): + assert SpanAttributes.GEN_AI_OPENAI_API_VERSION == "gen_ai.openai.api_version" + + def test_gen_ai_openai_api_type(self): + assert SpanAttributes.GEN_AI_OPENAI_API_TYPE == "gen_ai.openai.api_type" + + +# --------------------------------------------------------------------------- +# SpanAttributes — old LLM_* names must be gone +# --------------------------------------------------------------------------- + + +class TestSpanAttributesOldNamesGone: + """Assert that removed LLM_* constants no longer exist on SpanAttributes.""" + + @pytest.mark.parametrize( + "old_name", + [ + "LLM_SYSTEM", + "LLM_REQUEST_MODEL", + "LLM_REQUEST_MAX_TOKENS", + "LLM_REQUEST_TEMPERATURE", + "LLM_REQUEST_TOP_P", + "LLM_PROMPTS", + "LLM_COMPLETIONS", + "LLM_RESPONSE_MODEL", + "LLM_USAGE_COMPLETION_TOKENS", + "LLM_USAGE_PROMPT_TOKENS", + 
"LLM_USAGE_CACHE_CREATION_INPUT_TOKENS", + "LLM_USAGE_CACHE_READ_INPUT_TOKENS", + "LLM_TOKEN_TYPE", + "LLM_REQUEST_TYPE", + "LLM_FREQUENCY_PENALTY", + "LLM_PRESENCE_PENALTY", + "LLM_CHAT_STOP_SEQUENCES", + "LLM_REQUEST_FUNCTIONS", + "LLM_TOP_K", + "LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT", + ], + ) + def test_old_name_absent(self, old_name): + assert not hasattr(SpanAttributes, old_name), ( + f"SpanAttributes.{old_name} should have been removed. " + "Consumers should import from opentelemetry.semconv._incubating.attributes.gen_ai_attributes directly." + ) + + +# --------------------------------------------------------------------------- +# SpanAttributes — cache attributes +# --------------------------------------------------------------------------- + + +class TestSpanAttributesCacheDotSeparator: + """Cache token attributes use dot-separated sub-namespaces (spec update).""" + + def test_gen_ai_usage_cache_read_input_tokens(self): + assert SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS == "gen_ai.usage.cache_read.input_tokens" + + def test_gen_ai_usage_cache_creation_input_tokens(self): + assert SpanAttributes.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS == "gen_ai.usage.cache_creation.input_tokens" + + +# --------------------------------------------------------------------------- +# SpanAttributes — project-policy attributes use gen_ai namespace +# --------------------------------------------------------------------------- + + +class TestSpanAttributesProjectPolicy: + """Project-policy attributes (not in upstream OTel spec) use gen_ai namespace.""" + + def test_is_streaming(self): + assert SpanAttributes.GEN_AI_IS_STREAMING == "gen_ai.is_streaming" + + def test_user(self): + assert SpanAttributes.GEN_AI_USER == "gen_ai.user" + + def test_headers(self): + assert SpanAttributes.GEN_AI_HEADERS == "gen_ai.headers" + + +class TestSpanAttributesOldValuesAbsent: + """Regression: old/incorrect string values must not appear anywhere in SpanAttributes.""" + + @pytest.mark.parametrize( + "old_value", + [ + "llm.usage.total_tokens", + "llm.frequency_penalty", + "llm.presence_penalty", + "llm.is_streaming", + "llm.user", + "llm.headers", + "llm.top_k", + "llm.chat.stop_sequences", + "llm.request.functions", + "llm.request.repetition_penalty", + "llm.request.type", + "llm.usage.token_type", + "llm.response.finish_reason", + "llm.response.stop_reason", + "llm.content.completion.chunk", + "llm.request.reasoning_effort", + "llm.usage.reasoning_tokens", + "llm.chat_completions.streaming_time_to_generate", + "gen_ai.usage.cache_read_input_tokens", # underscore variant (pre-migration) + "gen_ai.usage.cache_creation_input_tokens", # underscore variant (pre-migration) + ], + ) + def test_old_value_not_in_span_attributes(self, old_value): + all_values = { + name: value + for name, value in vars(SpanAttributes).items() + if not name.startswith("_") and isinstance(value, str) + } + assert old_value not in all_values.values(), ( + f"Old attribute value {old_value!r} is still present in SpanAttributes. " + f"It should have been renamed." 
+ ) + + +class TestSpanAttributesUnchanged: + """Constants that should NOT have changed — sanity check.""" + + def test_traceloop_span_kind_unchanged(self): + assert SpanAttributes.TRACELOOP_SPAN_KIND == "traceloop.span.kind" + + +# --------------------------------------------------------------------------- +# SpanAttributes — Watsonx vendor-specific attributes (renamed to GEN_AI_WATSONX_*) +# --------------------------------------------------------------------------- + + +class TestSpanAttributesWatsonxKept: + """ + llm.watsonx.* span attributes are intentionally kept. These use llm.watsonx as a + vendor-qualified prefix (analogous to db.chroma.*), not a generic llm.* namespace. + The Python names have been renamed to GEN_AI_WATSONX_* prefix. + """ + + def test_watsonx_decoding_method_kept(self): + assert SpanAttributes.GEN_AI_WATSONX_DECODING_METHOD == "llm.watsonx.decoding_method" + + def test_watsonx_random_seed_kept(self): + assert SpanAttributes.GEN_AI_WATSONX_RANDOM_SEED == "llm.watsonx.random_seed" + + def test_watsonx_max_new_tokens_kept(self): + assert SpanAttributes.GEN_AI_WATSONX_MAX_NEW_TOKENS == "llm.watsonx.max_new_tokens" + + def test_watsonx_min_new_tokens_kept(self): + assert SpanAttributes.GEN_AI_WATSONX_MIN_NEW_TOKENS == "llm.watsonx.min_new_tokens" + + def test_watsonx_repetition_penalty_kept(self): + assert SpanAttributes.GEN_AI_WATSONX_REPETITION_PENALTY == "llm.watsonx.repetition_penalty" + + +# --------------------------------------------------------------------------- +# GenAISystem enum — values must match OTel GenAiSystemValues where possible +# --------------------------------------------------------------------------- + + +class TestGenAISystemOtelAligned: + """Enum members that have a counterpart in OTel GenAiSystemValues.""" + + def test_openai(self): + assert GenAISystem.OPENAI.value == "openai" + + def test_anthropic_lowercase(self): + # Was "Anthropic" — must now match OTel GenAiSystemValues.ANTHROPIC + from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues + + assert GenAISystem.ANTHROPIC.value == GenAiSystemValues.ANTHROPIC.value + assert GenAISystem.ANTHROPIC.value == "anthropic" + + def test_cohere_lowercase(self): + from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues + + assert GenAISystem.COHERE.value == GenAiSystemValues.COHERE.value + assert GenAISystem.COHERE.value == "cohere" + + def test_mistralai_spec_format(self): + from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues + + assert GenAISystem.MISTRALAI.value == GenAiSystemValues.MISTRAL_AI.value + assert GenAISystem.MISTRALAI.value == "mistral_ai" + + def test_groq_lowercase(self): + from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues + + assert GenAISystem.GROQ.value == GenAiSystemValues.GROQ.value + assert GenAISystem.GROQ.value == "groq" + + def test_watsonx_spec_format(self): + from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues + + assert GenAISystem.WATSONX.value == GenAiSystemValues.IBM_WATSONX_AI.value + assert GenAISystem.WATSONX.value == "ibm.watsonx.ai" + + def test_aws_spec_format(self): + from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues + + assert GenAISystem.AWS.value == GenAiSystemValues.AWS_BEDROCK.value + assert GenAISystem.AWS.value == "aws.bedrock" + + def test_azure_spec_format(self): + from 
opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues
+
+        assert GenAISystem.AZURE.value == GenAiSystemValues.AZ_AI_OPENAI.value
+        assert GenAISystem.AZURE.value == "az.ai.openai"
+
+    def test_google_spec_format(self):
+        from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiSystemValues
+
+        assert GenAISystem.GOOGLE.value == GenAiSystemValues.GCP_GEN_AI.value
+        assert GenAISystem.GOOGLE.value == "gcp.gen_ai"
+
+
+class TestGenAISystemProjectValues:
+    """Enum members without an OTel counterpart — project-defined lowercase values."""
+
+    def test_ollama(self):
+        assert GenAISystem.OLLAMA.value == "ollama"
+
+    def test_aleph_alpha(self):
+        assert GenAISystem.ALEPH_ALPHA.value == "aleph_alpha"
+
+    def test_replicate(self):
+        assert GenAISystem.REPLICATE.value == "replicate"
+
+    def test_together_ai(self):
+        assert GenAISystem.TOGETHER_AI.value == "together_ai"
+
+    def test_huggingface(self):
+        assert GenAISystem.HUGGINGFACE.value == "hugging_face"
+
+    def test_fireworks(self):
+        assert GenAISystem.FIREWORKS.value == "fireworks"
+
+    def test_openrouter(self):
+        assert GenAISystem.OPENROUTER.value == "openrouter"
+
+    def test_langchain(self):
+        assert GenAISystem.LANGCHAIN.value == "langchain"
+
+    def test_crewai(self):
+        assert GenAISystem.CREWAI.value == "crewai"
+
+
+class TestGenAISystemNoCaps:
+    """All GenAISystem values must be lowercase (no PascalCase or camelCase)."""
+
+    def test_all_values_lowercase(self):
+        # Dotted spec values (e.g. "ibm.watsonx.ai") are lowercase too, so no exemption is needed.
+        non_lowercase = [
+            member.name
+            for member in GenAISystem
+            if member.value != member.value.lower()
+        ]
+        assert non_lowercase == [], (
+            f"GenAISystem members have non-lowercase values: {non_lowercase}. "
+            "Values should use lowercase with dots or underscores."
+        )
+
+
+# ---------------------------------------------------------------------------
+# Meters — metric names must use gen_ai.* namespace
+# ---------------------------------------------------------------------------
+
+
+class TestMetersGenAiNamespace:
+    """Core client metric names use the gen_ai.* namespace; the one generic metric
+    still on the legacy llm.* namespace is asserted as-is below."""
+
+    def test_streaming_time_to_generate(self):
+        # Not yet migrated — intentionally still under the legacy llm.* namespace.
+        assert Meters.LLM_STREAMING_TIME_TO_GENERATE == "llm.chat_completions.streaming_time_to_generate"
+
+    def test_core_metrics_unchanged(self):
+        """Core gen_ai.client.* metrics already had the correct namespace."""
+        assert Meters.LLM_GENERATION_CHOICES == "gen_ai.client.generation.choices"
+        assert Meters.LLM_TOKEN_USAGE == "gen_ai.client.token.usage"
+        assert Meters.LLM_OPERATION_DURATION == "gen_ai.client.operation.duration"
+
+
+class TestMetersVendorNamespacesKept:
+    """
+    Vendor-qualified metric names (llm.openai.*, llm.anthropic.*, llm.watsonx.*)
+    are intentionally kept. The llm. prefix is a vendor identifier, not the
+    generic llm.* attribute namespace being migrated. These will be renamed in the
+    respective package PRs if/when those vendors adopt the gen_ai namespace.
+ """ + + def test_openai_completions_exceptions_kept(self): + assert Meters.LLM_COMPLETIONS_EXCEPTIONS == "llm.openai.chat_completions.exceptions" + + def test_openai_embeddings_exceptions_kept(self): + assert Meters.LLM_EMBEDDINGS_EXCEPTIONS == "llm.openai.embeddings.exceptions" + + def test_openai_embeddings_vector_size_kept(self): + assert Meters.LLM_EMBEDDINGS_VECTOR_SIZE == "llm.openai.embeddings.vector_size" + + def test_openai_image_generations_exceptions_kept(self): + assert Meters.LLM_IMAGE_GENERATIONS_EXCEPTIONS == "llm.openai.image_generations.exceptions" + + def test_anthropic_completion_exceptions_kept(self): + assert Meters.LLM_ANTHROPIC_COMPLETION_EXCEPTIONS == "llm.anthropic.completion.exceptions" + + def test_watsonx_metrics_kept(self): + assert Meters.LLM_WATSONX_COMPLETIONS_DURATION == "llm.watsonx.completions.duration" + assert Meters.LLM_WATSONX_COMPLETIONS_EXCEPTIONS == "llm.watsonx.completions.exceptions" + assert Meters.LLM_WATSONX_COMPLETIONS_RESPONSES == "llm.watsonx.completions.responses" + assert Meters.LLM_WATSONX_COMPLETIONS_TOKENS == "llm.watsonx.completions.tokens" diff --git a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/version.py b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/version.py index 6ff6db180c..3d187266f1 100644 --- a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/version.py +++ b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/version.py @@ -1 +1 @@ -__version__ = "0.4.16" +__version__ = "0.5.0" diff --git a/packages/opentelemetry-semantic-conventions-ai/pyproject.toml b/packages/opentelemetry-semantic-conventions-ai/pyproject.toml index 414f0e296a..581b3b41d1 100644 --- a/packages/opentelemetry-semantic-conventions-ai/pyproject.toml +++ b/packages/opentelemetry-semantic-conventions-ai/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.16" +version = "0.5.0" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" authors = [ { name = "Gal Kleinman", email = "gal@traceloop.com" }, diff --git a/packages/opentelemetry-semantic-conventions-ai/tests/test_semconv_compliance.py b/packages/opentelemetry-semantic-conventions-ai/tests/test_semconv_compliance.py new file mode 100644 index 0000000000..de81d3aa6b --- /dev/null +++ b/packages/opentelemetry-semantic-conventions-ai/tests/test_semconv_compliance.py @@ -0,0 +1,6 @@ +# ruff: noqa: F401, F403 +""" +Semconv compliance tests re-used from opentelemetry-semantic-conventions-ai. +To add more compliance checks, update _testing.py in that package — not here. +""" +from opentelemetry.semconv_ai._testing import * diff --git a/packages/opentelemetry-semantic-conventions-ai/tests/test_span_attributes.py b/packages/opentelemetry-semantic-conventions-ai/tests/test_span_attributes.py new file mode 100644 index 0000000000..eb6f56b70f --- /dev/null +++ b/packages/opentelemetry-semantic-conventions-ai/tests/test_span_attributes.py @@ -0,0 +1,7 @@ +# ruff: noqa: F401, F403 +""" +Semconv compliance tests — authoritative source lives in +opentelemetry/semconv_ai/_testing.py so any instrumentation package +can reuse them with a single import. 
+""" +from opentelemetry.semconv_ai._testing import * diff --git a/packages/opentelemetry-semantic-conventions-ai/uv.lock b/packages/opentelemetry-semantic-conventions-ai/uv.lock index 4283fafde2..c755090c04 100644 --- a/packages/opentelemetry-semantic-conventions-ai/uv.lock +++ b/packages/opentelemetry-semantic-conventions-ai/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 1 +revision = 3 requires-python = ">=3.9, <4" resolution-markers = [ "python_full_version >= '3.10'", @@ -14,18 +14,18 @@ dependencies = [ { name = "pycodestyle" }, { name = "tomli", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/50/d8/30873d2b7b57dee9263e53d142da044c4600a46f2d28374b3e38b023df16/autopep8-2.3.2.tar.gz", hash = "sha256:89440a4f969197b69a995e4ce0661b031f455a9f776d2c5ba3dbd83466931758", size = 92210 } +sdist = { url = "https://files.pythonhosted.org/packages/50/d8/30873d2b7b57dee9263e53d142da044c4600a46f2d28374b3e38b023df16/autopep8-2.3.2.tar.gz", hash = "sha256:89440a4f969197b69a995e4ce0661b031f455a9f776d2c5ba3dbd83466931758", size = 92210, upload-time = "2025-01-14T14:46:18.454Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9e/43/53afb8ba17218f19b77c7834128566c5bbb100a0ad9ba2e8e89d089d7079/autopep8-2.3.2-py2.py3-none-any.whl", hash = "sha256:ce8ad498672c845a0c3de2629c15b635ec2b05ef8177a6e7c91c74f3e9b51128", size = 45807 }, + { url = "https://files.pythonhosted.org/packages/9e/43/53afb8ba17218f19b77c7834128566c5bbb100a0ad9ba2e8e89d089d7079/autopep8-2.3.2-py2.py3-none-any.whl", hash = "sha256:ce8ad498672c845a0c3de2629c15b635ec2b05ef8177a6e7c91c74f3e9b51128", size = 45807, upload-time = "2025-01-14T14:46:15.466Z" }, ] [[package]] name = "colorama" version = "0.4.6" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, ] [[package]] @@ -35,9 +35,9 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371 } +sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = 
"sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740 }, + { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, ] [[package]] @@ -47,9 +47,9 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "zipp" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107 } +sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865 }, + { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, upload-time = "2025-12-21T10:00:18.329Z" }, ] [[package]] @@ -59,9 +59,9 @@ source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version < '3.10'", ] -sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793 } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050 }, + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, ] [[package]] @@ -71,9 +71,9 @@ source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.10'", ] -sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503 
} +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484 }, + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, ] [[package]] @@ -84,9 +84,9 @@ dependencies = [ { name = "importlib-metadata" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/97/b9/3161be15bb8e3ad01be8be5a968a9237c3027c5be504362ff800fca3e442/opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c", size = 65767 } +sdist = { url = "https://files.pythonhosted.org/packages/97/b9/3161be15bb8e3ad01be8be5a968a9237c3027c5be504362ff800fca3e442/opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c", size = 65767, upload-time = "2025-12-11T13:32:39.182Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cf/df/d3f1ddf4bb4cb50ed9b1139cc7b1c54c34a1e7ce8fd1b9a37c0d1551a6bd/opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950", size = 66356 }, + { url = "https://files.pythonhosted.org/packages/cf/df/d3f1ddf4bb4cb50ed9b1139cc7b1c54c34a1e7ce8fd1b9a37c0d1551a6bd/opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950", size = 66356, upload-time = "2025-12-11T13:32:17.304Z" }, ] [[package]] @@ -98,9 +98,9 @@ dependencies = [ { name = "opentelemetry-semantic-conventions" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/eb/fb/c76080c9ba07e1e8235d24cdcc4d125ef7aa3edf23eb4e497c2e50889adc/opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6", size = 171460 } +sdist = { url = "https://files.pythonhosted.org/packages/eb/fb/c76080c9ba07e1e8235d24cdcc4d125ef7aa3edf23eb4e497c2e50889adc/opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6", size = 171460, upload-time = "2025-12-11T13:32:49.369Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7c/98/e91cf858f203d86f4eccdf763dcf01cf03f1dae80c3750f7e635bfa206b6/opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c", size = 132565 }, + { url = "https://files.pythonhosted.org/packages/7c/98/e91cf858f203d86f4eccdf763dcf01cf03f1dae80c3750f7e635bfa206b6/opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c", size = 132565, upload-time = "2025-12-11T13:32:35.069Z" }, ] [[package]] @@ -111,14 +111,14 @@ dependencies = [ { name = "opentelemetry-api" }, { name = "typing-extensions" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/91/df/553f93ed38bf22f4b999d9be9c185adb558982214f33eae539d3b5cd0858/opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953", size = 137935 } +sdist = { url = "https://files.pythonhosted.org/packages/91/df/553f93ed38bf22f4b999d9be9c185adb558982214f33eae539d3b5cd0858/opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953", size = 137935, upload-time = "2025-12-11T13:32:50.487Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7a/5e/5958555e09635d09b75de3c4f8b9cae7335ca545d77392ffe7331534c402/opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb", size = 219982 }, + { url = "https://files.pythonhosted.org/packages/7a/5e/5958555e09635d09b75de3c4f8b9cae7335ca545d77392ffe7331534c402/opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb", size = 219982, upload-time = "2025-12-11T13:32:36.955Z" }, ] [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.13" +version = "0.5.0" source = { editable = "." } dependencies = [ { name = "opentelemetry-sdk" }, @@ -151,36 +151,36 @@ dev = [ name = "packaging" version = "25.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727 } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469 }, + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, ] [[package]] name = "pluggy" version = "1.6.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412 } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538 }, + { url = 
"https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, ] [[package]] name = "pycodestyle" version = "2.14.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/11/e0/abfd2a0d2efe47670df87f3e3a0e2edda42f055053c85361f19c0e2c1ca8/pycodestyle-2.14.0.tar.gz", hash = "sha256:c4b5b517d278089ff9d0abdec919cd97262a3367449ea1c8b49b91529167b783", size = 39472 } +sdist = { url = "https://files.pythonhosted.org/packages/11/e0/abfd2a0d2efe47670df87f3e3a0e2edda42f055053c85361f19c0e2c1ca8/pycodestyle-2.14.0.tar.gz", hash = "sha256:c4b5b517d278089ff9d0abdec919cd97262a3367449ea1c8b49b91529167b783", size = 39472, upload-time = "2025-06-20T18:49:48.75Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d7/27/a58ddaf8c588a3ef080db9d0b7e0b97215cee3a45df74f3a94dbbf5c893a/pycodestyle-2.14.0-py2.py3-none-any.whl", hash = "sha256:dd6bf7cb4ee77f8e016f9c8e74a35ddd9f67e1d5fd4184d86c3b98e07099f42d", size = 31594 }, + { url = "https://files.pythonhosted.org/packages/d7/27/a58ddaf8c588a3ef080db9d0b7e0b97215cee3a45df74f3a94dbbf5c893a/pycodestyle-2.14.0-py2.py3-none-any.whl", hash = "sha256:dd6bf7cb4ee77f8e016f9c8e74a35ddd9f67e1d5fd4184d86c3b98e07099f42d", size = 31594, upload-time = "2025-06-20T18:49:47.491Z" }, ] [[package]] name = "pygments" version = "2.19.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631 } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217 }, + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, ] [[package]] @@ -197,9 +197,9 @@ dependencies = [ { name = "pygments" }, { name = "tomli", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618 } +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = 
"sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750 }, + { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, ] [[package]] @@ -212,35 +212,35 @@ dependencies = [ { name = "termcolor", version = "3.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, { name = "termcolor", version = "3.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f5/ac/5754f5edd6d508bc6493bc37d74b928f102a5fff82d9a80347e180998f08/pytest-sugar-1.0.0.tar.gz", hash = "sha256:6422e83258f5b0c04ce7c632176c7732cab5fdb909cb39cca5c9139f81276c0a", size = 14992 } +sdist = { url = "https://files.pythonhosted.org/packages/f5/ac/5754f5edd6d508bc6493bc37d74b928f102a5fff82d9a80347e180998f08/pytest-sugar-1.0.0.tar.gz", hash = "sha256:6422e83258f5b0c04ce7c632176c7732cab5fdb909cb39cca5c9139f81276c0a", size = 14992, upload-time = "2024-02-01T18:30:36.735Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/92/fb/889f1b69da2f13691de09a111c16c4766a433382d44aa0ecf221deded44a/pytest_sugar-1.0.0-py3-none-any.whl", hash = "sha256:70ebcd8fc5795dc457ff8b69d266a4e2e8a74ae0c3edc749381c64b5246c8dfd", size = 10171 }, + { url = "https://files.pythonhosted.org/packages/92/fb/889f1b69da2f13691de09a111c16c4766a433382d44aa0ecf221deded44a/pytest_sugar-1.0.0-py3-none-any.whl", hash = "sha256:70ebcd8fc5795dc457ff8b69d266a4e2e8a74ae0c3edc749381c64b5246c8dfd", size = 10171, upload-time = "2024-02-01T18:30:29.395Z" }, ] [[package]] name = "ruff" version = "0.14.11" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d4/77/9a7fe084d268f8855d493e5031ea03fa0af8cc05887f638bf1c4e3363eb8/ruff-0.14.11.tar.gz", hash = "sha256:f6dc463bfa5c07a59b1ff2c3b9767373e541346ea105503b4c0369c520a66958", size = 5993417 } +sdist = { url = "https://files.pythonhosted.org/packages/d4/77/9a7fe084d268f8855d493e5031ea03fa0af8cc05887f638bf1c4e3363eb8/ruff-0.14.11.tar.gz", hash = "sha256:f6dc463bfa5c07a59b1ff2c3b9767373e541346ea105503b4c0369c520a66958", size = 5993417, upload-time = "2026-01-08T19:11:58.322Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f0/a6/a4c40a5aaa7e331f245d2dc1ac8ece306681f52b636b40ef87c88b9f7afd/ruff-0.14.11-py3-none-linux_armv6l.whl", hash = "sha256:f6ff2d95cbd335841a7217bdfd9c1d2e44eac2c584197ab1385579d55ff8830e", size = 12951208 }, - { url = "https://files.pythonhosted.org/packages/5c/5c/360a35cb7204b328b685d3129c08aca24765ff92b5a7efedbdd6c150d555/ruff-0.14.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f6eb5c1c8033680f4172ea9c8d3706c156223010b8b97b05e82c59bdc774ee6", size = 13330075 }, - { url = "https://files.pythonhosted.org/packages/1b/9e/0cc2f1be7a7d33cae541824cf3f95b4ff40d03557b575912b5b70273c9ec/ruff-0.14.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f2fc34cc896f90080fca01259f96c566f74069a04b25b6205d55379d12a6855e", size = 12257809 }, - { url = "https://files.pythonhosted.org/packages/a7/e5/5faab97c15bb75228d9f74637e775d26ac703cc2b4898564c01ab3637c02/ruff-0.14.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53386375001773ae812b43205d6064dae49ff0968774e6befe16a994fc233caa", size = 12678447 }, - { url = 
"https://files.pythonhosted.org/packages/1b/33/e9767f60a2bef779fb5855cab0af76c488e0ce90f7bb7b8a45c8a2ba4178/ruff-0.14.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a697737dce1ca97a0a55b5ff0434ee7205943d4874d638fe3ae66166ff46edbe", size = 12758560 }, - { url = "https://files.pythonhosted.org/packages/eb/84/4c6cf627a21462bb5102f7be2a320b084228ff26e105510cd2255ea868e5/ruff-0.14.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6845ca1da8ab81ab1dce755a32ad13f1db72e7fba27c486d5d90d65e04d17b8f", size = 13599296 }, - { url = "https://files.pythonhosted.org/packages/88/e1/92b5ed7ea66d849f6157e695dc23d5d6d982bd6aa8d077895652c38a7cae/ruff-0.14.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e36ce2fd31b54065ec6f76cb08d60159e1b32bdf08507862e32f47e6dde8bcbf", size = 15048981 }, - { url = "https://files.pythonhosted.org/packages/61/df/c1bd30992615ac17c2fb64b8a7376ca22c04a70555b5d05b8f717163cf9f/ruff-0.14.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:590bcc0e2097ecf74e62a5c10a6b71f008ad82eb97b0a0079e85defe19fe74d9", size = 14633183 }, - { url = "https://files.pythonhosted.org/packages/04/e9/fe552902f25013dd28a5428a42347d9ad20c4b534834a325a28305747d64/ruff-0.14.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:53fe71125fc158210d57fe4da26e622c9c294022988d08d9347ec1cf782adafe", size = 14050453 }, - { url = "https://files.pythonhosted.org/packages/ae/93/f36d89fa021543187f98991609ce6e47e24f35f008dfe1af01379d248a41/ruff-0.14.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a35c9da08562f1598ded8470fcfef2afb5cf881996e6c0a502ceb61f4bc9c8a3", size = 13757889 }, - { url = "https://files.pythonhosted.org/packages/b7/9f/c7fb6ecf554f28709a6a1f2a7f74750d400979e8cd47ed29feeaa1bd4db8/ruff-0.14.11-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:0f3727189a52179393ecf92ec7057c2210203e6af2676f08d92140d3e1ee72c1", size = 13955832 }, - { url = "https://files.pythonhosted.org/packages/db/a0/153315310f250f76900a98278cf878c64dfb6d044e184491dd3289796734/ruff-0.14.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:eb09f849bd37147a789b85995ff734a6c4a095bed5fd1608c4f56afc3634cde2", size = 12586522 }, - { url = "https://files.pythonhosted.org/packages/2f/2b/a73a2b6e6d2df1d74bf2b78098be1572191e54bec0e59e29382d13c3adc5/ruff-0.14.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:c61782543c1231bf71041461c1f28c64b961d457d0f238ac388e2ab173d7ecb7", size = 12724637 }, - { url = "https://files.pythonhosted.org/packages/f0/41/09100590320394401cd3c48fc718a8ba71c7ddb1ffd07e0ad6576b3a3df2/ruff-0.14.11-py3-none-musllinux_1_2_i686.whl", hash = "sha256:82ff352ea68fb6766140381748e1f67f83c39860b6446966cff48a315c3e2491", size = 13145837 }, - { url = "https://files.pythonhosted.org/packages/3b/d8/e035db859d1d3edf909381eb8ff3e89a672d6572e9454093538fe6f164b0/ruff-0.14.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:728e56879df4ca5b62a9dde2dd0eb0edda2a55160c0ea28c4025f18c03f86984", size = 13850469 }, - { url = "https://files.pythonhosted.org/packages/4e/02/bb3ff8b6e6d02ce9e3740f4c17dfbbfb55f34c789c139e9cd91985f356c7/ruff-0.14.11-py3-none-win32.whl", hash = "sha256:337c5dd11f16ee52ae217757d9b82a26400be7efac883e9e852646f1557ed841", size = 12851094 }, - { url = "https://files.pythonhosted.org/packages/58/f1/90ddc533918d3a2ad628bc3044cdfc094949e6d4b929220c3f0eb8a1c998/ruff-0.14.11-py3-none-win_amd64.whl", hash = 
"sha256:f981cea63d08456b2c070e64b79cb62f951aa1305282974d4d5216e6e0178ae6", size = 14001379 }, - { url = "https://files.pythonhosted.org/packages/c4/1c/1dbe51782c0e1e9cfce1d1004752672d2d4629ea46945d19d731ad772b3b/ruff-0.14.11-py3-none-win_arm64.whl", hash = "sha256:649fb6c9edd7f751db276ef42df1f3df41c38d67d199570ae2a7bd6cbc3590f0", size = 12938644 }, + { url = "https://files.pythonhosted.org/packages/f0/a6/a4c40a5aaa7e331f245d2dc1ac8ece306681f52b636b40ef87c88b9f7afd/ruff-0.14.11-py3-none-linux_armv6l.whl", hash = "sha256:f6ff2d95cbd335841a7217bdfd9c1d2e44eac2c584197ab1385579d55ff8830e", size = 12951208, upload-time = "2026-01-08T19:12:09.218Z" }, + { url = "https://files.pythonhosted.org/packages/5c/5c/360a35cb7204b328b685d3129c08aca24765ff92b5a7efedbdd6c150d555/ruff-0.14.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f6eb5c1c8033680f4172ea9c8d3706c156223010b8b97b05e82c59bdc774ee6", size = 13330075, upload-time = "2026-01-08T19:12:02.549Z" }, + { url = "https://files.pythonhosted.org/packages/1b/9e/0cc2f1be7a7d33cae541824cf3f95b4ff40d03557b575912b5b70273c9ec/ruff-0.14.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f2fc34cc896f90080fca01259f96c566f74069a04b25b6205d55379d12a6855e", size = 12257809, upload-time = "2026-01-08T19:12:00.366Z" }, + { url = "https://files.pythonhosted.org/packages/a7/e5/5faab97c15bb75228d9f74637e775d26ac703cc2b4898564c01ab3637c02/ruff-0.14.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53386375001773ae812b43205d6064dae49ff0968774e6befe16a994fc233caa", size = 12678447, upload-time = "2026-01-08T19:12:13.899Z" }, + { url = "https://files.pythonhosted.org/packages/1b/33/e9767f60a2bef779fb5855cab0af76c488e0ce90f7bb7b8a45c8a2ba4178/ruff-0.14.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a697737dce1ca97a0a55b5ff0434ee7205943d4874d638fe3ae66166ff46edbe", size = 12758560, upload-time = "2026-01-08T19:11:42.55Z" }, + { url = "https://files.pythonhosted.org/packages/eb/84/4c6cf627a21462bb5102f7be2a320b084228ff26e105510cd2255ea868e5/ruff-0.14.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6845ca1da8ab81ab1dce755a32ad13f1db72e7fba27c486d5d90d65e04d17b8f", size = 13599296, upload-time = "2026-01-08T19:11:30.371Z" }, + { url = "https://files.pythonhosted.org/packages/88/e1/92b5ed7ea66d849f6157e695dc23d5d6d982bd6aa8d077895652c38a7cae/ruff-0.14.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e36ce2fd31b54065ec6f76cb08d60159e1b32bdf08507862e32f47e6dde8bcbf", size = 15048981, upload-time = "2026-01-08T19:12:04.742Z" }, + { url = "https://files.pythonhosted.org/packages/61/df/c1bd30992615ac17c2fb64b8a7376ca22c04a70555b5d05b8f717163cf9f/ruff-0.14.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:590bcc0e2097ecf74e62a5c10a6b71f008ad82eb97b0a0079e85defe19fe74d9", size = 14633183, upload-time = "2026-01-08T19:11:40.069Z" }, + { url = "https://files.pythonhosted.org/packages/04/e9/fe552902f25013dd28a5428a42347d9ad20c4b534834a325a28305747d64/ruff-0.14.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:53fe71125fc158210d57fe4da26e622c9c294022988d08d9347ec1cf782adafe", size = 14050453, upload-time = "2026-01-08T19:11:37.555Z" }, + { url = "https://files.pythonhosted.org/packages/ae/93/f36d89fa021543187f98991609ce6e47e24f35f008dfe1af01379d248a41/ruff-0.14.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a35c9da08562f1598ded8470fcfef2afb5cf881996e6c0a502ceb61f4bc9c8a3", size = 13757889, 
upload-time = "2026-01-08T19:12:07.094Z" }, + { url = "https://files.pythonhosted.org/packages/b7/9f/c7fb6ecf554f28709a6a1f2a7f74750d400979e8cd47ed29feeaa1bd4db8/ruff-0.14.11-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:0f3727189a52179393ecf92ec7057c2210203e6af2676f08d92140d3e1ee72c1", size = 13955832, upload-time = "2026-01-08T19:11:55.064Z" }, + { url = "https://files.pythonhosted.org/packages/db/a0/153315310f250f76900a98278cf878c64dfb6d044e184491dd3289796734/ruff-0.14.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:eb09f849bd37147a789b85995ff734a6c4a095bed5fd1608c4f56afc3634cde2", size = 12586522, upload-time = "2026-01-08T19:11:35.356Z" }, + { url = "https://files.pythonhosted.org/packages/2f/2b/a73a2b6e6d2df1d74bf2b78098be1572191e54bec0e59e29382d13c3adc5/ruff-0.14.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:c61782543c1231bf71041461c1f28c64b961d457d0f238ac388e2ab173d7ecb7", size = 12724637, upload-time = "2026-01-08T19:11:47.796Z" }, + { url = "https://files.pythonhosted.org/packages/f0/41/09100590320394401cd3c48fc718a8ba71c7ddb1ffd07e0ad6576b3a3df2/ruff-0.14.11-py3-none-musllinux_1_2_i686.whl", hash = "sha256:82ff352ea68fb6766140381748e1f67f83c39860b6446966cff48a315c3e2491", size = 13145837, upload-time = "2026-01-08T19:11:32.87Z" }, + { url = "https://files.pythonhosted.org/packages/3b/d8/e035db859d1d3edf909381eb8ff3e89a672d6572e9454093538fe6f164b0/ruff-0.14.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:728e56879df4ca5b62a9dde2dd0eb0edda2a55160c0ea28c4025f18c03f86984", size = 13850469, upload-time = "2026-01-08T19:12:11.694Z" }, + { url = "https://files.pythonhosted.org/packages/4e/02/bb3ff8b6e6d02ce9e3740f4c17dfbbfb55f34c789c139e9cd91985f356c7/ruff-0.14.11-py3-none-win32.whl", hash = "sha256:337c5dd11f16ee52ae217757d9b82a26400be7efac883e9e852646f1557ed841", size = 12851094, upload-time = "2026-01-08T19:11:45.163Z" }, + { url = "https://files.pythonhosted.org/packages/58/f1/90ddc533918d3a2ad628bc3044cdfc094949e6d4b929220c3f0eb8a1c998/ruff-0.14.11-py3-none-win_amd64.whl", hash = "sha256:f981cea63d08456b2c070e64b79cb62f951aa1305282974d4d5216e6e0178ae6", size = 14001379, upload-time = "2026-01-08T19:11:52.591Z" }, + { url = "https://files.pythonhosted.org/packages/c4/1c/1dbe51782c0e1e9cfce1d1004752672d2d4629ea46945d19d731ad772b3b/ruff-0.14.11-py3-none-win_arm64.whl", hash = "sha256:649fb6c9edd7f751db276ef42df1f3df41c38d67d199570ae2a7bd6cbc3590f0", size = 12938644, upload-time = "2026-01-08T19:11:50.027Z" }, ] [[package]] @@ -250,9 +250,9 @@ source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version < '3.10'", ] -sdist = { url = "https://files.pythonhosted.org/packages/ca/6c/3d75c196ac07ac8749600b60b03f4f6094d54e132c4d94ebac6ee0e0add0/termcolor-3.1.0.tar.gz", hash = "sha256:6a6dd7fbee581909eeec6a756cff1d7f7c376063b14e4a298dc4980309e55970", size = 14324 } +sdist = { url = "https://files.pythonhosted.org/packages/ca/6c/3d75c196ac07ac8749600b60b03f4f6094d54e132c4d94ebac6ee0e0add0/termcolor-3.1.0.tar.gz", hash = "sha256:6a6dd7fbee581909eeec6a756cff1d7f7c376063b14e4a298dc4980309e55970", size = 14324, upload-time = "2025-04-30T11:37:53.791Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/4f/bd/de8d508070629b6d84a30d01d57e4a65c69aa7f5abe7560b8fad3b50ea59/termcolor-3.1.0-py3-none-any.whl", hash = "sha256:591dd26b5c2ce03b9e43f391264626557873ce1d379019786f99b0c2bee140aa", size = 7684 }, + { url = 
"https://files.pythonhosted.org/packages/4f/bd/de8d508070629b6d84a30d01d57e4a65c69aa7f5abe7560b8fad3b50ea59/termcolor-3.1.0-py3-none-any.whl", hash = "sha256:591dd26b5c2ce03b9e43f391264626557873ce1d379019786f99b0c2bee140aa", size = 7684, upload-time = "2025-04-30T11:37:52.382Z" }, ] [[package]] @@ -262,79 +262,79 @@ source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version >= '3.10'", ] -sdist = { url = "https://files.pythonhosted.org/packages/46/79/cf31d7a93a8fdc6aa0fbb665be84426a8c5a557d9240b6239e9e11e35fc5/termcolor-3.3.0.tar.gz", hash = "sha256:348871ca648ec6a9a983a13ab626c0acce02f515b9e1983332b17af7979521c5", size = 14434 } +sdist = { url = "https://files.pythonhosted.org/packages/46/79/cf31d7a93a8fdc6aa0fbb665be84426a8c5a557d9240b6239e9e11e35fc5/termcolor-3.3.0.tar.gz", hash = "sha256:348871ca648ec6a9a983a13ab626c0acce02f515b9e1983332b17af7979521c5", size = 14434, upload-time = "2025-12-29T12:55:21.882Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/33/d1/8bb87d21e9aeb323cc03034f5eaf2c8f69841e40e4853c2627edf8111ed3/termcolor-3.3.0-py3-none-any.whl", hash = "sha256:cf642efadaf0a8ebbbf4bc7a31cec2f9b5f21a9f726f4ccbb08192c9c26f43a5", size = 7734 }, + { url = "https://files.pythonhosted.org/packages/33/d1/8bb87d21e9aeb323cc03034f5eaf2c8f69841e40e4853c2627edf8111ed3/termcolor-3.3.0-py3-none-any.whl", hash = "sha256:cf642efadaf0a8ebbbf4bc7a31cec2f9b5f21a9f726f4ccbb08192c9c26f43a5", size = 7734, upload-time = "2025-12-29T12:55:20.718Z" }, ] [[package]] name = "tomli" version = "2.4.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/82/30/31573e9457673ab10aa432461bee537ce6cef177667deca369efb79df071/tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c", size = 17477 } +sdist = { url = "https://files.pythonhosted.org/packages/82/30/31573e9457673ab10aa432461bee537ce6cef177667deca369efb79df071/tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c", size = 17477, upload-time = "2026-01-11T11:22:38.165Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/d9/3dc2289e1f3b32eb19b9785b6a006b28ee99acb37d1d47f78d4c10e28bf8/tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867", size = 153663 }, - { url = "https://files.pythonhosted.org/packages/51/32/ef9f6845e6b9ca392cd3f64f9ec185cc6f09f0a2df3db08cbe8809d1d435/tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9", size = 148469 }, - { url = "https://files.pythonhosted.org/packages/d6/c2/506e44cce89a8b1b1e047d64bd495c22c9f71f21e05f380f1a950dd9c217/tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95", size = 236039 }, - { url = "https://files.pythonhosted.org/packages/b3/40/e1b65986dbc861b7e986e8ec394598187fa8aee85b1650b01dd925ca0be8/tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76", size = 243007 }, - { url = "https://files.pythonhosted.org/packages/9c/6f/6e39ce66b58a5b7ae572a0f4352ff40c71e8573633deda43f6a379d56b3e/tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d", size = 240875 }, - { url = "https://files.pythonhosted.org/packages/aa/ad/cb089cb190487caa80204d503c7fd0f4d443f90b95cf4ef5cf5aa0f439b0/tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576", size = 246271 }, - { url = "https://files.pythonhosted.org/packages/0b/63/69125220e47fd7a3a27fd0de0c6398c89432fec41bc739823bcc66506af6/tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a", size = 96770 }, - { url = "https://files.pythonhosted.org/packages/1e/0d/a22bb6c83f83386b0008425a6cd1fa1c14b5f3dd4bad05e98cf3dbbf4a64/tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa", size = 107626 }, - { url = "https://files.pythonhosted.org/packages/2f/6d/77be674a3485e75cacbf2ddba2b146911477bd887dda9d8c9dfb2f15e871/tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614", size = 94842 }, - { url = "https://files.pythonhosted.org/packages/3c/43/7389a1869f2f26dba52404e1ef13b4784b6b37dac93bac53457e3ff24ca3/tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1", size = 154894 }, - { url = "https://files.pythonhosted.org/packages/e9/05/2f9bf110b5294132b2edf13fe6ca6ae456204f3d749f623307cbb7a946f2/tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8", size = 149053 }, - { url = "https://files.pythonhosted.org/packages/e8/41/1eda3ca1abc6f6154a8db4d714a4d35c4ad90adc0bcf700657291593fbf3/tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a", size = 243481 }, - { url = "https://files.pythonhosted.org/packages/d2/6d/02ff5ab6c8868b41e7d4b987ce2b5f6a51d3335a70aa144edd999e055a01/tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1", size = 251720 }, - { url = "https://files.pythonhosted.org/packages/7b/57/0405c59a909c45d5b6f146107c6d997825aa87568b042042f7a9c0afed34/tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b", size = 247014 }, - { url = "https://files.pythonhosted.org/packages/2c/0e/2e37568edd944b4165735687cbaf2fe3648129e440c26d02223672ee0630/tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51", size = 251820 }, - { url = "https://files.pythonhosted.org/packages/5a/1c/ee3b707fdac82aeeb92d1a113f803cf6d0f37bdca0849cb489553e1f417a/tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729", size = 97712 }, - { url = "https://files.pythonhosted.org/packages/69/13/c07a9177d0b3bab7913299b9278845fc6eaaca14a02667c6be0b0a2270c8/tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da", size = 108296 }, - { url = "https://files.pythonhosted.org/packages/18/27/e267a60bbeeee343bcc279bb9e8fbed0cbe224bc7b2a3dc2975f22809a09/tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = 
"sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3", size = 94553 }, - { url = "https://files.pythonhosted.org/packages/34/91/7f65f9809f2936e1f4ce6268ae1903074563603b2a2bd969ebbda802744f/tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0", size = 154915 }, - { url = "https://files.pythonhosted.org/packages/20/aa/64dd73a5a849c2e8f216b755599c511badde80e91e9bc2271baa7b2cdbb1/tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e", size = 149038 }, - { url = "https://files.pythonhosted.org/packages/9e/8a/6d38870bd3d52c8d1505ce054469a73f73a0fe62c0eaf5dddf61447e32fa/tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4", size = 242245 }, - { url = "https://files.pythonhosted.org/packages/59/bb/8002fadefb64ab2669e5b977df3f5e444febea60e717e755b38bb7c41029/tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e", size = 250335 }, - { url = "https://files.pythonhosted.org/packages/a5/3d/4cdb6f791682b2ea916af2de96121b3cb1284d7c203d97d92d6003e91c8d/tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c", size = 245962 }, - { url = "https://files.pythonhosted.org/packages/f2/4a/5f25789f9a460bd858ba9756ff52d0830d825b458e13f754952dd15fb7bb/tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f", size = 250396 }, - { url = "https://files.pythonhosted.org/packages/aa/2f/b73a36fea58dfa08e8b3a268750e6853a6aac2a349241a905ebd86f3047a/tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86", size = 97530 }, - { url = "https://files.pythonhosted.org/packages/3b/af/ca18c134b5d75de7e8dc551c5234eaba2e8e951f6b30139599b53de9c187/tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87", size = 108227 }, - { url = "https://files.pythonhosted.org/packages/22/c3/b386b832f209fee8073c8138ec50f27b4460db2fdae9ffe022df89a57f9b/tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132", size = 94748 }, - { url = "https://files.pythonhosted.org/packages/f3/c4/84047a97eb1004418bc10bdbcfebda209fca6338002eba2dc27cc6d13563/tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6", size = 154725 }, - { url = "https://files.pythonhosted.org/packages/a8/5d/d39038e646060b9d76274078cddf146ced86dc2b9e8bbf737ad5983609a0/tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc", size = 148901 }, - { url = "https://files.pythonhosted.org/packages/73/e5/383be1724cb30f4ce44983d249645684a48c435e1cd4f8b5cded8a816d3c/tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66", size = 243375 }, - { url = 
"https://files.pythonhosted.org/packages/31/f0/bea80c17971c8d16d3cc109dc3585b0f2ce1036b5f4a8a183789023574f2/tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d", size = 250639 }, - { url = "https://files.pythonhosted.org/packages/2c/8f/2853c36abbb7608e3f945d8a74e32ed3a74ee3a1f468f1ffc7d1cb3abba6/tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702", size = 246897 }, - { url = "https://files.pythonhosted.org/packages/49/f0/6c05e3196ed5337b9fe7ea003e95fd3819a840b7a0f2bf5a408ef1dad8ed/tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8", size = 254697 }, - { url = "https://files.pythonhosted.org/packages/f3/f5/2922ef29c9f2951883525def7429967fc4d8208494e5ab524234f06b688b/tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776", size = 98567 }, - { url = "https://files.pythonhosted.org/packages/7b/31/22b52e2e06dd2a5fdbc3ee73226d763b184ff21fc24e20316a44ccc4d96b/tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475", size = 108556 }, - { url = "https://files.pythonhosted.org/packages/48/3d/5058dff3255a3d01b705413f64f4306a141a8fd7a251e5a495e3f192a998/tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2", size = 96014 }, - { url = "https://files.pythonhosted.org/packages/b8/4e/75dab8586e268424202d3a1997ef6014919c941b50642a1682df43204c22/tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9", size = 163339 }, - { url = "https://files.pythonhosted.org/packages/06/e3/b904d9ab1016829a776d97f163f183a48be6a4deb87304d1e0116a349519/tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0", size = 159490 }, - { url = "https://files.pythonhosted.org/packages/e3/5a/fc3622c8b1ad823e8ea98a35e3c632ee316d48f66f80f9708ceb4f2a0322/tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df", size = 269398 }, - { url = "https://files.pythonhosted.org/packages/fd/33/62bd6152c8bdd4c305ad9faca48f51d3acb2df1f8791b1477d46ff86e7f8/tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d", size = 276515 }, - { url = "https://files.pythonhosted.org/packages/4b/ff/ae53619499f5235ee4211e62a8d7982ba9e439a0fb4f2f351a93d67c1dd2/tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f", size = 273806 }, - { url = "https://files.pythonhosted.org/packages/47/71/cbca7787fa68d4d0a9f7072821980b39fbb1b6faeb5f5cf02f4a5559fa28/tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b", size = 281340 }, - { url = "https://files.pythonhosted.org/packages/f5/00/d595c120963ad42474cf6ee7771ad0d0e8a49d0f01e29576ee9195d9ecdf/tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087", size = 
108106 }, - { url = "https://files.pythonhosted.org/packages/de/69/9aa0c6a505c2f80e519b43764f8b4ba93b5a0bbd2d9a9de6e2b24271b9a5/tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd", size = 120504 }, - { url = "https://files.pythonhosted.org/packages/b3/9f/f1668c281c58cfae01482f7114a4b88d345e4c140386241a1a24dcc9e7bc/tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4", size = 99561 }, - { url = "https://files.pythonhosted.org/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size = 14477 }, + { url = "https://files.pythonhosted.org/packages/3c/d9/3dc2289e1f3b32eb19b9785b6a006b28ee99acb37d1d47f78d4c10e28bf8/tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867", size = 153663, upload-time = "2026-01-11T11:21:45.27Z" }, + { url = "https://files.pythonhosted.org/packages/51/32/ef9f6845e6b9ca392cd3f64f9ec185cc6f09f0a2df3db08cbe8809d1d435/tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9", size = 148469, upload-time = "2026-01-11T11:21:46.873Z" }, + { url = "https://files.pythonhosted.org/packages/d6/c2/506e44cce89a8b1b1e047d64bd495c22c9f71f21e05f380f1a950dd9c217/tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95", size = 236039, upload-time = "2026-01-11T11:21:48.503Z" }, + { url = "https://files.pythonhosted.org/packages/b3/40/e1b65986dbc861b7e986e8ec394598187fa8aee85b1650b01dd925ca0be8/tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76", size = 243007, upload-time = "2026-01-11T11:21:49.456Z" }, + { url = "https://files.pythonhosted.org/packages/9c/6f/6e39ce66b58a5b7ae572a0f4352ff40c71e8573633deda43f6a379d56b3e/tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d", size = 240875, upload-time = "2026-01-11T11:21:50.755Z" }, + { url = "https://files.pythonhosted.org/packages/aa/ad/cb089cb190487caa80204d503c7fd0f4d443f90b95cf4ef5cf5aa0f439b0/tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576", size = 246271, upload-time = "2026-01-11T11:21:51.81Z" }, + { url = "https://files.pythonhosted.org/packages/0b/63/69125220e47fd7a3a27fd0de0c6398c89432fec41bc739823bcc66506af6/tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a", size = 96770, upload-time = "2026-01-11T11:21:52.647Z" }, + { url = "https://files.pythonhosted.org/packages/1e/0d/a22bb6c83f83386b0008425a6cd1fa1c14b5f3dd4bad05e98cf3dbbf4a64/tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa", size = 107626, upload-time = "2026-01-11T11:21:53.459Z" }, + { url = "https://files.pythonhosted.org/packages/2f/6d/77be674a3485e75cacbf2ddba2b146911477bd887dda9d8c9dfb2f15e871/tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614", size 
= 94842, upload-time = "2026-01-11T11:21:54.831Z" }, + { url = "https://files.pythonhosted.org/packages/3c/43/7389a1869f2f26dba52404e1ef13b4784b6b37dac93bac53457e3ff24ca3/tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1", size = 154894, upload-time = "2026-01-11T11:21:56.07Z" }, + { url = "https://files.pythonhosted.org/packages/e9/05/2f9bf110b5294132b2edf13fe6ca6ae456204f3d749f623307cbb7a946f2/tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8", size = 149053, upload-time = "2026-01-11T11:21:57.467Z" }, + { url = "https://files.pythonhosted.org/packages/e8/41/1eda3ca1abc6f6154a8db4d714a4d35c4ad90adc0bcf700657291593fbf3/tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a", size = 243481, upload-time = "2026-01-11T11:21:58.661Z" }, + { url = "https://files.pythonhosted.org/packages/d2/6d/02ff5ab6c8868b41e7d4b987ce2b5f6a51d3335a70aa144edd999e055a01/tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1", size = 251720, upload-time = "2026-01-11T11:22:00.178Z" }, + { url = "https://files.pythonhosted.org/packages/7b/57/0405c59a909c45d5b6f146107c6d997825aa87568b042042f7a9c0afed34/tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b", size = 247014, upload-time = "2026-01-11T11:22:01.238Z" }, + { url = "https://files.pythonhosted.org/packages/2c/0e/2e37568edd944b4165735687cbaf2fe3648129e440c26d02223672ee0630/tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51", size = 251820, upload-time = "2026-01-11T11:22:02.727Z" }, + { url = "https://files.pythonhosted.org/packages/5a/1c/ee3b707fdac82aeeb92d1a113f803cf6d0f37bdca0849cb489553e1f417a/tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729", size = 97712, upload-time = "2026-01-11T11:22:03.777Z" }, + { url = "https://files.pythonhosted.org/packages/69/13/c07a9177d0b3bab7913299b9278845fc6eaaca14a02667c6be0b0a2270c8/tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da", size = 108296, upload-time = "2026-01-11T11:22:04.86Z" }, + { url = "https://files.pythonhosted.org/packages/18/27/e267a60bbeeee343bcc279bb9e8fbed0cbe224bc7b2a3dc2975f22809a09/tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3", size = 94553, upload-time = "2026-01-11T11:22:05.854Z" }, + { url = "https://files.pythonhosted.org/packages/34/91/7f65f9809f2936e1f4ce6268ae1903074563603b2a2bd969ebbda802744f/tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0", size = 154915, upload-time = "2026-01-11T11:22:06.703Z" }, + { url = "https://files.pythonhosted.org/packages/20/aa/64dd73a5a849c2e8f216b755599c511badde80e91e9bc2271baa7b2cdbb1/tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e", size = 149038, upload-time = "2026-01-11T11:22:07.56Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/8a/6d38870bd3d52c8d1505ce054469a73f73a0fe62c0eaf5dddf61447e32fa/tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4", size = 242245, upload-time = "2026-01-11T11:22:08.344Z" }, + { url = "https://files.pythonhosted.org/packages/59/bb/8002fadefb64ab2669e5b977df3f5e444febea60e717e755b38bb7c41029/tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e", size = 250335, upload-time = "2026-01-11T11:22:09.951Z" }, + { url = "https://files.pythonhosted.org/packages/a5/3d/4cdb6f791682b2ea916af2de96121b3cb1284d7c203d97d92d6003e91c8d/tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c", size = 245962, upload-time = "2026-01-11T11:22:11.27Z" }, + { url = "https://files.pythonhosted.org/packages/f2/4a/5f25789f9a460bd858ba9756ff52d0830d825b458e13f754952dd15fb7bb/tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f", size = 250396, upload-time = "2026-01-11T11:22:12.325Z" }, + { url = "https://files.pythonhosted.org/packages/aa/2f/b73a36fea58dfa08e8b3a268750e6853a6aac2a349241a905ebd86f3047a/tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86", size = 97530, upload-time = "2026-01-11T11:22:13.865Z" }, + { url = "https://files.pythonhosted.org/packages/3b/af/ca18c134b5d75de7e8dc551c5234eaba2e8e951f6b30139599b53de9c187/tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87", size = 108227, upload-time = "2026-01-11T11:22:15.224Z" }, + { url = "https://files.pythonhosted.org/packages/22/c3/b386b832f209fee8073c8138ec50f27b4460db2fdae9ffe022df89a57f9b/tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132", size = 94748, upload-time = "2026-01-11T11:22:16.009Z" }, + { url = "https://files.pythonhosted.org/packages/f3/c4/84047a97eb1004418bc10bdbcfebda209fca6338002eba2dc27cc6d13563/tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6", size = 154725, upload-time = "2026-01-11T11:22:17.269Z" }, + { url = "https://files.pythonhosted.org/packages/a8/5d/d39038e646060b9d76274078cddf146ced86dc2b9e8bbf737ad5983609a0/tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc", size = 148901, upload-time = "2026-01-11T11:22:18.287Z" }, + { url = "https://files.pythonhosted.org/packages/73/e5/383be1724cb30f4ce44983d249645684a48c435e1cd4f8b5cded8a816d3c/tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66", size = 243375, upload-time = "2026-01-11T11:22:19.154Z" }, + { url = "https://files.pythonhosted.org/packages/31/f0/bea80c17971c8d16d3cc109dc3585b0f2ce1036b5f4a8a183789023574f2/tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d", size = 250639, upload-time = "2026-01-11T11:22:20.168Z" }, + { url = 
"https://files.pythonhosted.org/packages/2c/8f/2853c36abbb7608e3f945d8a74e32ed3a74ee3a1f468f1ffc7d1cb3abba6/tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702", size = 246897, upload-time = "2026-01-11T11:22:21.544Z" }, + { url = "https://files.pythonhosted.org/packages/49/f0/6c05e3196ed5337b9fe7ea003e95fd3819a840b7a0f2bf5a408ef1dad8ed/tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8", size = 254697, upload-time = "2026-01-11T11:22:23.058Z" }, + { url = "https://files.pythonhosted.org/packages/f3/f5/2922ef29c9f2951883525def7429967fc4d8208494e5ab524234f06b688b/tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776", size = 98567, upload-time = "2026-01-11T11:22:24.033Z" }, + { url = "https://files.pythonhosted.org/packages/7b/31/22b52e2e06dd2a5fdbc3ee73226d763b184ff21fc24e20316a44ccc4d96b/tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475", size = 108556, upload-time = "2026-01-11T11:22:25.378Z" }, + { url = "https://files.pythonhosted.org/packages/48/3d/5058dff3255a3d01b705413f64f4306a141a8fd7a251e5a495e3f192a998/tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2", size = 96014, upload-time = "2026-01-11T11:22:26.138Z" }, + { url = "https://files.pythonhosted.org/packages/b8/4e/75dab8586e268424202d3a1997ef6014919c941b50642a1682df43204c22/tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9", size = 163339, upload-time = "2026-01-11T11:22:27.143Z" }, + { url = "https://files.pythonhosted.org/packages/06/e3/b904d9ab1016829a776d97f163f183a48be6a4deb87304d1e0116a349519/tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0", size = 159490, upload-time = "2026-01-11T11:22:28.399Z" }, + { url = "https://files.pythonhosted.org/packages/e3/5a/fc3622c8b1ad823e8ea98a35e3c632ee316d48f66f80f9708ceb4f2a0322/tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df", size = 269398, upload-time = "2026-01-11T11:22:29.345Z" }, + { url = "https://files.pythonhosted.org/packages/fd/33/62bd6152c8bdd4c305ad9faca48f51d3acb2df1f8791b1477d46ff86e7f8/tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d", size = 276515, upload-time = "2026-01-11T11:22:30.327Z" }, + { url = "https://files.pythonhosted.org/packages/4b/ff/ae53619499f5235ee4211e62a8d7982ba9e439a0fb4f2f351a93d67c1dd2/tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f", size = 273806, upload-time = "2026-01-11T11:22:32.56Z" }, + { url = "https://files.pythonhosted.org/packages/47/71/cbca7787fa68d4d0a9f7072821980b39fbb1b6faeb5f5cf02f4a5559fa28/tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b", size = 281340, upload-time = "2026-01-11T11:22:33.505Z" }, + { url = 
"https://files.pythonhosted.org/packages/f5/00/d595c120963ad42474cf6ee7771ad0d0e8a49d0f01e29576ee9195d9ecdf/tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087", size = 108106, upload-time = "2026-01-11T11:22:34.451Z" }, + { url = "https://files.pythonhosted.org/packages/de/69/9aa0c6a505c2f80e519b43764f8b4ba93b5a0bbd2d9a9de6e2b24271b9a5/tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd", size = 120504, upload-time = "2026-01-11T11:22:35.764Z" }, + { url = "https://files.pythonhosted.org/packages/b3/9f/f1668c281c58cfae01482f7114a4b88d345e4c140386241a1a24dcc9e7bc/tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4", size = 99561, upload-time = "2026-01-11T11:22:36.624Z" }, + { url = "https://files.pythonhosted.org/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size = 14477, upload-time = "2026-01-11T11:22:37.446Z" }, ] [[package]] name = "typing-extensions" version = "4.15.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391 } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614 }, + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, ] [[package]] name = "zipp" version = "3.23.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547 } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276 }, + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = 
"2025-06-08T17:06:38.034Z" }, ]