Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
9eff4ac
feat: Anthropic to gen-ai OTEL attributes
max-deygin-servicenow Mar 16, 2026
54e2520
Fixed a double encoded json
max-deygin-servicenow Mar 16, 2026
c3a092b
fix: lint failures
max-deygin-traceloop Mar 16, 2026
e6361a4
Added otel conv tests for Anthropic package
max-deygin-traceloop Mar 17, 2026
51862e5
feat(anthropic): use upstream gen_ai constants directly, rename LLM_ …
max-deygin-traceloop Mar 18, 2026
62852a0
fix(anthropic): use SpanAttributes constant for structured_output_schema
max-deygin-traceloop Mar 18, 2026
502339f
fix(anthropic): remove unused SpanAttributes import in test_semconv_s…
max-deygin-traceloop Mar 18, 2026
637515c
fix(anthropic): update tests to use renamed/changed attribute strings
max-deygin-traceloop Mar 18, 2026
480febe
removed redundant GEN_AI_SYSTEM attr
max-deygin-traceloop Mar 22, 2026
5a7b9c9
fix(anthropic): conform message attributes to OTel GenAI JSON schemas
max-deygin-traceloop Mar 23, 2026
470f3dc
uv lock bump
max-deygin-traceloop Mar 23, 2026
8d55177
chore(anthropic): bump anthropic SDK from 0.76 to 0.86
max-deygin-traceloop Mar 23, 2026
d48203a
fix(anthropic): fix lint line length in test_messages.py
max-deygin-traceloop Mar 23, 2026
53cbe2e
fix(anthropic): align instrumentation with OTel GenAI semconv v1.40.0
max-deygin-traceloop Mar 29, 2026
79cd646
Merge branch 'main' into max/tlp-1926-anthropic-instrumentation
OzBenSimhonTraceloop Mar 29, 2026
c5c0fe9
CR fixes #3
max-deygin-traceloop Mar 29, 2026
a57ee6d
CR comments #4
max-deygin-traceloop Mar 29, 2026
3e4b487
version
max-deygin-traceloop Mar 29, 2026
84b8c21
Merge branch 'main' into max/tlp-1926-anthropic-instrumentation
OzBenSimhonTraceloop Mar 29, 2026
4962b65
small-fix
OzBenSimhonTraceloop Mar 29, 2026
dc35511
lint
OzBenSimhonTraceloop Mar 29, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
emit_response_events,
)
from opentelemetry.instrumentation.anthropic.span_utils import (
_map_finish_reason,
aset_input_attributes,
set_response_attributes,
)
Expand All @@ -39,9 +40,12 @@
from opentelemetry.semconv._incubating.attributes import (
gen_ai_attributes as GenAIAttributes,
)
from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
GenAiOperationNameValues,
GenAiSystemValues,
)
from opentelemetry.semconv_ai import (
SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
LLMRequestTypeValues,
Meters,
SpanAttributes,
)
Expand Down Expand Up @@ -278,22 +282,24 @@ async def _aset_token_usage(
choices,
attributes={
**metric_attributes,
SpanAttributes.LLM_RESPONSE_STOP_REASON: getattr(response, "stop_reason", None),
SpanAttributes.GEN_AI_RESPONSE_FINISH_REASON: _map_finish_reason(
getattr(response, "stop_reason", None)
),
},
)

set_span_attribute(span, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
set_span_attribute(
span, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, completion_tokens
)
set_span_attribute(span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens)
set_span_attribute(span, SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens)

set_span_attribute(
span, SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS_DEPRECATED, cache_read_tokens
span, SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens
)
set_span_attribute(
span,
SpanAttributes.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS_DEPRECATED,
SpanAttributes.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS,
cache_creation_tokens,
)

Expand Down Expand Up @@ -392,22 +398,24 @@ def _set_token_usage(
choices,
attributes={
**metric_attributes,
SpanAttributes.LLM_RESPONSE_STOP_REASON: getattr(response, "stop_reason", None),
SpanAttributes.GEN_AI_RESPONSE_FINISH_REASON: _map_finish_reason(
getattr(response, "stop_reason", None)
),
},
)

set_span_attribute(span, GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
set_span_attribute(
span, GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS, completion_tokens
)
set_span_attribute(span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens)
set_span_attribute(span, SpanAttributes.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens)

set_span_attribute(
span, SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS_DEPRECATED, cache_read_tokens
span, SpanAttributes.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens
)
set_span_attribute(
span,
SpanAttributes.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS_DEPRECATED,
SpanAttributes.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS,
cache_creation_tokens,
)

Expand Down Expand Up @@ -537,12 +545,17 @@ def _wrap(
return wrapped(*args, **kwargs)

name = to_wrap.get("span_name")
operation_name = (
GenAiOperationNameValues.TEXT_COMPLETION.value
if name == "anthropic.completion"
else GenAiOperationNameValues.CHAT.value
)
span = tracer.start_span(
name,
kind=SpanKind.CLIENT,
attributes={
GenAIAttributes.GEN_AI_SYSTEM: "Anthropic",
SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
GenAIAttributes.GEN_AI_PROVIDER_NAME: GenAiSystemValues.ANTHROPIC.value,
GenAIAttributes.GEN_AI_OPERATION_NAME: operation_name,
},
)

Expand Down Expand Up @@ -661,12 +674,17 @@ async def _awrap(
return await wrapped(*args, **kwargs)

name = to_wrap.get("span_name")
operation_name = (
GenAiOperationNameValues.TEXT_COMPLETION.value
if name == "anthropic.completion"
else GenAiOperationNameValues.CHAT.value
)
span = tracer.start_span(
name,
kind=SpanKind.CLIENT,
attributes={
GenAIAttributes.GEN_AI_SYSTEM: "Anthropic",
SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
GenAIAttributes.GEN_AI_PROVIDER_NAME: GenAiSystemValues.ANTHROPIC.value,
GenAIAttributes.GEN_AI_OPERATION_NAME: operation_name,
},
)
await _ahandle_input(span, event_logger, kwargs)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
MessageEvent,
ToolCall,
)
from opentelemetry.instrumentation.anthropic.span_utils import _map_finish_reason
from opentelemetry.instrumentation.anthropic.utils import (
should_emit_events,
should_send_prompts,
Expand All @@ -29,7 +30,7 @@ class Roles(Enum):
"""The valid roles for naming the message event."""

EVENT_ATTRIBUTES = {
GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.ANTHROPIC.value
GenAIAttributes.GEN_AI_PROVIDER_NAME: GenAIAttributes.GenAiSystemValues.ANTHROPIC.value
}
"""The attributes to be used for the event."""

Expand Down Expand Up @@ -69,7 +70,7 @@ def emit_response_events(event_logger: Optional[Logger], response):
"content": response.get("completion"),
"role": response.get("role", "assistant"),
},
finish_reason=response.get("stop_reason"),
finish_reason=_map_finish_reason(response.get("stop_reason")),
),
event_logger,
)
Expand Down Expand Up @@ -117,7 +118,7 @@ def emit_response_events(event_logger: Optional[Logger], response):
ChoiceEvent(
index=i,
message=message,
finish_reason=response.get("stop_reason"),
finish_reason=_map_finish_reason(response.get("stop_reason")),
tool_calls=tool_calls,
),
event_logger,
Expand Down Expand Up @@ -146,7 +147,7 @@ def emit_streaming_response_events(
"content": None,
"role": message.get("role", "assistant"),
},
finish_reason=message.get("finish_reason", "unknown"),
finish_reason=_map_finish_reason(message.get("finish_reason")),
tool_calls=tool_calls,
)
else:
Expand All @@ -159,7 +160,7 @@ def emit_streaming_response_events(
},
"role": message.get("role", "assistant"),
},
finish_reason=message.get("finish_reason", "unknown"),
finish_reason=_map_finish_reason(message.get("finish_reason")),
)
emit_event(event, event_logger)

Expand Down
Loading
Loading