packages/opentelemetry-instrumentation-langchain/pyproject.toml

@@ -90,5 +90,3 @@ select = ["E", "F", "W"]
 [tool.uv]
 constraint-dependencies = ["urllib3>=2.6.3", "langgraph-checkpoint>=4.0.0", "pip>=25.3"]
 
-[tool.uv.sources]
-opentelemetry-semantic-conventions-ai = { path = "../opentelemetry-semantic-conventions-ai", editable = true }
28 changes: 12 additions & 16 deletions packages/opentelemetry-instrumentation-langchain/uv.lock

Large diffs are not rendered by default.

packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py

@@ -140,6 +140,81 @@ class SpanAttributes:
     GEN_AI_WATSONX_MIN_NEW_TOKENS = "llm.watsonx.min_new_tokens"
     GEN_AI_WATSONX_REPETITION_PENALTY = "llm.watsonx.repetition_penalty"
 
+    # -----------------------------------------------------------------------
+    # Legacy LLM_* aliases (old names with original string values)
+    # These constants were removed or renamed in v0.5.0.
+    # Kept so non-migrated instrumentation packages continue to work.
+    # TODO: Remove once all instrumentation packages are migrated.
+    # -----------------------------------------------------------------------
+
+    # Removed from SpanAttributes in v0.5.0 (now in upstream OTel gen_ai_attributes)
+    # TODO: migrate each to its GenAIAttributes.GEN_AI_* counterpart
+    LLM_SYSTEM = "gen_ai.system"
+    LLM_REQUEST_MODEL = "gen_ai.request.model"
+    LLM_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens"
+    LLM_REQUEST_TEMPERATURE = "gen_ai.request.temperature"
+    LLM_REQUEST_TOP_P = "gen_ai.request.top_p"
+    LLM_PROMPTS = "gen_ai.prompt"
+    LLM_COMPLETIONS = "gen_ai.completion"
+    LLM_RESPONSE_MODEL = "gen_ai.response.model"
+    LLM_USAGE_COMPLETION_TOKENS = "gen_ai.usage.completion_tokens"
+    LLM_USAGE_PROMPT_TOKENS = "gen_ai.usage.prompt_tokens"
+    LLM_TOKEN_TYPE = "gen_ai.token.type"
+    LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT = "gen_ai.openai.system_fingerprint"
+    LLM_FREQUENCY_PENALTY = "llm.frequency_penalty"
+    LLM_PRESENCE_PENALTY = "llm.presence_penalty"
+    LLM_TOP_K = "llm.top_k"
+    LLM_CHAT_STOP_SEQUENCES = "llm.chat.stop_sequences"
+    LLM_REQUEST_FUNCTIONS = "llm.request.functions"
+    LLM_REQUEST_TYPE = "llm.request.type"
+
+    # Renamed LLM_* -> GEN_AI_* in v0.5.0 (name AND value changed: llm.* -> gen_ai.*)
+    # TODO: migrate each to its SpanAttributes.GEN_AI_* counterpart
+    LLM_USAGE_TOTAL_TOKENS = "llm.usage.total_tokens"
+    LLM_USAGE_TOKEN_TYPE = "llm.usage.token_type"
+    LLM_USER = "llm.user"
+    LLM_HEADERS = "llm.headers"
+    LLM_IS_STREAMING = "llm.is_streaming"
+    LLM_REQUEST_REPETITION_PENALTY = "llm.request.repetition_penalty"
+    LLM_RESPONSE_FINISH_REASON = "llm.response.finish_reason"
+    LLM_RESPONSE_STOP_REASON = "llm.response.stop_reason"
+    LLM_CONTENT_COMPLETION_CHUNK = "llm.content.completion.chunk"
+    LLM_REQUEST_REASONING_EFFORT = "llm.request.reasoning_effort"
+    LLM_USAGE_REASONING_TOKENS = "llm.usage.reasoning_tokens"
+
+    # Renamed LLM_* -> GEN_AI_* in v0.5.0 (name changed, value UNCHANGED)
+    # TODO: migrate each to its SpanAttributes.GEN_AI_* counterpart
+    LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA = "gen_ai.request.structured_output_schema"
+    LLM_REQUEST_REASONING_SUMMARY = "gen_ai.request.reasoning_summary"
+    LLM_RESPONSE_REASONING_EFFORT = "gen_ai.response.reasoning_effort"
+
+    # OpenAI (renamed LLM_OPENAI_* -> GEN_AI_OPENAI_* in v0.5.0, value unchanged)
+    # TODO: migrate each to its SpanAttributes.GEN_AI_OPENAI_* counterpart
+    LLM_OPENAI_API_BASE = "gen_ai.openai.api_base"
+    LLM_OPENAI_API_VERSION = "gen_ai.openai.api_version"
+    LLM_OPENAI_API_TYPE = "gen_ai.openai.api_type"
+
+    # Watsonx (renamed LLM_* -> GEN_AI_WATSONX_* in v0.5.0, value unchanged)
+    # TODO: migrate each to its SpanAttributes.GEN_AI_WATSONX_* counterpart
+    LLM_DECODING_METHOD = "llm.watsonx.decoding_method"
+    LLM_RANDOM_SEED = "llm.watsonx.random_seed"
+    LLM_MAX_NEW_TOKENS = "llm.watsonx.max_new_tokens"
+    LLM_MIN_NEW_TOKENS = "llm.watsonx.min_new_tokens"
+    LLM_REPETITION_PENALTY = "llm.watsonx.repetition_penalty"
+
+    # Cache attributes -- VALUE changed in v0.5.0 (underscore -> dot separator).
+    # LLM_* aliases: for packages using the old LLM_* constant name.
+    # GEN_AI_*_DEPRECATED: for packages already using the GEN_AI_* name but expecting the old value.
+    # TODO: migrate to SpanAttributes.GEN_AI_USAGE_CACHE_*_INPUT_TOKENS
+    LLM_USAGE_CACHE_CREATION_INPUT_TOKENS = "gen_ai.usage.cache_creation_input_tokens"
+    LLM_USAGE_CACHE_READ_INPUT_TOKENS = "gen_ai.usage.cache_read_input_tokens"
+    GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS_DEPRECATED = (
+        "gen_ai.usage.cache_creation_input_tokens"
+    )
+    GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS_DEPRECATED = (
+        "gen_ai.usage.cache_read_input_tokens"
+    )
+
     # Chroma db
     CHROMADB_ADD_IDS_COUNT = "db.chroma.add.ids_count"
     CHROMADB_ADD_EMBEDDINGS_COUNT = "db.chroma.add.embeddings_count"
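Taken together, the aliases restore the following contract for downstream packages. A minimal sketch (assuming this package's import path opentelemetry.semconv_ai and the upstream incubating gen_ai_attributes module; the asserted strings are the values added above):

# Compatibility sketch; assumptions noted in the lead-in, not part of this diff.
from opentelemetry.semconv._incubating.attributes import gen_ai_attributes
from opentelemetry.semconv_ai import SpanAttributes

# Aliases for constants removed in v0.5.0 carry the same wire value as upstream OTel:
assert SpanAttributes.LLM_REQUEST_MODEL == gen_ai_attributes.GEN_AI_REQUEST_MODEL
assert SpanAttributes.LLM_PROMPTS == "gen_ai.prompt"

# Renamed aliases keep their original llm.* values (e.g. Watsonx):
assert SpanAttributes.LLM_DECODING_METHOD == "llm.watsonx.decoding_method"

# Cache aliases keep the pre-v0.5.0 underscore-separated value:
assert SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS == "gen_ai.usage.cache_read_input_tokens"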
@@ -85,39 +85,61 @@ def test_gen_ai_openai_api_type(self):
 # ---------------------------------------------------------------------------
 
 
-class TestSpanAttributesOldNamesGone:
-    """Assert that removed LLM_* constants no longer exist on SpanAttributes."""
+class TestSpanAttributesLegacyLLMNamesPresent:
+    """Assert that legacy LLM_* constants are still available for non-migrated packages."""
 
     @pytest.mark.parametrize(
-        "old_name",
+        "legacy_name,expected_value",
         [
-            "LLM_SYSTEM",
-            "LLM_REQUEST_MODEL",
-            "LLM_REQUEST_MAX_TOKENS",
-            "LLM_REQUEST_TEMPERATURE",
-            "LLM_REQUEST_TOP_P",
-            "LLM_PROMPTS",
-            "LLM_COMPLETIONS",
-            "LLM_RESPONSE_MODEL",
-            "LLM_USAGE_COMPLETION_TOKENS",
-            "LLM_USAGE_PROMPT_TOKENS",
-            "LLM_USAGE_CACHE_CREATION_INPUT_TOKENS",
-            "LLM_USAGE_CACHE_READ_INPUT_TOKENS",
-            "LLM_TOKEN_TYPE",
-            "LLM_REQUEST_TYPE",
-            "LLM_FREQUENCY_PENALTY",
-            "LLM_PRESENCE_PENALTY",
-            "LLM_CHAT_STOP_SEQUENCES",
-            "LLM_REQUEST_FUNCTIONS",
-            "LLM_TOP_K",
-            "LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT",
+            ("LLM_SYSTEM", "gen_ai.system"),
+            ("LLM_REQUEST_MODEL", "gen_ai.request.model"),
+            ("LLM_REQUEST_MAX_TOKENS", "gen_ai.request.max_tokens"),
+            ("LLM_REQUEST_TEMPERATURE", "gen_ai.request.temperature"),
+            ("LLM_REQUEST_TOP_P", "gen_ai.request.top_p"),
+            ("LLM_PROMPTS", "gen_ai.prompt"),
+            ("LLM_COMPLETIONS", "gen_ai.completion"),
+            ("LLM_RESPONSE_MODEL", "gen_ai.response.model"),
+            ("LLM_USAGE_COMPLETION_TOKENS", "gen_ai.usage.completion_tokens"),
+            ("LLM_USAGE_PROMPT_TOKENS", "gen_ai.usage.prompt_tokens"),
+            ("LLM_USAGE_CACHE_CREATION_INPUT_TOKENS", "gen_ai.usage.cache_creation_input_tokens"),
+            ("LLM_USAGE_CACHE_READ_INPUT_TOKENS", "gen_ai.usage.cache_read_input_tokens"),
+            ("LLM_TOKEN_TYPE", "gen_ai.token.type"),
+            ("LLM_REQUEST_TYPE", "llm.request.type"),
+            ("LLM_FREQUENCY_PENALTY", "llm.frequency_penalty"),
+            ("LLM_PRESENCE_PENALTY", "llm.presence_penalty"),
+            ("LLM_CHAT_STOP_SEQUENCES", "llm.chat.stop_sequences"),
+            ("LLM_REQUEST_FUNCTIONS", "llm.request.functions"),
+            ("LLM_TOP_K", "llm.top_k"),
+            ("LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT", "gen_ai.openai.system_fingerprint"),
+            ("LLM_IS_STREAMING", "llm.is_streaming"),
+            ("LLM_USAGE_TOTAL_TOKENS", "llm.usage.total_tokens"),
+            ("LLM_USER", "llm.user"),
+            ("LLM_HEADERS", "llm.headers"),
+            ("LLM_RESPONSE_FINISH_REASON", "llm.response.finish_reason"),
+            ("LLM_RESPONSE_STOP_REASON", "llm.response.stop_reason"),
+            ("LLM_CONTENT_COMPLETION_CHUNK", "llm.content.completion.chunk"),
+            ("LLM_REQUEST_REASONING_EFFORT", "llm.request.reasoning_effort"),
+            ("LLM_USAGE_REASONING_TOKENS", "llm.usage.reasoning_tokens"),
+            ("LLM_USAGE_TOKEN_TYPE", "llm.usage.token_type"),
+            ("LLM_REQUEST_REPETITION_PENALTY", "llm.request.repetition_penalty"),
+            ("LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA", "gen_ai.request.structured_output_schema"),
+            ("LLM_REQUEST_REASONING_SUMMARY", "gen_ai.request.reasoning_summary"),
+            ("LLM_RESPONSE_REASONING_EFFORT", "gen_ai.response.reasoning_effort"),
+            ("LLM_OPENAI_API_BASE", "gen_ai.openai.api_base"),
+            ("LLM_OPENAI_API_VERSION", "gen_ai.openai.api_version"),
+            ("LLM_OPENAI_API_TYPE", "gen_ai.openai.api_type"),
+            ("LLM_DECODING_METHOD", "llm.watsonx.decoding_method"),
+            ("LLM_RANDOM_SEED", "llm.watsonx.random_seed"),
+            ("LLM_MAX_NEW_TOKENS", "llm.watsonx.max_new_tokens"),
+            ("LLM_MIN_NEW_TOKENS", "llm.watsonx.min_new_tokens"),
+            ("LLM_REPETITION_PENALTY", "llm.watsonx.repetition_penalty"),
         ],
     )
-    def test_old_name_absent(self, old_name):
-        assert not hasattr(SpanAttributes, old_name), (
-            f"SpanAttributes.{old_name} should have been removed. "
-            "Consumers should import from opentelemetry.semconv._incubating.attributes.gen_ai_attributes directly."
+    def test_legacy_name_present_with_old_value(self, legacy_name, expected_value):
+        assert hasattr(SpanAttributes, legacy_name), (
+            f"SpanAttributes.{legacy_name} must exist for backward compatibility."
         )
+        assert getattr(SpanAttributes, legacy_name) == expected_value
 
 
 # ---------------------------------------------------------------------------
@@ -181,14 +203,16 @@ class TestSpanAttributesOldValuesAbsent:
"gen_ai.usage.cache_creation_input_tokens", # underscore variant (pre-migration)
],
)
def test_old_value_not_in_span_attributes(self, old_value):
def test_old_value_not_in_new_span_attributes(self, old_value):
all_values = {
name: value
for name, value in vars(SpanAttributes).items()
if not name.startswith("_") and isinstance(value, str)
and not name.startswith("LLM_") # exclude legacy aliases
and not name.endswith("_DEPRECATED") # exclude deprecated aliases
}
assert old_value not in all_values.values(), (
f"Old attribute value {old_value!r} is still present in SpanAttributes. "
f"Old attribute value {old_value!r} is still present in a GEN_AI_* SpanAttribute. "
f"It should have been renamed."
)

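The reflection filter in that last test also stands alone. A standalone sketch (same opentelemetry.semconv_ai import-path assumption as above) that lists only the current, non-legacy attribute values:

# Hedged sketch: enumerate canonical values, skipping legacy LLM_* aliases
# and *_DEPRECATED spellings, mirroring the test's filter.
from opentelemetry.semconv_ai import SpanAttributes

current_values = {
    name: value
    for name, value in vars(SpanAttributes).items()
    if isinstance(value, str)
    and not name.startswith("_")
    and not name.startswith("LLM_")       # legacy aliases
    and not name.endswith("_DEPRECATED")  # old-value spellings
}
print(f"{len(current_values)} canonical span attributes")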
packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/version.py

@@ -1 +1 @@
-__version__ = "0.5.0"
+__version__ = "0.5.1"
packages/opentelemetry-semantic-conventions-ai/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "opentelemetry-semantic-conventions-ai"
-version = "0.5.0"
+version = "0.5.1"
 description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
 authors = [
     { name = "Gal Kleinman", email = "gal@traceloop.com" },
2 changes: 1 addition & 1 deletion packages/opentelemetry-semantic-conventions-ai/uv.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion packages/sample-app/pyproject.toml
@@ -23,7 +23,7 @@ dependencies = [
     "transformers>=4.46.0,<5",
     "replicate>=0.22.0,<0.23.0",
     "cohere>=5.11.1,<6",
-    "anthropic>=0.37.1,<0.38.0",
+    "anthropic>=0.86.0,<1",
     "google-cloud-aiplatform>=1.81.0,<2",
     "python-dotenv>=1.0.1,<2",
     "langchain>=1.0.0,<2.0.0",
7 changes: 5 additions & 2 deletions packages/sample-app/sample_app/anthropic_joke_example.py
@@ -3,7 +3,10 @@
 from traceloop.sdk import Traceloop
 from traceloop.sdk.decorators import workflow
 
-Traceloop.init()
+Traceloop.init(
+    app_name="sample-app",
+    disable_batch=True,
+)
 
 
 @workflow(name="pirate_joke_generator")
@@ -17,7 +20,7 @@ def joke_workflow():
                 "content": "Tell me a joke about OpenTelemetry",
             }
         ],
-        model="claude-3-opus-20240229",
+        model="claude-haiku-4-5",
     )
     print(response.content)
     return response
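For a short-lived script like this sample, disable_batch matters because the default batch processor queues spans and can drop them if the process exits before the flush interval fires. A sketch with the standard OTel SDK processors (that Traceloop.init(disable_batch=True) amounts to synchronous export is an assumption, not something this diff confirms):

# Sketch of the batching trade-off using plain OTel SDK primitives.
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
    BatchSpanProcessor,    # default: queues spans; a short-lived script may exit first
    ConsoleSpanExporter,
    SimpleSpanProcessor,   # synchronous: exports each span as it ends
)

provider = TracerProvider()
# Roughly what disable_batch=True is assumed to select:
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
# The batching default would instead be:
# provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))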
@@ -17,7 +17,7 @@ def joke_workflow():
                 "content": "Tell me a joke about OpenTelemetry",
             }
         ],
-        model="claude-3-haiku-20240307",
+        model="claude-haiku-4-5",
         stream=True,
     )
     response_content = ""
@@ -31,7 +31,7 @@ def main():
     }
 
     response = client.beta.messages.create(
-        model="claude-sonnet-4-5-20250929",
+        model="claude-haiku-4-5",
         max_tokens=1024,
         betas=["structured-outputs-2025-11-13"],
         messages=[
@@ -30,7 +30,7 @@ async def task():
 
     client = anthropic.AsyncAnthropic(api_key=api_key)
     message = await client.messages.create(
-        model="claude-3-5-sonnet-20240620",
+        model="claude-haiku-4-5",
        max_tokens=1024,
         messages=[
             {"role": "user", "content": "You are a sub-system ..."},
4 changes: 2 additions & 2 deletions packages/sample-app/sample_app/async_anthropic_example.py
@@ -16,7 +16,7 @@ async def generate_joke(self):
         response = await anthropic.messages.create(
             max_tokens=1024,
             messages=[{"role": "user", "content": "Tell me a joke about Donald Trump"}],
-            model="claude-3-haiku-20240307",
+            model="claude-haiku-4-5",
         )
         return response.content[0].text
 
@@ -33,7 +33,7 @@ async def generation_helper(self):
             messages=[
                 {"role": "user", "content": "Tell me a joke about Donald Trump"},
             ],
-            model="claude-3-haiku-20240307",
+            model="claude-haiku-4-5",
         )
 
         return response.content[0].text
@@ -16,7 +16,7 @@ async def generate_joke(self):
         response = await anthropic.messages.create(
             max_tokens=1024,
             messages=[{"role": "user", "content": "Tell me a joke about Donald Trump"}],
-            model="claude-3-haiku-20240307",
+            model="claude-haiku-4-5",
             stream=True,
             top_p=0.9,
         )
@@ -39,7 +39,7 @@ async def generation_helper(self):
             messages=[
                 {"role": "user", "content": "Tell me a joke about Donald Trump"},
             ],
-            model="claude-3-haiku-20240307",
+            model="claude-haiku-4-5",
             stream=True,
             top_k=50,
         )
3 changes: 1 addition & 2 deletions packages/traceloop-sdk/pyproject.toml
@@ -86,7 +86,7 @@ test = [
     "pytest-recording>=0.13.1,<0.14.0",
     "pydantic<3",
     "pytest-asyncio>=0.23.7,<1.4.0",
-    "anthropic>=0.25.2,<0.85.0",
+    "anthropic>=0.86.0,<1",
     "langchain>=1.0.0,<2.0.0",
     "langchain-openai>=1.0.0,<2.0.0",
     "pandas>=1.0.0",
@@ -155,7 +155,6 @@ select = ["E", "F", "W"]
 constraint-dependencies = ["urllib3>=2.6.3", "pip>=25.3"]
 
 [tool.uv.sources]
-opentelemetry-semantic-conventions-ai = { path = "../opentelemetry-semantic-conventions-ai", editable = true }
 opentelemetry-instrumentation-agno = { path = "../opentelemetry-instrumentation-agno", editable = true }
 opentelemetry-instrumentation-alephalpha = { path = "../opentelemetry-instrumentation-alephalpha", editable = true }
 opentelemetry-instrumentation-anthropic = { path = "../opentelemetry-instrumentation-anthropic", editable = true }