diff --git a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/__init__.py b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/__init__.py index 1eddfbcb3d..3f19a71932 100644 --- a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/__init__.py +++ b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/__init__.py @@ -12,6 +12,13 @@ TraceloopCallbackHandler, ) from opentelemetry.instrumentation.langchain.config import Config +from opentelemetry.instrumentation.langchain.patch import ( + create_graph_invocation_wrapper, + create_command_init_wrapper, + create_middleware_hook_wrapper, + create_async_middleware_hook_wrapper, + create_agent_wrapper, +) from opentelemetry.instrumentation.langchain.utils import is_package_available from opentelemetry.instrumentation.langchain.version import __version__ from opentelemetry.instrumentation.utils import unwrap @@ -96,6 +103,9 @@ def _instrument(self, **kwargs): wrapper=_BaseCallbackManagerInitWrapper(traceloopCallbackHandler), ) + # Wrap LangGraph components if available + self._wrap_langgraph_components(tracer) + if not self.disable_trace_context_propagation: self._wrap_openai_functions_for_tracing(traceloopCallbackHandler) @@ -179,8 +189,160 @@ def _wrap_openai_functions_for_tracing(self, traceloopCallbackHandler): # wrapper=openai_tracing_wrapper, # ) + def _wrap_langgraph_components(self, tracer): + """Wrap LangGraph components for instrumentation.""" + # Wrap Pregel.stream and Pregel.astream (graph invocation) + if is_package_available("langgraph"): + try: + wrap_function_wrapper( + module="langgraph.pregel", + name="Pregel.stream", + wrapper=create_graph_invocation_wrapper(tracer, is_async=False), + ) + wrap_function_wrapper( + module="langgraph.pregel", + name="Pregel.astream", + wrapper=create_graph_invocation_wrapper(tracer, is_async=True), + ) + except Exception as e: + logger.debug("Failed to wrap Pregel methods: %s", e) + + # Wrap Command.__init__ to capture routing commands + try: + wrap_function_wrapper( + module="langgraph.types", + name="Command.__init__", + wrapper=create_command_init_wrapper(tracer), + ) + except Exception as e: + logger.debug("Failed to wrap Command.__init__: %s", e) + + # Wrap AgentMiddleware hooks if langchain is available + if is_package_available("langchain"): + self._wrap_middleware_hooks(tracer) + + # Wrap agent factories (method checks langgraph/langchain availability internally) + self._wrap_agent_factories(tracer) + + def _wrap_agent_factories(self, tracer): + """Wrap agent factory functions for instrumentation.""" + # LangGraph prebuilt agents - patch both actual module and re-export location + if is_package_available("langgraph"): + langgraph_agent_wrapper = create_agent_wrapper(tracer, provider_name="langgraph") + # Patch the actual module where the function is defined + try: + wrap_function_wrapper( + module="langgraph.prebuilt.chat_agent_executor", + name="create_react_agent", + wrapper=langgraph_agent_wrapper, + ) + except Exception as e: + logger.debug("Failed to wrap langgraph.prebuilt.chat_agent_executor.create_react_agent: %s", e) + # Also patch the re-export location for imports from langgraph.prebuilt + try: + wrap_function_wrapper( + module="langgraph.prebuilt", + name="create_react_agent", + wrapper=langgraph_agent_wrapper, + ) + except Exception as e: + logger.debug("Failed to wrap langgraph.prebuilt.create_react_agent: %s", e) + + # 
LangChain agents - patch both actual module and re-export location + if is_package_available("langchain"): + agent_wrapper = create_agent_wrapper(tracer, provider_name="langchain") + # Patch the actual module where the function is defined + try: + wrap_function_wrapper( + module="langchain.agents.factory", + name="create_agent", + wrapper=agent_wrapper, + ) + except Exception as e: + logger.debug("Failed to wrap langchain.agents.factory.create_agent: %s", e) + # Also patch the re-export location for imports from langchain.agents + try: + wrap_function_wrapper( + module="langchain.agents", + name="create_agent", + wrapper=agent_wrapper, + ) + except Exception as e: + logger.debug("Failed to wrap langchain.agents.create_agent: %s", e) + + def _wrap_middleware_hooks(self, tracer): + """Wrap AgentMiddleware hook methods for instrumentation.""" + # Sync hooks + sync_hooks = ["before_model", "after_model", "before_agent", "after_agent"] + for hook_name in sync_hooks: + try: + wrap_function_wrapper( + module="langchain.agents.middleware.types", + name=f"AgentMiddleware.{hook_name}", + wrapper=create_middleware_hook_wrapper(tracer, hook_name), + ) + except Exception as e: + logger.debug("Failed to wrap AgentMiddleware.%s: %s", hook_name, e) + + # Async hooks + async_hooks = ["abefore_model", "aafter_model", "abefore_agent", "aafter_agent"] + for hook_name in async_hooks: + try: + wrap_function_wrapper( + module="langchain.agents.middleware.types", + name=f"AgentMiddleware.{hook_name}", + wrapper=create_async_middleware_hook_wrapper(tracer, hook_name), + ) + except Exception as e: + logger.debug("Failed to wrap AgentMiddleware.%s: %s", hook_name, e) + def _uninstrument(self, **kwargs): unwrap("langchain_core.callbacks", "BaseCallbackManager.__init__") + + # Unwrap LangGraph components + if is_package_available("langgraph"): + try: + unwrap("langgraph.pregel", "Pregel.stream") + unwrap("langgraph.pregel", "Pregel.astream") + except Exception: + pass + try: + unwrap("langgraph.types", "Command.__init__") + except Exception: + pass + + # Unwrap AgentMiddleware hooks + if is_package_available("langchain"): + sync_hooks = ["before_model", "after_model", "before_agent", "after_agent"] + async_hooks = ["abefore_model", "aafter_model", "abefore_agent", "aafter_agent"] + for hook_name in sync_hooks + async_hooks: + try: + unwrap("langchain.agents.middleware.types", f"AgentMiddleware.{hook_name}") + except Exception: + pass + + # Unwrap LangGraph agent factories (both actual module and re-export) + if is_package_available("langgraph"): + try: + unwrap("langgraph.prebuilt.chat_agent_executor", "create_react_agent") + except Exception: + pass + try: + unwrap("langgraph.prebuilt", "create_react_agent") + except Exception: + pass + + # Unwrap LangChain agent factories (both actual module and re-export) + if is_package_available("langchain"): + try: + unwrap("langchain.agents.factory", "create_agent") + except Exception: + pass + try: + unwrap("langchain.agents", "create_agent") + except Exception: + pass + if not self.disable_trace_context_propagation: if is_package_available("langchain_community"): unwrap("langchain_community.llms.openai", "BaseOpenAI._generate") diff --git a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py index 6a283c9a2a..9ab46f5ba4 100644 --- 
a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py +++ b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py @@ -1,3 +1,4 @@ +import contextvars import json import time from typing import Any, Dict, List, Optional, Type, Union @@ -59,17 +60,31 @@ from opentelemetry.semconv._incubating.attributes import ( gen_ai_attributes as GenAIAttributes, ) +from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiOperationNameValues from opentelemetry.semconv_ai import ( SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, + GenAICustomOperationName, LLMRequestTypeValues, SpanAttributes, TraceloopSpanKindValues, ) from opentelemetry.trace import SpanKind, Tracer, set_span_in_context +from opentelemetry.instrumentation.langchain.patch import ( + LANGGRAPH_FLOW_KEY, + LANGGRAPH_GRAPH_SPAN_KEY, + LANGGRAPH_FIRST_CHILD_PENDING_KEY, +) from opentelemetry.trace.span import Span from opentelemetry.trace.status import Status, StatusCode from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE +# Context variable for tracking current LangGraph node (for Command source tracking) +# Using ContextVar instead of OTel context to avoid detach issues in async scenarios +_langgraph_current_node: contextvars.ContextVar[str | None] = contextvars.ContextVar( + 'langgraph_current_node', + default=None +) + def _extract_class_name_from_serialized(serialized: Optional[dict[str, Any]]) -> str: """ @@ -159,7 +174,7 @@ def __init__( @staticmethod def _get_name_from_callback( - serialized: dict[str, Any], + serialized: Optional[dict[str, Any]], _tags: Optional[list[str]] = None, _metadata: Optional[dict[str, Any]] = None, **kwargs: Any, @@ -169,9 +184,9 @@ def _get_name_from_callback( return serialized["kwargs"]["name"] if kwargs.get("name"): return kwargs["name"] - if serialized.get("name"): + if serialized and serialized.get("name"): return serialized["name"] - if "id" in serialized: + if serialized and "id" in serialized: return serialized["id"][-1] return "unknown" @@ -279,7 +294,25 @@ def _create_span( kind=kind, ) else: - span = self.tracer.start_span(span_name, kind=kind) + # Check if we're in a LangGraph flow and this is the first child + graph_span_holder = context_api.get_value(LANGGRAPH_GRAPH_SPAN_KEY) + first_child_pending = context_api.get_value(LANGGRAPH_FIRST_CHILD_PENDING_KEY) + + if graph_span_holder is not None and first_child_pending and first_child_pending[0]: + # This is the first child of the graph span - parent it using + # the SpanHolder's stored context for correct span parenting + span = self.tracer.start_span( + span_name, + context=graph_span_holder.context, + kind=kind, + ) + # Flip the flag so subsequent spans use normal parenting. + # Note: This mutable list pattern is intentional. LangGraph callbacks + # are single-threaded per graph invocation, so this is safe. + # The mutable list is used because OTel context values are immutable. 
+ first_child_pending[0] = False + else: + span = self.tracer.start_span(span_name, kind=kind) token = self._safe_attach_context(span) @@ -314,8 +347,25 @@ def _create_task_span( entity_name: str = "", entity_path: str = "", metadata: Optional[dict[str, Any]] = None, + serialized: Optional[dict[str, Any]] = None, ) -> Span: - span_name = f"{name}.{kind.value}" + # Determine span type + is_agent = kind in ( + TraceloopSpanKindValues.WORKFLOW, + TraceloopSpanKindValues.AGENT, + ) + is_tool = kind == TraceloopSpanKindValues.TOOL + + if is_agent: + # Keep existing workflow span naming + span_name = f"{name}.{kind.value}" + elif is_tool: + # Use OpenTelemetry GenAI spec naming for tools + span_name = f"execute_tool {name}" + else: + # Use OpenTelemetry GenAI spec naming for tasks + span_name = f"execute_task {name}" + span = self._create_span( run_id, parent_run_id, @@ -326,9 +376,53 @@ def _create_task_span( metadata=metadata, ) + # Set traceloop attributes for backwards compatibility _set_span_attribute(span, SpanAttributes.TRACELOOP_SPAN_KIND, kind.value) _set_span_attribute(span, SpanAttributes.TRACELOOP_ENTITY_NAME, entity_name) + # Set GenAI semantic convention attributes + # Check LangGraph flow context to set appropriate provider + langgraph_flow = context_api.get_value(LANGGRAPH_FLOW_KEY) + provider_name = "langgraph" if langgraph_flow else "langchain" + _set_span_attribute(span, GenAIAttributes.GEN_AI_PROVIDER_NAME, provider_name) + + if is_agent: + # Set agent-specific attributes + _set_span_attribute( + span, + GenAIAttributes.GEN_AI_OPERATION_NAME, + GenAiOperationNameValues.INVOKE_AGENT.value, + ) + _set_span_attribute(span, GenAIAttributes.GEN_AI_AGENT_NAME, name) + _set_span_attribute(span, GenAIAttributes.GEN_AI_AGENT_ID, str(run_id)) + elif is_tool: + # Set tool-specific attributes + _set_span_attribute( + span, + GenAIAttributes.GEN_AI_OPERATION_NAME, + GenAiOperationNameValues.EXECUTE_TOOL.value, + ) + _set_span_attribute(span, GenAIAttributes.GEN_AI_TOOL_NAME, name) + _set_span_attribute(span, GenAIAttributes.GEN_AI_TOOL_TYPE, "function") + + # Extract tool description if available + description = (serialized or {}).get("description", "") + if description: + _set_span_attribute(span, GenAIAttributes.GEN_AI_TOOL_DESCRIPTION, description) + else: + # Set task-specific attributes + _set_span_attribute( + span, + GenAIAttributes.GEN_AI_OPERATION_NAME, + GenAICustomOperationName.EXECUTE_TASK.value, + ) + _set_span_attribute(span, SpanAttributes.GEN_AI_TASK_NAME, name) + _set_span_attribute(span, SpanAttributes.GEN_AI_TASK_ID, str(run_id)) + if parent_run_id: + _set_span_attribute( + span, SpanAttributes.GEN_AI_TASK_PARENT_ID, str(parent_run_id) + ) + return span def _create_llm_span( @@ -359,6 +453,9 @@ def _create_llm_span( _set_span_attribute(span, GenAIAttributes.GEN_AI_SYSTEM, vendor) _set_span_attribute(span, SpanAttributes.LLM_REQUEST_TYPE, request_type.value) + _set_span_attribute( + span, GenAIAttributes.GEN_AI_OPERATION_NAME, GenAICustomOperationName.LLM_REQUEST.value + ) # we already have an LLM span by this point, # so skip any downstream instrumentation from here @@ -433,7 +530,39 @@ def on_chain_start( ), ) - # The start_time is now automatically set when creating the SpanHolder + # Extract conversation ID from config (LangGraph thread_id) + config = kwargs.get("config") or (metadata.get("config", {}) if metadata else {}) + if config: + configurable = config.get("configurable", {}) if isinstance(config, dict) else {} + thread_id = configurable.get("thread_id") + if 
thread_id: + _set_span_attribute( + span, GenAIAttributes.GEN_AI_CONVERSATION_ID, str(thread_id) + ) + + # Set current node in context for Command source tracking. + # Using ContextVar instead of OTel context to avoid detach issues in async scenarios. + # ContextVars are automatically scoped to the current context. + if metadata and metadata.get("langgraph_node") == name: + try: + _langgraph_current_node.set(name) + except Exception: + pass + + if not should_emit_events() and should_send_prompts(): + input_json = json.dumps( + { + "inputs": inputs, + "tags": tags, + "metadata": metadata, + "kwargs": kwargs, + }, + ensure_ascii=False, + cls=CallbackFilteredJSONEncoder, + ) + # Set both for backwards compatibility + span.set_attribute(SpanAttributes.TRACELOOP_ENTITY_INPUT, input_json) + span.set_attribute(SpanAttributes.GEN_AI_TASK_INPUT, input_json) @dont_throw def on_chain_end( @@ -450,15 +579,19 @@ def on_chain_end( span_holder = self.spans[run_id] span = span_holder.span + + # Set task status to success + _set_span_attribute(span, SpanAttributes.GEN_AI_TASK_STATUS, "success") + if not should_emit_events() and should_send_prompts(): - span.set_attribute( - SpanAttributes.TRACELOOP_ENTITY_OUTPUT, - json.dumps( - {"outputs": outputs, "kwargs": kwargs}, - ensure_ascii=False, - cls=CallbackFilteredJSONEncoder, - ), + output_json = json.dumps( + {"outputs": outputs, "kwargs": kwargs}, + ensure_ascii=False, + cls=CallbackFilteredJSONEncoder, ) + # Set both for backwards compatibility + span.set_attribute(SpanAttributes.TRACELOOP_ENTITY_OUTPUT, output_json) + span.set_attribute(SpanAttributes.GEN_AI_TASK_OUTPUT, output_json) self._end_span(span, run_id) if parent_run_id is None: @@ -675,22 +808,24 @@ def on_tool_start( workflow_name, name, entity_path, + metadata=metadata, + serialized=serialized, ) if not should_emit_events() and should_send_prompts(): - span.set_attribute( - SpanAttributes.TRACELOOP_ENTITY_INPUT, - json.dumps( - { - "input_str": input_str, - "tags": tags, - "metadata": metadata, - "inputs": inputs, - "kwargs": kwargs, - }, - ensure_ascii=False, - cls=CallbackFilteredJSONEncoder, - ), + input_json = json.dumps( + { + "input_str": input_str, + "tags": tags, + "metadata": metadata, + "inputs": inputs, + "kwargs": kwargs, + }, + ensure_ascii=False, + cls=CallbackFilteredJSONEncoder, ) + # Set both for backwards compatibility + span.set_attribute(SpanAttributes.TRACELOOP_ENTITY_INPUT, input_json) + span.set_attribute(GenAIAttributes.GEN_AI_TOOL_CALL_ARGUMENTS, input_json) @dont_throw def on_tool_end( @@ -706,15 +841,119 @@ def on_tool_end( return span = self._get_span(run_id) + + # Set task status to success + _set_span_attribute(span, SpanAttributes.GEN_AI_TASK_STATUS, "success") + + if not should_emit_events() and should_send_prompts(): + output_json = json.dumps( + {"output": output, "kwargs": kwargs}, + ensure_ascii=False, + cls=CallbackFilteredJSONEncoder, + ) + # Set both for backwards compatibility + span.set_attribute(SpanAttributes.TRACELOOP_ENTITY_OUTPUT, output_json) + span.set_attribute(GenAIAttributes.GEN_AI_TOOL_CALL_RESULT, output_json) + self._end_span(span, run_id) + + @dont_throw + def on_retriever_start( + self, + serialized: dict[str, Any], + query: str, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + tags: Optional[list[str]] = None, + metadata: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + """Run when retriever starts running.""" + if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY): + return + + name = 
self._get_name_from_callback(serialized, **kwargs) + workflow_name = self.get_workflow_name(parent_run_id) + entity_path = self.get_entity_path(parent_run_id) + + # Create span with vector_db_retrieve naming convention + span_name = f"vector_db_retrieve {name}" + span = self._create_span( + run_id, + parent_run_id, + span_name, + SpanKind.CLIENT, + workflow_name=workflow_name, + entity_name=name, + entity_path=entity_path, + metadata=metadata, + ) + + # Set GenAI semantic convention attributes + _set_span_attribute( + span, GenAIAttributes.GEN_AI_OPERATION_NAME, GenAICustomOperationName.VECTOR_DB_RETRIEVE.value + ) + # Set provider name based on LangGraph flow context + langgraph_flow = context_api.get_value(LANGGRAPH_FLOW_KEY) + provider_name = "langgraph" if langgraph_flow else "langchain" + _set_span_attribute(span, GenAIAttributes.GEN_AI_PROVIDER_NAME, provider_name) + _set_span_attribute(span, SpanAttributes.TRACELOOP_SPAN_KIND, TraceloopSpanKindValues.TASK.value) + _set_span_attribute(span, SpanAttributes.TRACELOOP_ENTITY_NAME, name) + + # Set task input (query and parameters) + if not should_emit_events() and should_send_prompts(): + input_json = json.dumps( + { + "query": query, + "tags": tags, + "metadata": metadata, + "kwargs": kwargs, + }, + ensure_ascii=False, + cls=CallbackFilteredJSONEncoder, + ) + _set_span_attribute(span, SpanAttributes.TRACELOOP_ENTITY_INPUT, input_json) + _set_span_attribute(span, SpanAttributes.GEN_AI_TASK_INPUT, input_json) + + @dont_throw + def on_retriever_end( + self, + documents: list, + *, + run_id: UUID, + parent_run_id: Optional[UUID] = None, + **kwargs: Any, + ) -> None: + """Run when retriever ends running.""" + if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY): + return + + span = self._get_span(run_id) + + # Set task status to success + _set_span_attribute(span, SpanAttributes.GEN_AI_TASK_STATUS, "success") + + # Set task output (documents) if not should_emit_events() and should_send_prompts(): - span.set_attribute( - SpanAttributes.TRACELOOP_ENTITY_OUTPUT, - json.dumps( - {"output": output, "kwargs": kwargs}, - ensure_ascii=False, - cls=CallbackFilteredJSONEncoder, - ), + # Extract document content for output + docs_output = [] + for doc in documents: + if hasattr(doc, 'page_content'): + docs_output.append({ + "page_content": doc.page_content, + "metadata": getattr(doc, 'metadata', {}) + }) + else: + docs_output.append(str(doc)) + + output_json = json.dumps( + {"documents": docs_output, "count": len(documents)}, + ensure_ascii=False, + cls=CallbackFilteredJSONEncoder, ) + _set_span_attribute(span, SpanAttributes.TRACELOOP_ENTITY_OUTPUT, output_json) + _set_span_attribute(span, SpanAttributes.GEN_AI_TASK_OUTPUT, output_json) + self._end_span(span, run_id) def get_parent_span(self, parent_run_id: Optional[str] = None): @@ -757,6 +996,8 @@ def _handle_error( return span = self._get_span(run_id) + # Set task status to failure + _set_span_attribute(span, SpanAttributes.GEN_AI_TASK_STATUS, "failure") span.set_status(Status(StatusCode.ERROR), str(error)) span.record_exception(error) self._end_span(span, run_id) diff --git a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/langgraph_utils.py b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/langgraph_utils.py new file mode 100644 index 0000000000..920de7c179 --- /dev/null +++ b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/langgraph_utils.py @@ -0,0 +1,51 @@ +"""Utilities 
for extracting LangGraph graph structure.""" + +from typing import Any + + +def extract_graph_structure(graph_instance: Any) -> tuple[list[str], list[str]]: + """ + Extract graph nodes and edges as separate lists. + + This extracts the workflow topology to populate the gen_ai.workflow.nodes + and gen_ai.workflow.edges attributes as specified in the OpenTelemetry + GenAI semantic conventions. + + Args: + graph_instance: A LangGraph Pregel or CompiledGraph instance + + Returns: + Tuple of (nodes, edges) where: + - nodes: List of node names (excluding __start__/__end__) + - edges: List of "source -> target" strings + """ + # Get the graph structure via get_graph() method + if hasattr(graph_instance, "get_graph"): + graph = graph_instance.get_graph() + else: + graph = graph_instance + + # Extract nodes (excluding __start__ and __end__ special nodes) + nodes = [] + if hasattr(graph, "nodes"): + for node_id in graph.nodes: + if node_id not in ("__start__", "__end__"): + nodes.append(node_id) + + # Extract edges as "source -> target" strings + edges = [] + if hasattr(graph, "edges"): + for edge in graph.edges: + # Handle different edge formats + if hasattr(edge, "source") and hasattr(edge, "target"): + source, target = edge.source, edge.target + elif isinstance(edge, tuple) and len(edge) >= 2: + source, target = edge[0], edge[1] + else: + continue + + # Skip special nodes + if source not in ("__start__", "__end__") and target not in ("__start__", "__end__"): + edges.append(f"{source} -> {target}") + + return nodes, edges diff --git a/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/patch.py b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/patch.py new file mode 100644 index 0000000000..6f6ce72ea8 --- /dev/null +++ b/packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/patch.py @@ -0,0 +1,503 @@ +"""Patching utilities for LangGraph instrumentation.""" + +import json +import logging +from opentelemetry import context as context_api +from opentelemetry import trace +from opentelemetry.trace import Tracer, Span, SpanKind, Status, StatusCode +from opentelemetry.semconv._incubating.attributes import gen_ai_attributes as GenAIAttributes +from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiOperationNameValues +from opentelemetry.semconv_ai import GenAICustomOperationName, SpanAttributes +from opentelemetry.instrumentation.langchain.span_utils import SpanHolder +from typing import Any + +logger = logging.getLogger(__name__) + +# Import ContextVar for reading current node (set by callback_handler) +# Importing at runtime to avoid circular import issues +def _get_current_node_contextvar(): + """Lazy import to avoid circular dependency.""" + from opentelemetry.instrumentation.langchain.callback_handler import _langgraph_current_node + return _langgraph_current_node + +# Context key for marking LangGraph flow +LANGGRAPH_FLOW_KEY = "langgraph_flow" +# Context key for storing the graph SpanHolder as parent for callback-created spans +LANGGRAPH_GRAPH_SPAN_KEY = "langgraph_graph_span" +# Context key for tracking if first child of graph span is pending (mutable list [bool]) +LANGGRAPH_FIRST_CHILD_PENDING_KEY = "langgraph_first_child_pending" + + +def _set_graph_span_attributes( + graph_span: Span, + instance: Any, + graph_name: str, + kwargs: dict, + args: tuple +) -> None: + """ + Set common GenAI attributes on graph span. 
+ + This helper function consolidates attribute setting to avoid duplication + between sync and async wrappers. + + Args: + graph_span: The span to set attributes on + instance: The graph instance + graph_name: Name of the graph + kwargs: Keyword arguments passed to the graph invocation + args: Positional arguments passed to the graph invocation + """ + from opentelemetry.instrumentation.langchain.langgraph_utils import extract_graph_structure + + # Set GenAI semantic convention attributes + graph_span.set_attribute(GenAIAttributes.GEN_AI_PROVIDER_NAME, "langgraph") + graph_span.set_attribute( + GenAIAttributes.GEN_AI_OPERATION_NAME, GenAiOperationNameValues.INVOKE_AGENT.value + ) + graph_span.set_attribute(GenAIAttributes.GEN_AI_AGENT_NAME, graph_name) + + # Extract conversation ID from config + config = kwargs.get('config') or (args[1] if len(args) > 1 else None) + if config and isinstance(config, dict): + configurable = config.get("configurable", {}) + thread_id = configurable.get("thread_id") + if thread_id: + graph_span.set_attribute(GenAIAttributes.GEN_AI_CONVERSATION_ID, str(thread_id)) + + # Extract workflow structure (best-effort with debug logging) + try: + nodes, edges = extract_graph_structure(instance) + if nodes: + graph_span.set_attribute(SpanAttributes.GEN_AI_WORKFLOW_NODES, nodes) + if edges: + graph_span.set_attribute(SpanAttributes.GEN_AI_WORKFLOW_EDGES, edges) + except Exception as e: + logger.debug("Failed to extract LangGraph workflow structure: %s", e) + + +def _get_graph_name(instance, args, kwargs) -> str: + """ + Get the graph name from available sources in order of priority: + 1. config['run_name'] (from args[1] or kwargs['config']) + 2. instance.get_name() method (matches LangGraph's behavior) + 3. Default to "LangGraph" + + Note: stream/astream signature is (self, input, config=None, *, ...) + so config can be args[1] (positional) or kwargs['config'] (keyword). + """ + # Config can be in args[1] (positional) or kwargs['config'] (keyword) + config = None + if len(args) > 1: + config = args[1] + if config is None: + config = kwargs.get('config') + + # Try run_name from config first (config could be RunnableConfig object, not dict) + if config and isinstance(config, dict): + run_name = config.get('run_name') + if run_name: + return run_name + + # Fallback to instance.get_name() to match LangGraph behavior + if hasattr(instance, 'get_name'): + return instance.get_name() + + # Default + return "LangGraph" + + +def create_graph_invocation_wrapper(tracer: Tracer, is_async: bool = False): + """ + Factory to create wrappers for graph invocation methods. 
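+
+    Example (illustrative; mirrors the registration performed by
+    ``_wrap_langgraph_components`` in the instrumentor):
+
+        wrap_function_wrapper(
+            module="langgraph.pregel",
+            name="Pregel.stream",
+            wrapper=create_graph_invocation_wrapper(tracer, is_async=False),
+        )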
+ + Args: + tracer: OpenTelemetry tracer instance + is_async: Whether to create an async wrapper + + Returns: + Wrapper function for sync or async graph invocation + """ + def wrapper(wrapped, instance, args, kwargs): + """Wrapper for Pregel.stream - yields from the generator while managing span lifecycle.""" + graph_name = _get_graph_name(instance, args, kwargs) + + # Set LangGraph flow context before creating spans + langgraph_ctx = context_api.attach( + context_api.set_value(LANGGRAPH_FLOW_KEY, graph_name) + ) + + # Create graph span with GenAI convention naming: invoke_agent {agent_name} + graph_span = tracer.start_span(f"invoke_agent {graph_name}") + + # Set all graph span attributes using helper function + _set_graph_span_attributes(graph_span, instance, graph_name, kwargs, args) + + # Attach span to context for parent-child linking, wrapped in SpanHolder + ctx_with_span = trace.set_span_in_context(graph_span) + graph_span_holder = SpanHolder( + span=graph_span, + token=None, + context=ctx_with_span, + children=[], + workflow_name=graph_name, + entity_name=graph_name, + entity_path=graph_name, + ) + ctx_with_span = context_api.set_value(LANGGRAPH_GRAPH_SPAN_KEY, graph_span_holder, ctx_with_span) + ctx_with_span = context_api.set_value(LANGGRAPH_FIRST_CHILD_PENDING_KEY, [True], ctx_with_span) + graph_span_ctx = context_api.attach(ctx_with_span) + + try: + for item in wrapped(*args, **kwargs): + yield item + except BaseException as e: + graph_span.set_status(Status(StatusCode.ERROR, str(e))) + graph_span.record_exception(e) + raise + finally: + graph_span.end() + context_api.detach(graph_span_ctx) + context_api.detach(langgraph_ctx) + + async def async_wrapper(wrapped, instance, args, kwargs): + """Wrapper for Pregel.astream - yields from the async generator while managing span lifecycle.""" + graph_name = _get_graph_name(instance, args, kwargs) + + # Set LangGraph flow context before creating spans + langgraph_ctx = context_api.attach( + context_api.set_value(LANGGRAPH_FLOW_KEY, graph_name) + ) + + # Create graph span with GenAI convention naming: invoke_agent {agent_name} + graph_span = tracer.start_span(f"invoke_agent {graph_name}") + + # Set all graph span attributes using helper function + _set_graph_span_attributes(graph_span, instance, graph_name, kwargs, args) + + # Attach span to context for parent-child linking, wrapped in SpanHolder + ctx_with_span = trace.set_span_in_context(graph_span) + graph_span_holder = SpanHolder( + span=graph_span, + token=None, + context=ctx_with_span, + children=[], + workflow_name=graph_name, + entity_name=graph_name, + entity_path=graph_name, + ) + ctx_with_span = context_api.set_value(LANGGRAPH_GRAPH_SPAN_KEY, graph_span_holder, ctx_with_span) + ctx_with_span = context_api.set_value(LANGGRAPH_FIRST_CHILD_PENDING_KEY, [True], ctx_with_span) + graph_span_ctx = context_api.attach(ctx_with_span) + + try: + async for item in wrapped(*args, **kwargs): + yield item + except BaseException as e: + graph_span.set_status(Status(StatusCode.ERROR, str(e))) + graph_span.record_exception(e) + raise + finally: + graph_span.end() + context_api.detach(graph_span_ctx) + context_api.detach(langgraph_ctx) + + return async_wrapper if is_async else wrapper + + +def create_command_init_wrapper(tracer: Tracer): + """ + Wrapper for Command.__init__ to capture command creation. 
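+
+    Example (illustrative; the node function and target name are hypothetical,
+    and the callback handler is assumed to have set the current-node ContextVar):
+
+        def route_to_reviewer(state):
+            # Creating this Command emits a "goto reviewer" span whose
+            # source node is read from the ContextVar
+            return Command(goto="reviewer")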
+
+    Creates a span when a Command object is created, capturing only:
+    - Source node (from context)
+    - Destination node(s) from goto parameter
+
+    Args:
+        tracer: OpenTelemetry tracer instance
+
+    Returns:
+        Wrapper function for Command.__init__
+    """
+    def wrapper(wrapped, instance, args, kwargs):
+        # Call original __init__ first
+        result = wrapped(*args, **kwargs)
+
+        # Only create span if goto is specified (indicates routing)
+        if instance.goto:
+            # Get source node from ContextVar (set by callback_handler)
+            source_node = _get_current_node_contextvar().get()
+
+            # Extract goto destination(s)
+            goto_destinations = _extract_goto_destinations(instance.goto)
+
+            # Create span only if we have both source and destination
+            if source_node and isinstance(source_node, str) and goto_destinations:
+                # Format span name as "goto {target}" or "goto {target1, target2}" for multiple
+                if len(goto_destinations) == 1:
+                    target_str = goto_destinations[0]
+                else:
+                    target_str = ", ".join(goto_destinations)
+                span_name = f"goto {target_str}"
+
+                with tracer.start_as_current_span(
+                    span_name,
+                    kind=SpanKind.INTERNAL
+                ) as span:
+                    # Set GenAI operation name
+                    span.set_attribute(GenAIAttributes.GEN_AI_OPERATION_NAME, "goto")
+
+                    span.set_attribute(
+                        SpanAttributes.LANGGRAPH_COMMAND_SOURCE_NODE, source_node
+                    )
+
+                    if len(goto_destinations) == 1:
+                        span.set_attribute(
+                            SpanAttributes.LANGGRAPH_COMMAND_GOTO_NODE, goto_destinations[0]
+                        )
+                    else:
+                        span.set_attribute(
+                            SpanAttributes.LANGGRAPH_COMMAND_GOTO_NODES, json.dumps(goto_destinations)
+                        )
+
+        return result
+
+    return wrapper
+
+
+def _extract_goto_destinations(goto: Any) -> list[str]:
+    """
+    Extract destination node names from goto parameter.
+
+    Args:
+        goto: Can be a string, a Send, or a sequence of strings/Sends
+
+    Returns:
+        List of destination node names
+    """
+    try:
+        from langgraph.types import Send
+    except ImportError:
+        # If Send is unavailable, fall back to handling plain strings only
+        Send = None
+
+    destinations = []
+
+    if isinstance(goto, str):
+        destinations.append(goto)
+    elif Send is not None and isinstance(goto, Send):
+        destinations.append(goto.node)
+    elif isinstance(goto, (list, tuple)):
+        for item in goto:
+            if isinstance(item, str):
+                destinations.append(item)
+            elif Send is not None and isinstance(item, Send):
+                destinations.append(item.node)
+
+    return destinations
+
+
+def _set_middleware_span_attributes(
+    span: Span,
+    middleware_name: str,
+    hook_name: str
+) -> None:
+    """
+    Set common GenAI attributes on middleware span.
+
+    This helper function consolidates attribute setting to avoid duplication
+    between sync and async middleware wrappers.
+
+    Args:
+        span: The span to set attributes on
+        middleware_name: Name of the middleware class
+        hook_name: Name of the hook being executed
+    """
+    span.set_attribute(
+        GenAIAttributes.GEN_AI_OPERATION_NAME,
+        GenAICustomOperationName.EXECUTE_TASK.value,
+    )
+    span.set_attribute(SpanAttributes.GEN_AI_TASK_KIND, middleware_name)
+    span.set_attribute(
+        SpanAttributes.GEN_AI_TASK_NAME, f"{middleware_name}.{hook_name}"
+    )
+    span.set_attribute(GenAIAttributes.GEN_AI_PROVIDER_NAME, "langchain")
+
+
+def create_middleware_hook_wrapper(tracer: Tracer, hook_name: str):
+    """
+    Wrapper for AgentMiddleware hook methods (before_model, after_model, etc.).
+ + Creates a span when a middleware hook is called, capturing: + - Middleware class name as gen_ai.task.kind + - Hook name as part of gen_ai.task.name + + Args: + tracer: OpenTelemetry tracer instance + hook_name: Name of the hook being wrapped (e.g., "before_model") + + Returns: + Wrapper function for the middleware hook + """ + def wrapper(wrapped, instance, args, kwargs): + middleware_name = instance.__class__.__name__ + span_name = f"execute_task {middleware_name}.{hook_name}" + + with tracer.start_as_current_span(span_name, kind=SpanKind.INTERNAL) as span: + _set_middleware_span_attributes(span, middleware_name, hook_name) + + try: + result = wrapped(*args, **kwargs) + span.set_attribute(SpanAttributes.GEN_AI_TASK_STATUS, "success") + return result + except Exception as e: + span.set_attribute(SpanAttributes.GEN_AI_TASK_STATUS, "failure") + span.record_exception(e) + raise + + return wrapper + + +def create_async_middleware_hook_wrapper(tracer: Tracer, hook_name: str): + """ + Async wrapper for AgentMiddleware hook methods (abefore_model, aafter_model, etc.) + + Args: + tracer: OpenTelemetry tracer instance + hook_name: Name of the hook being wrapped (e.g., "abefore_model") + + Returns: + Async wrapper function for the middleware hook + """ + async def async_wrapper(wrapped, instance, args, kwargs): + middleware_name = instance.__class__.__name__ + span_name = f"execute_task {middleware_name}.{hook_name}" + + with tracer.start_as_current_span(span_name, kind=SpanKind.INTERNAL) as span: + _set_middleware_span_attributes(span, middleware_name, hook_name) + + try: + result = await wrapped(*args, **kwargs) + span.set_attribute(SpanAttributes.GEN_AI_TASK_STATUS, "success") + return result + except Exception as e: + span.set_attribute(SpanAttributes.GEN_AI_TASK_STATUS, "failure") + span.record_exception(e) + raise + + return async_wrapper + + +def _extract_tool_definition(tool: Any) -> dict | None: + """ + Extract tool definition in OpenAI function format. + + Returns a dict with type, name, description, and parameters. + """ + tool_def = {"type": "function"} + + # Extract name + if hasattr(tool, 'name'): + tool_def["name"] = tool.name + elif isinstance(tool, dict) and 'name' in tool: + tool_def["name"] = tool['name'] + elif hasattr(tool, '__name__'): + tool_def["name"] = tool.__name__ + else: + return None + + # Extract description + if hasattr(tool, 'description'): + tool_def["description"] = tool.description + elif isinstance(tool, dict) and 'description' in tool: + tool_def["description"] = tool['description'] + elif hasattr(tool, '__doc__') and tool.__doc__: + tool_def["description"] = tool.__doc__ + + # Extract parameters schema + parameters = None + if hasattr(tool, 'args_schema') and tool.args_schema: + # LangChain tools with Pydantic schema + try: + if hasattr(tool.args_schema, 'model_json_schema'): + parameters = tool.args_schema.model_json_schema() + elif hasattr(tool.args_schema, 'schema'): + parameters = tool.args_schema.schema() + except Exception: + pass + elif isinstance(tool, dict) and 'parameters' in tool: + parameters = tool['parameters'] + + if parameters: + tool_def["parameters"] = parameters + + return tool_def + + +def create_agent_wrapper(tracer: Tracer, provider_name: str = "langchain"): + """ + Wrapper for create_agent factory functions. + + Captures agent creation with GenAI semantic convention attributes. 
+ + Args: + tracer: OpenTelemetry tracer instance + provider_name: The provider name to use (e.g., "langgraph" or "langchain") + + Returns: + Wrapper function for agent factory + """ + def wrapper(wrapped, _instance, args, kwargs): + # Extract agent name from kwargs or use function name + agent_name = kwargs.get("name") + if not agent_name: + # Use the wrapped function's name as fallback + agent_name = getattr(wrapped, '__name__', 'agent') + # Clean up the name (e.g., "create_react_agent" -> "react_agent") + if agent_name.startswith('create_'): + agent_name = agent_name[7:] + + span_name = f"create_agent {agent_name}" + + with tracer.start_as_current_span(span_name, kind=SpanKind.INTERNAL) as span: + span.set_attribute( + GenAIAttributes.GEN_AI_OPERATION_NAME, + GenAiOperationNameValues.CREATE_AGENT.value, + ) + span.set_attribute(GenAIAttributes.GEN_AI_PROVIDER_NAME, provider_name) + span.set_attribute(GenAIAttributes.GEN_AI_AGENT_NAME, agent_name) + + # Extract system instructions from prompt/system_prompt parameter + # LangGraph uses "prompt", LangChain uses "system_prompt" + system_instructions = kwargs.get("prompt") or kwargs.get("system_prompt") + if system_instructions: + if isinstance(system_instructions, str): + span.set_attribute(GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS, system_instructions) + elif hasattr(system_instructions, 'content'): + # SystemMessage or similar object with content attribute + span.set_attribute( + GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS, str(system_instructions.content) + ) + + # Extract tool definitions in OpenAI function format + # Tools can be in args[1] (positional) or kwargs + tools = kwargs.get("tools") + if tools is None and len(args) > 1: + tools = args[1] + if tools: + tool_definitions = [] + for tool in tools: + tool_def = _extract_tool_definition(tool) + if tool_def: + tool_definitions.append(tool_def) + if tool_definitions: + span.set_attribute( + GenAIAttributes.GEN_AI_TOOL_DEFINITIONS, + json.dumps(tool_definitions) + ) + + result = wrapped(*args, **kwargs) + + return result + + return wrapper diff --git a/packages/opentelemetry-instrumentation-langchain/pyproject.toml b/packages/opentelemetry-instrumentation-langchain/pyproject.toml index 1a7600fcfb..762b6e27b8 100644 --- a/packages/opentelemetry-instrumentation-langchain/pyproject.toml +++ b/packages/opentelemetry-instrumentation-langchain/pyproject.toml @@ -89,3 +89,6 @@ select = ["E", "F", "W"] [tool.uv] constraint-dependencies = ["urllib3>=2.6.3", "langgraph-checkpoint>=4.0.0", "pip>=25.3"] + +[tool.uv.sources] +opentelemetry-semantic-conventions-ai = { path = "../opentelemetry-semantic-conventions-ai", editable = true } diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_agents.py b/packages/opentelemetry-instrumentation-langchain/tests/test_agents.py index 588b88c189..9c8835ef04 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_agents.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_agents.py @@ -38,21 +38,21 @@ def test_agents(instrument_legacy, span_exporter, log_exporter): spans = span_exporter.get_finished_spans() assert set([span.name for span in spans]) == { - "RunnableLambda.task", - "RunnableParallel.task", - "RunnableAssign.task", - "ChatPromptTemplate.task", + "execute_task RunnableLambda", + "execute_task RunnableParallel", + "execute_task RunnableAssign", + "execute_task ChatPromptTemplate", "ChatOpenAI.chat", - "ToolsAgentOutputParser.task", - "RunnableSequence.task", - "tavily_search_results_json.tool", - 
"RunnableLambda.task", - "RunnableParallel.task", - "RunnableAssign.task", - "ChatPromptTemplate.task", + "execute_task ToolsAgentOutputParser", + "execute_task RunnableSequence", + "execute_tool tavily_search_results_json", + "execute_task RunnableLambda", + "execute_task RunnableParallel", + "execute_task RunnableAssign", + "execute_task ChatPromptTemplate", "ChatOpenAI.chat", - "ToolsAgentOutputParser.task", - "RunnableSequence.task", + "execute_task ToolsAgentOutputParser", + "execute_task RunnableSequence", "AgentExecutor.workflow", } @@ -82,21 +82,21 @@ def test_agents_with_events_with_content( spans = span_exporter.get_finished_spans() assert set([span.name for span in spans]) == { - "RunnableLambda.task", - "RunnableParallel.task", - "RunnableAssign.task", - "ChatPromptTemplate.task", + "execute_task RunnableLambda", + "execute_task RunnableParallel", + "execute_task RunnableAssign", + "execute_task ChatPromptTemplate", "ChatOpenAI.chat", - "ToolsAgentOutputParser.task", - "RunnableSequence.task", - "tavily_search_results_json.tool", - "RunnableLambda.task", - "RunnableParallel.task", - "RunnableAssign.task", - "ChatPromptTemplate.task", + "execute_task ToolsAgentOutputParser", + "execute_task RunnableSequence", + "execute_tool tavily_search_results_json", + "execute_task RunnableLambda", + "execute_task RunnableParallel", + "execute_task RunnableAssign", + "execute_task ChatPromptTemplate", "ChatOpenAI.chat", - "ToolsAgentOutputParser.task", - "RunnableSequence.task", + "execute_task ToolsAgentOutputParser", + "execute_task RunnableSequence", "AgentExecutor.workflow", } @@ -176,21 +176,21 @@ def test_agents_with_events_with_no_content( spans = span_exporter.get_finished_spans() assert set([span.name for span in spans]) == { - "RunnableLambda.task", - "RunnableParallel.task", - "RunnableAssign.task", - "ChatPromptTemplate.task", + "execute_task RunnableLambda", + "execute_task RunnableParallel", + "execute_task RunnableAssign", + "execute_task ChatPromptTemplate", "ChatOpenAI.chat", - "ToolsAgentOutputParser.task", - "RunnableSequence.task", - "tavily_search_results_json.tool", - "RunnableLambda.task", - "RunnableParallel.task", - "RunnableAssign.task", - "ChatPromptTemplate.task", + "execute_task ToolsAgentOutputParser", + "execute_task RunnableSequence", + "execute_tool tavily_search_results_json", + "execute_task RunnableLambda", + "execute_task RunnableParallel", + "execute_task RunnableAssign", + "execute_task ChatPromptTemplate", "ChatOpenAI.chat", - "ToolsAgentOutputParser.task", - "RunnableSequence.task", + "execute_task ToolsAgentOutputParser", + "execute_task RunnableSequence", "AgentExecutor.workflow", } diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_chains.py b/packages/opentelemetry-instrumentation-langchain/tests/test_chains.py index 260f53d72f..bc5631ea47 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_chains.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_chains.py @@ -51,9 +51,9 @@ def test_sequential_chain(instrument_legacy, span_exporter, log_exporter): assert [ "OpenAI.completion", - "synopsis.task", + "execute_task synopsis", "OpenAI.completion", - "LLMChain.task", + "execute_task LLMChain", "SequentialChain.workflow", ] == [span.name for span in spans] @@ -61,7 +61,7 @@ def test_sequential_chain(instrument_legacy, span_exporter, log_exporter): span for span in spans if span.name == "SequentialChain.workflow" ) task_spans = [ - span for span in spans if span.name in ["synopsis.task", 
"LLMChain.task"] + span for span in spans if span.name in ["execute_task synopsis", "execute_task LLMChain"] ] llm_spans = [span for span in spans if span.name == "OpenAI.completion"] @@ -84,8 +84,8 @@ def test_sequential_chain(instrument_legacy, span_exporter, log_exporter): for span in llm_spans ) - synopsis_span = next(span for span in spans if span.name == "synopsis.task") - review_span = next(span for span in spans if span.name == "LLMChain.task") + synopsis_span = next(span for span in spans if span.name == "execute_task synopsis") + review_span = next(span for span in spans if span.name == "execute_task LLMChain") data = json.loads(synopsis_span.attributes[SpanAttributes.TRACELOOP_ENTITY_INPUT]) assert data["inputs"] == { @@ -175,9 +175,9 @@ def test_sequential_chain_with_events_with_content( assert [ "OpenAI.completion", - "synopsis.task", + "execute_task synopsis", "OpenAI.completion", - "LLMChain.task", + "execute_task LLMChain", "SequentialChain.workflow", ] == [span.name for span in spans] @@ -185,7 +185,7 @@ def test_sequential_chain_with_events_with_content( span for span in spans if span.name == "SequentialChain.workflow" ) task_spans = [ - span for span in spans if span.name in ["synopsis.task", "LLMChain.task"] + span for span in spans if span.name in ["execute_task synopsis", "execute_task LLMChain"] ] llm_spans = [span for span in spans if span.name == "OpenAI.completion"] @@ -296,9 +296,9 @@ def test_sequential_chain_with_events_with_no_content( assert [ "OpenAI.completion", - "synopsis.task", + "execute_task synopsis", "OpenAI.completion", - "LLMChain.task", + "execute_task LLMChain", "SequentialChain.workflow", ] == [span.name for span in spans] @@ -306,7 +306,7 @@ def test_sequential_chain_with_events_with_no_content( span for span in spans if span.name == "SequentialChain.workflow" ) task_spans = [ - span for span in spans if span.name in ["synopsis.task", "LLMChain.task"] + span for span in spans if span.name in ["execute_task synopsis", "execute_task LLMChain"] ] llm_spans = [span for span in spans if span.name == "OpenAI.completion"] @@ -396,14 +396,14 @@ async def test_asequential_chain(instrument_legacy, span_exporter, log_exporter) assert [ "OpenAI.completion", - "LLMChain.task", + "execute_task LLMChain", "OpenAI.completion", - "LLMChain.task", + "execute_task LLMChain", "SequentialChain.workflow", ] == [span.name for span in spans] synopsis_span, review_span = [ - span for span in spans if span.name == "LLMChain.task" + span for span in spans if span.name == "execute_task LLMChain" ] data = json.loads(synopsis_span.attributes[SpanAttributes.TRACELOOP_ENTITY_INPUT]) @@ -484,9 +484,9 @@ async def test_asequential_chain_with_events_with_content( assert [ "OpenAI.completion", - "LLMChain.task", + "execute_task LLMChain", "OpenAI.completion", - "LLMChain.task", + "execute_task LLMChain", "SequentialChain.workflow", ] == [span.name for span in spans] @@ -569,14 +569,14 @@ async def test_asequential_chain_with_events_with_no_content( assert [ "OpenAI.completion", - "LLMChain.task", + "execute_task LLMChain", "OpenAI.completion", - "LLMChain.task", + "execute_task LLMChain", "SequentialChain.workflow", ] == [span.name for span in spans] synopsis_span, review_span = [ - span for span in spans if span.name == "LLMChain.task" + span for span in spans if span.name == "execute_task LLMChain" ] logs = log_exporter.get_finished_logs() @@ -610,8 +610,8 @@ def test_stream(instrument_legacy, span_exporter, log_exporter): assert set( [ - "PromptTemplate.task", - 
"StrOutputParser.task", + "execute_task PromptTemplate", + "execute_task StrOutputParser", "ChatCohere.chat", "RunnableSequence.workflow", ] @@ -638,8 +638,8 @@ def test_stream_with_events_with_content( assert set( [ - "PromptTemplate.task", - "StrOutputParser.task", + "execute_task PromptTemplate", + "execute_task StrOutputParser", "ChatCohere.chat", "RunnableSequence.workflow", ] @@ -682,8 +682,8 @@ def test_stream_with_events_with_no_content( assert set( [ - "PromptTemplate.task", - "StrOutputParser.task", + "execute_task PromptTemplate", + "execute_task StrOutputParser", "ChatCohere.chat", "RunnableSequence.workflow", ] @@ -721,9 +721,9 @@ async def test_astream(instrument_legacy, span_exporter, log_exporter): assert set( [ - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatCohere.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", ] ) == set([span.name for span in spans]) @@ -752,9 +752,9 @@ async def test_astream_with_events_with_content( assert set( [ - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatCohere.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", ] ) == set([span.name for span in spans]) @@ -797,9 +797,9 @@ async def test_astream_with_events_with_no_content( assert set( [ - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatCohere.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", ] ) == set([span.name for span in spans]) diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_documents_chains.py b/packages/opentelemetry-instrumentation-langchain/tests/test_documents_chains.py index b56af25dbd..59b272062e 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_documents_chains.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_documents_chains.py @@ -51,7 +51,7 @@ def test_sequential_chain(instrument_legacy, span_exporter, log_exporter): assert [ "ChatCohere.chat", - "LLMChain.task", + "execute_task LLMChain", "stuff_chain.workflow", ] == [span.name for span in spans] @@ -88,7 +88,7 @@ def test_sequential_chain_with_events_with_content( assert [ "ChatCohere.chat", - "LLMChain.task", + "execute_task LLMChain", "stuff_chain.workflow", ] == [span.name for span in spans] @@ -134,7 +134,7 @@ def test_sequential_chain_with_events_with_no_content( assert [ "ChatCohere.chat", - "LLMChain.task", + "execute_task LLMChain", "stuff_chain.workflow", ] == [span.name for span in spans] diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_langgraph.py b/packages/opentelemetry-instrumentation-langchain/tests/test_langgraph.py index a17d8f94e2..c912106200 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_langgraph.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_langgraph.py @@ -1,12 +1,15 @@ +from typing import List, TypedDict + import pytest -from openai import OpenAI -from typing import TypedDict +from langchain.agents.middleware.types import AgentMiddleware from langgraph.graph import StateGraph +from openai import OpenAI from opentelemetry import trace from opentelemetry.semconv._incubating.attributes import ( gen_ai_attributes as GenAIAttributes, ) -from opentelemetry.semconv_ai import SpanAttributes +from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GenAiOperationNameValues +from opentelemetry.semconv_ai import GenAICustomOperationName, SpanAttributes from opentelemetry.trace 
import INVALID_SPAN @@ -38,12 +41,19 @@ def calculate(state: State): user_request = "What's 5 + 5?" response = langgraph.invoke(input={"request": user_request})["result"] spans = span_exporter.get_finished_spans() - assert set(["LangGraph.workflow", "calculate.task", "openai.chat"]) == set( + assert set(["LangGraph.workflow", "execute_task calculate", "openai.chat", "invoke_agent LangGraph"]) == set( [span.name for span in spans] ) openai_span = next(span for span in spans if span.name == "openai.chat") - calculate_task_span = next(span for span in spans if span.name == "calculate.task") + calculate_task_span = next(span for span in spans if span.name == "execute_task calculate") + graph_span = next(span for span in spans if span.name == "invoke_agent LangGraph") + + # Verify GenAI semantic convention attributes on graph span + assert graph_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == GenAiOperationNameValues.INVOKE_AGENT.value + assert graph_span.attributes[GenAIAttributes.GEN_AI_PROVIDER_NAME] == "langgraph" + assert graph_span.attributes[GenAIAttributes.GEN_AI_AGENT_NAME] == "LangGraph" + # agent_id removed per maintainer feedback - rely on agent name only assert openai_span.parent.span_id == calculate_task_span.context.span_id assert openai_span.attributes[SpanAttributes.LLM_REQUEST_TYPE] == "chat" @@ -98,13 +108,19 @@ def calculate(state: State): user_request = "What's 5 + 5?" await langgraph.ainvoke(input={"request": user_request}) spans = span_exporter.get_finished_spans() - assert set(["LangGraph.workflow", "calculate.task", "openai.chat"]) == set( + assert set(["LangGraph.workflow", "execute_task calculate", "openai.chat", "invoke_agent LangGraph"]) == set( [span.name for span in spans] ) openai_span = next(span for span in spans if span.name == "openai.chat") - calculate_task_span = next(span for span in spans if span.name == "calculate.task") + calculate_task_span = next(span for span in spans if span.name == "execute_task calculate") + graph_span = next(span for span in spans if span.name == "invoke_agent LangGraph") + assert openai_span.parent.span_id == calculate_task_span.context.span_id + # Verify GenAI semantic convention attributes on graph span + assert graph_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == GenAiOperationNameValues.INVOKE_AGENT.value + assert graph_span.attributes[GenAIAttributes.GEN_AI_PROVIDER_NAME] == "langgraph" + @pytest.mark.vcr def test_langgraph_double_invoke(instrument_legacy, span_exporter): @@ -132,19 +148,27 @@ def build_graph(): spans = span_exporter.get_finished_spans() assert [ - "mynode.task", + "execute_task mynode", "LangGraph.workflow", + "invoke_agent LangGraph", ] == [span.name for span in spans] + # Verify GenAI attributes on graph span + graph_span = next(span for span in spans if span.name == "invoke_agent LangGraph") + assert graph_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == GenAiOperationNameValues.INVOKE_AGENT.value + assert graph_span.attributes[GenAIAttributes.GEN_AI_PROVIDER_NAME] == "langgraph" + graph.invoke({"result": "init"}) assert trace.get_current_span() == INVALID_SPAN spans = span_exporter.get_finished_spans() assert [ - "mynode.task", + "execute_task mynode", "LangGraph.workflow", - "mynode.task", + "invoke_agent LangGraph", + "execute_task mynode", "LangGraph.workflow", + "invoke_agent LangGraph", ] == [span.name for span in spans] @@ -172,18 +196,26 @@ def build_graph(): spans = span_exporter.get_finished_spans() assert [ - "mynode.task", + "execute_task mynode", 
"LangGraph.workflow", + "invoke_agent LangGraph", ] == [span.name for span in spans] + # Verify GenAI attributes on graph span + graph_span = next(span for span in spans if span.name == "invoke_agent LangGraph") + assert graph_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == GenAiOperationNameValues.INVOKE_AGENT.value + assert graph_span.attributes[GenAIAttributes.GEN_AI_PROVIDER_NAME] == "langgraph" + await graph.ainvoke({"result": "init"}) spans = span_exporter.get_finished_spans() assert [ - "mynode.task", + "execute_task mynode", "LangGraph.workflow", - "mynode.task", + "invoke_agent LangGraph", + "execute_task mynode", "LangGraph.workflow", + "invoke_agent LangGraph", ] == [span.name for span in spans] @@ -311,35 +343,44 @@ async def run_test_agent(): assert "test_agent_execution_root" in span_names assert "POST" in span_names assert "test_agent_span" in span_names - assert "http_call.task" in span_names - assert "otel_span.task" in span_names + assert "execute_task http_call" in span_names + assert "execute_task otel_span" in span_names assert "LangGraph.workflow" in span_names + assert "invoke_agent LangGraph" in span_names root_span = next(span for span in spans if span.name == "test_agent_execution_root") post_span = next(span for span in spans if span.name == "POST") test_agent_span = next(span for span in spans if span.name == "test_agent_span") - http_call_task_span = next(span for span in spans if span.name == "http_call.task") - otel_span_task_span = next(span for span in spans if span.name == "otel_span.task") + http_call_task_span = next(span for span in spans if span.name == "execute_task http_call") + otel_span_task_span = next(span for span in spans if span.name == "execute_task otel_span") workflow_span = next(span for span in spans if span.name == "LangGraph.workflow") + graph_span = next(span for span in spans if span.name == "invoke_agent LangGraph") + + # Verify GenAI semantic convention attributes on graph span + assert graph_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == GenAiOperationNameValues.INVOKE_AGENT.value + assert graph_span.attributes[GenAIAttributes.GEN_AI_PROVIDER_NAME] == "langgraph" + assert graph_span.attributes[GenAIAttributes.GEN_AI_AGENT_NAME] == "LangGraph" + # agent_id removed per maintainer feedback - rely on agent name only print("\nHierarchy check:") print(f"POST parent: {post_span.parent.span_id if post_span.parent else 'None'}") - print(f"http_call.task ID: {http_call_task_span.context.span_id}") + print(f"execute_task http_call ID: {http_call_task_span.context.span_id}") print( f"test_agent_span parent: {test_agent_span.parent.span_id if test_agent_span.parent else 'None'}" ) - print(f"otel_span.task ID: {otel_span_task_span.context.span_id}") + print(f"execute_task otel_span ID: {otel_span_task_span.context.span_id}") assert ( post_span.parent.span_id == http_call_task_span.context.span_id - ), "POST span should be child of http_call.task span" + ), "POST span should be child of execute_task http_call span" assert ( test_agent_span.parent.span_id == otel_span_task_span.context.span_id - ), "test_agent_span should be child of otel_span.task span" + ), "test_agent_span should be child of execute_task otel_span span" assert http_call_task_span.parent.span_id == workflow_span.context.span_id assert otel_span_task_span.parent.span_id == workflow_span.context.span_id - assert workflow_span.parent.span_id == root_span.context.span_id + assert workflow_span.parent.span_id == graph_span.context.span_id + assert 
graph_span.parent.span_id == root_span.context.span_id def test_context_detachment_error_handling( @@ -481,3 +522,279 @@ async def run_concurrent_executions(): assert ( parent_span.name == "concurrent_async_span" ), "Nested span should be child of concurrent_async_span" + + +def test_create_react_agent_span(instrument_legacy, span_exporter): + """Test create_react_agent span has GenAI semantic convention attributes.""" + from langchain_core.language_models import BaseChatModel + from langchain_core.messages import AIMessage + from langchain_core.outputs import ChatGeneration, ChatResult + from langchain_core.tools import tool + from langgraph.prebuilt import create_react_agent + + class MockChatModel(BaseChatModel): + @property + def _llm_type(self) -> str: + return "mock" + + def _generate(self, messages, stop=None, run_manager=None, **kwargs): + return ChatResult(generations=[ChatGeneration(message=AIMessage(content="Mock"))]) + + def bind_tools(self, tools, **kwargs): + return self + + @tool + def get_weather(city: str) -> str: + """Get weather.""" + return f"Weather in {city}" + + _ = create_react_agent(model=MockChatModel(), tools=[get_weather], name="TestAgent") + + spans = span_exporter.get_finished_spans() + create_span = next(s for s in spans if "create_agent" in s.name) + + assert create_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == GenAiOperationNameValues.CREATE_AGENT.value + assert create_span.attributes[GenAIAttributes.GEN_AI_AGENT_NAME] == "TestAgent" + assert GenAIAttributes.GEN_AI_TOOL_DEFINITIONS in create_span.attributes + + +def test_retriever_span_attributes(instrument_legacy, span_exporter): + """Test retriever span has GenAI semantic convention attributes.""" + from langchain_core.callbacks import CallbackManagerForRetrieverRun + from langchain_core.documents import Document + from langchain_core.retrievers import BaseRetriever + + class MockRetriever(BaseRetriever): + def _get_relevant_documents( + self, query: str, *, run_manager: CallbackManagerForRetrieverRun + ) -> List[Document]: + return [Document(page_content="Test", metadata={"source": "test.txt"})] + + MockRetriever().invoke("test query") + + spans = span_exporter.get_finished_spans() + retriever_span = next(s for s in spans if "MockRetriever" in s.name) + + assert ( + retriever_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] + == GenAICustomOperationName.VECTOR_DB_RETRIEVE.value + ) + assert SpanAttributes.GEN_AI_TASK_INPUT in retriever_span.attributes + assert retriever_span.attributes[SpanAttributes.GEN_AI_TASK_STATUS] == "success" + + +def test_middleware_hook_span_attributes(instrument_legacy, span_exporter): + """Test middleware hook span has GenAI semantic convention attributes.""" + + class TestMiddleware(AgentMiddleware): + def before_model(self, state, runtime): + return super().before_model(state, runtime) + + TestMiddleware().before_model({"messages": []}, None) + + spans = span_exporter.get_finished_spans() + middleware_span = next(s for s in spans if "TestMiddleware" in s.name) + + assert ( + middleware_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] + == GenAICustomOperationName.EXECUTE_TASK.value + ) + assert middleware_span.attributes[SpanAttributes.GEN_AI_TASK_KIND] == "TestMiddleware" + assert middleware_span.attributes[SpanAttributes.GEN_AI_TASK_STATUS] == "success" + + +def test_langgraph_custom_name(instrument_legacy, span_exporter): + """Test that custom run_name in config appears in span name and attributes.""" + + class CustomState(TypedDict): + value: str 
+ + def my_node(state: CustomState) -> CustomState: + return state + + workflow = StateGraph(CustomState) + workflow.add_node("my_node", my_node) + workflow.set_entry_point("my_node") + graph = workflow.compile() + + # Invoke with custom run_name in config + graph.invoke({"value": "test"}, config={"run_name": "MyCustomAgent"}) + + spans = span_exporter.get_finished_spans() + span_names = [span.name for span in spans] + + # Verify custom name appears in span name + assert "invoke_agent MyCustomAgent" in span_names + + # Get the graph span and verify attributes + graph_span = next(span for span in spans if span.name == "invoke_agent MyCustomAgent") + assert graph_span.attributes[GenAIAttributes.GEN_AI_AGENT_NAME] == "MyCustomAgent" + assert graph_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == GenAiOperationNameValues.INVOKE_AGENT.value + + +def test_command_with_goto(instrument_legacy, span_exporter): + """Test Command with goto creates span with source and destination nodes.""" + from langgraph.graph import END + from langgraph.types import Command + from opentelemetry import context as context_api + + class CommandState(TypedDict): + value: str + + def router_node(state: CommandState): + # The Command.__init__ wrapper reads "langgraph_current_node" from context + # to determine which node created the Command (for source_node attribute). + # In a real LangGraph execution, the instrumentation sets this context + # when entering each node. Here we set it manually to test the Command wrapper. + ctx = context_api.set_value("langgraph_current_node", "router_node") + token = context_api.attach(ctx) + try: + return Command(goto="target_node", update={"value": "routed"}) + finally: + context_api.detach(token) + + def target_node(state: CommandState): + return {"value": "done"} + + workflow = StateGraph(CommandState) + workflow.add_node("router_node", router_node) + workflow.add_node("target_node", target_node) + workflow.set_entry_point("router_node") + workflow.add_edge("target_node", END) + + graph = workflow.compile() + graph.invoke({"value": "start"}) + + spans = span_exporter.get_finished_spans() + goto_spans = [s for s in spans if "goto" in s.name] + + # Verify goto span was created with correct attributes + assert len(goto_spans) >= 1 + goto_span = goto_spans[0] + assert goto_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] == "goto" + assert SpanAttributes.LANGGRAPH_COMMAND_SOURCE_NODE in goto_span.attributes + assert SpanAttributes.LANGGRAPH_COMMAND_GOTO_NODE in goto_span.attributes + + +def test_send_extraction(): + """Test _extract_goto_destinations handles Send objects correctly.""" + from langgraph.types import Send + from opentelemetry.instrumentation.langchain.patch import _extract_goto_destinations + + # Test with single string + result = _extract_goto_destinations("target_node") + assert result == ["target_node"] + + # Test with Send object + send = Send("worker_node", {"item": "a"}) + result = _extract_goto_destinations(send) + assert result == ["worker_node"] + + # Test with list of strings + result = _extract_goto_destinations(["node1", "node2"]) + assert result == ["node1", "node2"] + + # Test with list of Send objects + sends = [Send("worker1", {}), Send("worker2", {})] + result = _extract_goto_destinations(sends) + assert result == ["worker1", "worker2"] + + # Test with mixed list + mixed = ["node1", Send("worker", {})] + result = _extract_goto_destinations(mixed) + assert result == ["node1", "worker"] + + +def 
test_create_agent_with_system_prompt(instrument_legacy, span_exporter): + """Test create_react_agent with system prompt captures gen_ai.system_instructions.""" + from langchain_core.language_models import BaseChatModel + from langchain_core.messages import AIMessage + from langchain_core.outputs import ChatGeneration, ChatResult + from langchain_core.tools import tool + from langgraph.prebuilt import create_react_agent + + class MockChatModel(BaseChatModel): + @property + def _llm_type(self) -> str: + return "mock" + + def _generate(self, messages, stop=None, run_manager=None, **kwargs): + return ChatResult(generations=[ChatGeneration(message=AIMessage(content="Mock"))]) + + def bind_tools(self, tools, **kwargs): + return self + + @tool + def get_info(query: str) -> str: + """Get information.""" + return f"Info: {query}" + + # Create agent with system prompt + _ = create_react_agent( + model=MockChatModel(), + tools=[get_info], + name="PromptAgent", + prompt="You are a helpful assistant that provides accurate information." + ) + + spans = span_exporter.get_finished_spans() + create_span = next(s for s in spans if "create_agent" in s.name) + + assert create_span.attributes[GenAIAttributes.GEN_AI_AGENT_NAME] == "PromptAgent" + assert GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS in create_span.attributes + assert "helpful assistant" in create_span.attributes[GenAIAttributes.GEN_AI_SYSTEM_INSTRUCTIONS] + + +@pytest.mark.asyncio +async def test_async_middleware_hook(instrument_legacy, span_exporter): + """Test async middleware hook creates span with correct attributes.""" + + class AsyncTestMiddleware(AgentMiddleware): + async def abefore_model(self, state, runtime): + return await super().abefore_model(state, runtime) + + middleware = AsyncTestMiddleware() + await middleware.abefore_model({"messages": []}, None) + + spans = span_exporter.get_finished_spans() + middleware_spans = [s for s in spans if "AsyncTestMiddleware" in s.name] + + assert len(middleware_spans) >= 1 + middleware_span = middleware_spans[0] + assert ( + middleware_span.attributes[GenAIAttributes.GEN_AI_OPERATION_NAME] + == GenAICustomOperationName.EXECUTE_TASK.value + ) + assert middleware_span.attributes[SpanAttributes.GEN_AI_TASK_KIND] == "AsyncTestMiddleware" + + +def test_middleware_super_call_succeeds_despite_outer_failure(instrument_legacy, span_exporter): + """Test that wrapper records super() call as success even when outer method raises. + + The instrumentation wraps AgentMiddleware.before_model (the base class method). + When a subclass calls super().before_model(), that wrapped call succeeds. + Even if the subclass's own before_model() then raises an exception, the span + for the super() call correctly records status="success". 
+ """ + + class FailingMiddleware(AgentMiddleware): + def before_model(self, state, runtime): + # Call super first to trigger the wrapper, then fail + super().before_model(state, runtime) + raise ValueError("Intentional failure") + + middleware = FailingMiddleware() + try: + middleware.before_model({"messages": []}, None) + except ValueError: + pass # Expected + + spans = span_exporter.get_finished_spans() + # The wrapper is on AgentMiddleware.before_model, so look for that + middleware_spans = [s for s in spans if "before_model" in s.name] + + # Should have at least one span from calling super().before_model() + assert len(middleware_spans) >= 1 + # The span from super() call should succeed (before the ValueError is raised) + middleware_span = middleware_spans[0] + assert middleware_span.attributes[SpanAttributes.GEN_AI_TASK_STATUS] == "success" diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_lcel.py b/packages/opentelemetry-instrumentation-langchain/tests/test_lcel.py index 0d3d39261c..cf981f9bc1 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_lcel.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_lcel.py @@ -42,8 +42,8 @@ class Joke(BaseModel): assert set( [ - "ChatPromptTemplate.task", - "JsonOutputFunctionsParser.task", + "execute_task ChatPromptTemplate", + "execute_task JsonOutputFunctionsParser", "ChatOpenAI.chat", "ThisIsATestChain.workflow", ] @@ -53,13 +53,13 @@ class Joke(BaseModel): span for span in spans if span.name == "ThisIsATestChain.workflow" ) prompt_task_span = next( - span for span in spans if span.name == "ChatPromptTemplate.task" + span for span in spans if span.name == "execute_task ChatPromptTemplate" ) chat_openai_task_span = next( span for span in spans if span.name == "ChatOpenAI.chat" ) output_parser_task_span = next( - span for span in spans if span.name == "JsonOutputFunctionsParser.task" + span for span in spans if span.name == "execute_task JsonOutputFunctionsParser" ) assert prompt_task_span.parent.span_id == workflow_span.context.span_id @@ -160,8 +160,8 @@ class Joke(BaseModel): assert set( [ - "ChatPromptTemplate.task", - "JsonOutputFunctionsParser.task", + "execute_task ChatPromptTemplate", + "execute_task JsonOutputFunctionsParser", "ChatOpenAI.chat", "ThisIsATestChain.workflow", ] @@ -171,13 +171,13 @@ class Joke(BaseModel): span for span in spans if span.name == "ThisIsATestChain.workflow" ) prompt_task_span = next( - span for span in spans if span.name == "ChatPromptTemplate.task" + span for span in spans if span.name == "execute_task ChatPromptTemplate" ) chat_openai_task_span = next( span for span in spans if span.name == "ChatOpenAI.chat" ) output_parser_task_span = next( - span for span in spans if span.name == "JsonOutputFunctionsParser.task" + span for span in spans if span.name == "execute_task JsonOutputFunctionsParser" ) assert prompt_task_span.parent.span_id == workflow_span.context.span_id @@ -244,8 +244,8 @@ class Joke(BaseModel): assert set( [ - "ChatPromptTemplate.task", - "JsonOutputFunctionsParser.task", + "execute_task ChatPromptTemplate", + "execute_task JsonOutputFunctionsParser", "ChatOpenAI.chat", "ThisIsATestChain.workflow", ] @@ -255,13 +255,13 @@ class Joke(BaseModel): span for span in spans if span.name == "ThisIsATestChain.workflow" ) prompt_task_span = next( - span for span in spans if span.name == "ChatPromptTemplate.task" + span for span in spans if span.name == "execute_task ChatPromptTemplate" ) chat_openai_task_span = next( span for span in spans 
if span.name == "ChatOpenAI.chat" ) output_parser_task_span = next( - span for span in spans if span.name == "JsonOutputFunctionsParser.task" + span for span in spans if span.name == "execute_task JsonOutputFunctionsParser" ) assert prompt_task_span.parent.span_id == workflow_span.context.span_id @@ -304,9 +304,9 @@ async def test_async_lcel(instrument_legacy, span_exporter, log_exporter): spans = span_exporter.get_finished_spans() assert { - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatOpenAI.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", } == set([span.name for span in spans]) @@ -317,7 +317,7 @@ async def test_async_lcel(instrument_legacy, span_exporter, log_exporter): span for span in spans if span.name == "ChatOpenAI.chat" ) output_parser_task_span = next( - span for span in spans if span.name == "StrOutputParser.task" + span for span in spans if span.name == "execute_task StrOutputParser" ) assert chat_openai_task_span.parent.span_id == workflow_span.context.span_id @@ -362,9 +362,9 @@ async def test_async_lcel_with_events_with_content( spans = span_exporter.get_finished_spans() assert { - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatOpenAI.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", } == set([span.name for span in spans]) @@ -375,7 +375,7 @@ async def test_async_lcel_with_events_with_content( span for span in spans if span.name == "ChatOpenAI.chat" ) output_parser_task_span = next( - span for span in spans if span.name == "StrOutputParser.task" + span for span in spans if span.name == "execute_task StrOutputParser" ) assert chat_openai_task_span.parent.span_id == workflow_span.context.span_id @@ -421,9 +421,9 @@ async def test_async_lcel_with_events_with_no_content( spans = span_exporter.get_finished_spans() assert { - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatOpenAI.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", } == set([span.name for span in spans]) @@ -434,7 +434,7 @@ async def test_async_lcel_with_events_with_no_content( span for span in spans if span.name == "ChatOpenAI.chat" ) output_parser_task_span = next( - span for span in spans if span.name == "StrOutputParser.task" + span for span in spans if span.name == "execute_task StrOutputParser" ) assert chat_openai_task_span.parent.span_id == workflow_span.context.span_id @@ -472,9 +472,9 @@ def test_invoke(instrument_legacy, span_exporter, log_exporter): spans = span_exporter.get_finished_spans() assert [ - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatOpenAI.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -502,9 +502,9 @@ def test_invoke_with_events_with_content( spans = span_exporter.get_finished_spans() assert [ - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatOpenAI.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -546,9 +546,9 @@ def test_invoke_with_events_with_no_content( spans = span_exporter.get_finished_spans() assert [ - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatOpenAI.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -588,9 +588,9 @@ def test_stream(instrument_legacy, span_exporter, log_exporter): spans = 
span_exporter.get_finished_spans() assert [ - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatOpenAI.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -621,9 +621,9 @@ def test_stream_with_events_with_content( spans = span_exporter.get_finished_spans() assert [ - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatOpenAI.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -669,9 +669,9 @@ def test_stream_with_events_with_no_content( spans = span_exporter.get_finished_spans() assert [ - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatOpenAI.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -708,9 +708,9 @@ async def test_async_invoke(instrument_legacy, span_exporter, log_exporter): spans = span_exporter.get_finished_spans() assert [ - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatOpenAI.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -739,9 +739,9 @@ async def test_async_invoke_with_events_with_content( spans = span_exporter.get_finished_spans() assert [ - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatOpenAI.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -784,9 +784,9 @@ async def test_async_invoke_with_events_with_no_content( spans = span_exporter.get_finished_spans() assert [ - "PromptTemplate.task", + "execute_task PromptTemplate", "ChatOpenAI.chat", - "StrOutputParser.task", + "execute_task StrOutputParser", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -850,8 +850,8 @@ class Joke(BaseModel): assert set( [ - "ChatPromptTemplate.task", - "JsonOutputFunctionsParser.task", + "execute_task ChatPromptTemplate", + "execute_task JsonOutputFunctionsParser", "ChatOpenAI.chat", "DateTimeTestChain.workflow", ] @@ -899,8 +899,8 @@ class Joke(BaseModel): assert set( [ - "ChatPromptTemplate.task", - "JsonOutputFunctionsParser.task", + "execute_task ChatPromptTemplate", + "execute_task JsonOutputFunctionsParser", "ChatOpenAI.chat", "DateTimeTestChain.workflow", ] @@ -975,8 +975,8 @@ class Joke(BaseModel): assert set( [ - "ChatPromptTemplate.task", - "JsonOutputFunctionsParser.task", + "execute_task ChatPromptTemplate", + "execute_task JsonOutputFunctionsParser", "ChatOpenAI.chat", "DateTimeTestChain.workflow", ] diff --git a/packages/opentelemetry-instrumentation-langchain/tests/test_llms.py b/packages/opentelemetry-instrumentation-langchain/tests/test_llms.py index fe88819d71..fcf8767b75 100644 --- a/packages/opentelemetry-instrumentation-langchain/tests/test_llms.py +++ b/packages/opentelemetry-instrumentation-langchain/tests/test_llms.py @@ -138,7 +138,7 @@ def test_custom_llm(instrument_legacy, span_exporter, log_exporter): spans = span_exporter.get_finished_spans() assert [ - "ChatPromptTemplate.task", + "execute_task ChatPromptTemplate", "HuggingFaceTextGenInference.completion", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -183,7 +183,7 @@ def test_custom_llm_with_events_with_content( spans = span_exporter.get_finished_spans() assert [ - "ChatPromptTemplate.task", + "execute_task ChatPromptTemplate", "HuggingFaceTextGenInference.completion", "RunnableSequence.workflow", ] == 
[span.name for span in spans] @@ -234,7 +234,7 @@ def test_custom_llm_with_events_with_no_content( spans = span_exporter.get_finished_spans() assert [ - "ChatPromptTemplate.task", + "execute_task ChatPromptTemplate", "HuggingFaceTextGenInference.completion", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -276,7 +276,7 @@ def test_openai(instrument_legacy, span_exporter, log_exporter): spans = span_exporter.get_finished_spans() assert [ - "ChatPromptTemplate.task", + "execute_task ChatPromptTemplate", "ChatOpenAI.chat", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -331,7 +331,7 @@ def test_openai_with_events_with_content( spans = span_exporter.get_finished_spans() assert [ - "ChatPromptTemplate.task", + "execute_task ChatPromptTemplate", "ChatOpenAI.chat", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -382,7 +382,7 @@ def test_openai_with_events_with_no_content( spans = span_exporter.get_finished_spans() assert [ - "ChatPromptTemplate.task", + "execute_task ChatPromptTemplate", "ChatOpenAI.chat", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -437,8 +437,8 @@ class Joke(BaseModel): assert set( [ - "ChatPromptTemplate.task", - "JsonOutputFunctionsParser.task", + "execute_task ChatPromptTemplate", + "execute_task JsonOutputFunctionsParser", "ChatOpenAI.chat", "RunnableSequence.workflow", ] @@ -527,8 +527,8 @@ class Joke(BaseModel): assert set( [ - "ChatPromptTemplate.task", - "JsonOutputFunctionsParser.task", + "execute_task ChatPromptTemplate", + "execute_task JsonOutputFunctionsParser", "ChatOpenAI.chat", "RunnableSequence.workflow", ] @@ -600,8 +600,8 @@ class Joke(BaseModel): assert set( [ - "ChatPromptTemplate.task", - "JsonOutputFunctionsParser.task", + "execute_task ChatPromptTemplate", + "execute_task JsonOutputFunctionsParser", "ChatOpenAI.chat", "RunnableSequence.workflow", ] @@ -648,7 +648,7 @@ def test_anthropic(instrument_legacy, span_exporter, log_exporter): spans = span_exporter.get_finished_spans() assert [ - "ChatPromptTemplate.task", + "execute_task ChatPromptTemplate", "ChatAnthropic.chat", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -718,7 +718,7 @@ def test_anthropic_with_events_with_content( spans = span_exporter.get_finished_spans() assert [ - "ChatPromptTemplate.task", + "execute_task ChatPromptTemplate", "ChatAnthropic.chat", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -774,7 +774,7 @@ def test_anthropic_with_events_with_no_content( spans = span_exporter.get_finished_spans() assert [ - "ChatPromptTemplate.task", + "execute_task ChatPromptTemplate", "ChatAnthropic.chat", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -833,7 +833,7 @@ def test_bedrock(instrument_legacy, span_exporter, log_exporter): spans = span_exporter.get_finished_spans() assert [ - "ChatPromptTemplate.task", + "execute_task ChatPromptTemplate", "ChatBedrock.chat", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -907,7 +907,7 @@ def test_bedrock_with_events_with_content( spans = span_exporter.get_finished_spans() assert [ - "ChatPromptTemplate.task", + "execute_task ChatPromptTemplate", "ChatBedrock.chat", "RunnableSequence.workflow", ] == [span.name for span in spans] @@ -970,7 +970,7 @@ def test_bedrock_with_events_with_no_content( spans = span_exporter.get_finished_spans() assert [ - "ChatPromptTemplate.task", + "execute_task ChatPromptTemplate", "ChatBedrock.chat", "RunnableSequence.workflow", ] == [span.name for span in spans] 
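Note for reviewers: every rename in the three test files above follows one rule. The legacy "<Runnable>.task" span names become "<operation> <target>", matching the OTel GenAI span-name convention. A minimal sketch of that rule; the genai_span_name helper is hypothetical and not part of this PR, it only restates the pattern the updated assertions encode:

def genai_span_name(operation: str, entity: str) -> str:
    # Span names are "<gen_ai.operation.name> <entity>",
    # e.g. "execute_task ChatPromptTemplate" or "invoke_agent LangGraph".
    return f"{operation} {entity}"

assert genai_span_name("execute_task", "ChatPromptTemplate") == "execute_task ChatPromptTemplate"
assert genai_span_name("invoke_agent", "LangGraph") == "invoke_agent LangGraph"
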
diff --git a/packages/opentelemetry-instrumentation-langchain/uv.lock b/packages/opentelemetry-instrumentation-langchain/uv.lock index ec687af701..63462ad9e9 100644 --- a/packages/opentelemetry-instrumentation-langchain/uv.lock +++ b/packages/opentelemetry-instrumentation-langchain/uv.lock @@ -168,7 +168,7 @@ wheels = [ [[package]] name = "anthropic" -version = "0.82.0" +version = "0.75.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -180,9 +180,9 @@ dependencies = [ { name = "sniffio" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/6f/94/3766b5414d9e35687d518943a5b2ffb2696cd5c53248eec13fa1e8a5c73d/anthropic-0.82.0.tar.gz", hash = "sha256:e217340ba40cb9e24c88aacccc365334a6c3f46778855eca5000a6aa83d73dde", size = 533270, upload-time = "2026-02-18T20:25:16.844Z" } +sdist = { url = "https://files.pythonhosted.org/packages/04/1f/08e95f4b7e2d35205ae5dcbb4ae97e7d477fc521c275c02609e2931ece2d/anthropic-0.75.0.tar.gz", hash = "sha256:e8607422f4ab616db2ea5baacc215dd5f028da99ce2f022e33c7c535b29f3dfb", size = 439565, upload-time = "2025-11-24T20:41:45.28Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5c/49/b570250e36471effbc146d22ffb111e775f11ff2d8b503b32526f25a8f23/anthropic-0.82.0-py3-none-any.whl", hash = "sha256:2525828b6798635a7a691c4c62d49bd10bbd288ab83fa4ba55851264dfa5377d", size = 456304, upload-time = "2026-02-18T20:25:18.788Z" }, + { url = "https://files.pythonhosted.org/packages/60/1c/1cd02b7ae64302a6e06724bf80a96401d5313708651d277b1458504a1730/anthropic-0.75.0-py3-none-any.whl", hash = "sha256:ea8317271b6c15d80225a9f3c670152746e88805a7a61e14d4a374577164965b", size = 388164, upload-time = "2025-11-24T20:41:43.587Z" }, ] [[package]] @@ -230,41 +230,32 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9e/43/53afb8ba17218f19b77c7834128566c5bbb100a0ad9ba2e8e89d089d7079/autopep8-2.3.2-py2.py3-none-any.whl", hash = "sha256:ce8ad498672c845a0c3de2629c15b635ec2b05ef8177a6e7c91c74f3e9b51128", size = 45807, upload-time = "2025-01-14T14:46:15.466Z" }, ] -[[package]] -name = "backports-asyncio-runner" -version = "1.2.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/8e/ff/70dca7d7cb1cbc0edb2c6cc0c38b65cba36cccc491eca64cabd5fe7f8670/backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162", size = 69893, upload-time = "2025-07-02T02:27:15.685Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a0/59/76ab57e3fe74484f48a53f8e337171b4a2349e506eabe136d7e01d059086/backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5", size = 12313, upload-time = "2025-07-02T02:27:14.263Z" }, -] - [[package]] name = "boto3" -version = "1.42.52" +version = "1.42.28" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore" }, { name = "jmespath" }, { name = "s3transfer" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/41/ed/8eacb8ec7bf264079608be5f9a2a57e31e7fed7a791bb3b15500ca9274a5/boto3-1.42.52.tar.gz", hash = "sha256:ff4a4afb832f63a1358e11fe6eb321da0f4767979c6721dd32fb02e6eabcebf5", size = 112811, upload-time = "2026-02-18T21:54:57.804Z" } +sdist = { url = "https://files.pythonhosted.org/packages/83/aa/a44ea8c8ee8239f3f7c32cce966512c846297df5fe48b56db6882f3b7ca0/boto3-1.42.28.tar.gz", hash = 
"sha256:7d56c298b8d98f5e9b04cf5d6627f68e7792e25614533aef17f815681b5e1096", size = 112846, upload-time = "2026-01-14T20:37:21.448Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2d/2a/de34ad6c43c56fe6dd5824bff2cd7fdef5edd9de0617cbd217040318ba97/boto3-1.42.52-py3-none-any.whl", hash = "sha256:7b3e0c4bfd8815a3df64fbe98fc9f87dfb12bd7a783cf63dfc2f166c66798c9d", size = 140556, upload-time = "2026-02-18T21:54:56.609Z" }, + { url = "https://files.pythonhosted.org/packages/69/35/5d95169ed145f0c49ebfeb6a5228ab63d54e95a2c7a43f0f0eb893540660/boto3-1.42.28-py3-none-any.whl", hash = "sha256:7994bc2a094c1894f6a4221a1696c5d18af6c9c888191051866f1d05c4fba431", size = 140575, upload-time = "2026-01-14T20:37:20.098Z" }, ] [[package]] name = "botocore" -version = "1.42.52" +version = "1.42.28" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jmespath" }, { name = "python-dateutil" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c2/37/7044e09d416ff746d23c7456e8c30ddade1154ecd08814b17ab7e2c20fb0/botocore-1.42.52.tar.gz", hash = "sha256:3bdef10aee4cee13ff019b6a1423a2ce3ca17352328d9918157a1829e5cc9be1", size = 14917923, upload-time = "2026-02-18T21:54:48.06Z" } +sdist = { url = "https://files.pythonhosted.org/packages/85/8d/e0828726aa568e5ab0ec477c7a47a82aa37f00951858d9ad892b6b1d5e32/botocore-1.42.28.tar.gz", hash = "sha256:0c15e78d1accf97df691083331f682e97b1bef73ef12dcdaadcf652abf9c182c", size = 14886029, upload-time = "2026-01-14T20:37:11.137Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/94/67/bbd723d489b25ff9f94a734e734986bb8343263dd024a3846291028c26d0/botocore-1.42.52-py3-none-any.whl", hash = "sha256:c3a0b7138a4c5a534da0eb2444c19763b4d03ba2190c0602c49315e54efd7252", size = 14588731, upload-time = "2026-02-18T21:54:45.532Z" }, + { url = "https://files.pythonhosted.org/packages/8a/ff/72470b92ba96868be1936b8b3c7a70f902b60d36268bdeddb732317bef7a/botocore-1.42.28-py3-none-any.whl", hash = "sha256:d26c7a0851489ce1a18279f9802fe434bd736ea861d4888cc2c7d83fb1f6af8f", size = 14559264, upload-time = "2026-01-14T20:37:08.184Z" }, ] [[package]] @@ -930,35 +921,35 @@ wheels = [ [[package]] name = "langchain" -version = "1.2.10" +version = "1.2.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-core" }, { name = "langgraph" }, { name = "pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/16/22/a4d4ac98fc2e393537130bbfba0d71a8113e6f884d96f935923e247397fe/langchain-1.2.10.tar.gz", hash = "sha256:bdcd7218d9c79a413cf15e106e4eb94408ac0963df9333ccd095b9ed43bf3be7", size = 570071, upload-time = "2026-02-10T14:56:49.74Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/bc/d8f506a525baadee99a65c6cc28c1c35c9eaf1cb2009f048e9861d81a600/langchain-1.2.6.tar.gz", hash = "sha256:7d46cbf719d860a16f6fc182d5d3de17453dda187f3d43e9c40ac352a5094fdd", size = 553127, upload-time = "2026-01-16T19:21:19.611Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7c/06/c3394327f815fade875724c0f6cff529777c96a1e17fea066deb997f8cf5/langchain-1.2.10-py3-none-any.whl", hash = "sha256:e07a377204451fffaed88276b8193e894893b1003e25c5bca6539288ccca3698", size = 111738, upload-time = "2026-02-10T14:56:47.985Z" }, + { url = "https://files.pythonhosted.org/packages/3f/28/d5dc4cb06ccb29d62a590d446072964766555e85863f5044c6e644c07d0d/langchain-1.2.6-py3-none-any.whl", hash = "sha256:a9a6c39f03c09b6eb0f1b47e267ad2a2fd04e124dfaa9753bd6c11d2fe7d944e", size = 108458, upload-time = 
"2026-01-16T19:21:18.085Z" }, ] [[package]] name = "langchain-anthropic" -version = "1.3.3" +version = "1.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anthropic" }, { name = "langchain-core" }, { name = "pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/58/48/cf217b3836099220737ff1f8fd07a554993080dfc9c0b4dd4af16ccb0604/langchain_anthropic-1.3.3.tar.gz", hash = "sha256:37198413c9bde5a9e9829f13c7b9ed4870d7085e7fba9fd803ef4d98ef8ea220", size = 686916, upload-time = "2026-02-10T21:02:28.924Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0d/b6/ac5ee84e15bf79844c9c791f99a614c7ec7e1a63c2947e55977be01a81b4/langchain_anthropic-1.3.1.tar.gz", hash = "sha256:4f3d7a4a7729ab1aeaf62d32c87d4d227c1b5421668ca9e3734562b383470b07", size = 708940, upload-time = "2026-01-05T21:07:19.345Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8c/f1/cf56d47964b6fe080cdc54c3e32bc05e560927d549b2634b39d14aaf6e05/langchain_anthropic-1.3.3-py3-none-any.whl", hash = "sha256:8008ce5fb680268681673e09f93a9ac08eba9e304477101e5e138f06b5cd8710", size = 46831, upload-time = "2026-02-10T21:02:27.386Z" }, + { url = "https://files.pythonhosted.org/packages/9a/4f/7a5b32764addf4b757545b89899b9d76688176f19e4ee89868e3b8bbfd0f/langchain_anthropic-1.3.1-py3-none-any.whl", hash = "sha256:1fc28cf8037c30597ee6172fc2ff9e345efe8149a8c2a39897b1eebba2948322", size = 46328, upload-time = "2026-01-05T21:07:18.261Z" }, ] [[package]] name = "langchain-aws" -version = "1.2.5" +version = "1.2.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "boto3" }, @@ -967,9 +958,9 @@ dependencies = [ { name = "numpy", version = "2.4.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.12'" }, { name = "pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ed/65/a7bcaf190508c995af85d060660125dabfe82447e0421fe6cc7d2a20773a/langchain_aws-1.2.5.tar.gz", hash = "sha256:1966635a8fb19bbd806bce8c7c9adf818748855702a328fa444585a4b0902690", size = 403375, upload-time = "2026-02-11T18:33:12.445Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/10/d48592355296540fff5aaac1aec32d4ecb65f6b7271f07e4f068864eef3f/langchain_aws-1.2.1.tar.gz", hash = "sha256:b41270f7b2303233dd76bc4870f928f97f7f4b3891ce4fedab84096d2faf888e", size = 434741, upload-time = "2026-01-15T03:38:59.498Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f0/33/059753e0265a868de5aec9280e8025753c98191d2d140e55e01fbe75fc0a/langchain_aws-1.2.5-py3-none-any.whl", hash = "sha256:2c04a43d609046f8fb31ab44347a333a9f8b1a73bbcad383db99219a365ca287", size = 165761, upload-time = "2026-02-11T18:33:11.271Z" }, + { url = "https://files.pythonhosted.org/packages/e3/36/e55dda0ad09ae52440e68b7c20b79394a20e274a707f0a5beb193648aaf9/langchain_aws-1.2.1-py3-none-any.whl", hash = "sha256:6bccc727b71e1be90914cda85ef8457a4319598f7b850dfe43e0b95aa7d8f0d9", size = 164422, upload-time = "2026-01-15T03:38:58.038Z" }, ] [[package]] @@ -1033,7 +1024,7 @@ wheels = [ [[package]] name = "langchain-core" -version = "1.2.13" +version = "1.2.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jsonpatch" }, @@ -1045,9 +1036,9 @@ dependencies = [ { name = "typing-extensions" }, { name = "uuid-utils" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fb/bb/c501ca60556c11ac80d1454bdcac63cb33583ce4e64fc4535ad5a7d5c6ba/langchain_core-1.2.13.tar.gz", hash = 
"sha256:d2773d0d0130a356378db9a858cfeef64c3d64bc03722f1d4d6c40eb46fdf01b", size = 831612, upload-time = "2026-02-15T07:45:57.014Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/0e/664d8d81b3493e09cbab72448d2f9d693d1fa5aa2bcc488602203a9b6da0/langchain_core-1.2.7.tar.gz", hash = "sha256:e1460639f96c352b4a41c375f25aeb8d16ffc1769499fb1c20503aad59305ced", size = 837039, upload-time = "2026-01-09T17:44:25.505Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/12/ab/60fd69e5d55f67d422baefddaaca523c42cd7510ab6aeb17db6ae57fb107/langchain_core-1.2.13-py3-none-any.whl", hash = "sha256:b31823e28d3eff1e237096d0bd3bf80c6f9624eb471a9496dbfbd427779f8d82", size = 500485, upload-time = "2026-02-15T07:45:55.422Z" }, + { url = "https://files.pythonhosted.org/packages/6e/6f/34a9fba14d191a67f7e2ee3dbce3e9b86d2fa7310e2c7f2c713583481bd2/langchain_core-1.2.7-py3-none-any.whl", hash = "sha256:452f4fef7a3d883357b22600788d37e3d8854ef29da345b7ac7099f33c31828b", size = 490232, upload-time = "2026-01-09T17:44:24.236Z" }, ] [[package]] @@ -1080,14 +1071,14 @@ wheels = [ [[package]] name = "langchain-text-splitters" -version = "1.1.1" +version = "1.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-core" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/85/38/14121ead61e0e75f79c3a35e5148ac7c2fe754a55f76eab3eed573269524/langchain_text_splitters-1.1.1.tar.gz", hash = "sha256:34861abe7c07d9e49d4dc852d0129e26b32738b60a74486853ec9b6d6a8e01d2", size = 279352, upload-time = "2026-02-18T23:02:42.798Z" } +sdist = { url = "https://files.pythonhosted.org/packages/41/42/c178dcdc157b473330eb7cc30883ea69b8ec60078c7b85e2d521054c4831/langchain_text_splitters-1.1.0.tar.gz", hash = "sha256:75e58acb7585dc9508f3cd9d9809cb14751283226c2d6e21fb3a9ae57582ca22", size = 272230, upload-time = "2025-12-14T01:15:38.659Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/84/66/d9e0c3b83b0ad75ee746c51ba347cacecb8d656b96e1d513f3e334d1ccab/langchain_text_splitters-1.1.1-py3-none-any.whl", hash = "sha256:5ed0d7bf314ba925041e7d7d17cd8b10f688300d5415fb26c29442f061e329dc", size = 35734, upload-time = "2026-02-18T23:02:41.913Z" }, + { url = "https://files.pythonhosted.org/packages/d8/1a/a84ed1c046deecf271356b0179c1b9fba95bfdaa6f934e1849dee26fad7b/langchain_text_splitters-1.1.0-py3-none-any.whl", hash = "sha256:f00341fe883358786104a5f881375ac830a4dd40253ecd42b4c10536c6e4693f", size = 34182, upload-time = "2025-12-14T01:15:37.382Z" }, ] [[package]] @@ -1106,7 +1097,7 @@ wheels = [ [[package]] name = "langgraph" -version = "1.0.8" +version = "1.0.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-core" }, @@ -1116,9 +1107,9 @@ dependencies = [ { name = "pydantic" }, { name = "xxhash" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ca/49/e9551965d8a44dd9afdc55cbcdc5a9bd18bee6918cc2395b225d40adb77c/langgraph-1.0.8.tar.gz", hash = "sha256:2630fc578846995114fd659f8b14df9eff5a4e78c49413f67718725e88ceb544", size = 498708, upload-time = "2026-02-06T12:31:13.776Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c2/9c/dac99ab1732e9fb2d3b673482ac28f02bee222c0319a3b8f8f73d90727e6/langgraph-1.0.6.tar.gz", hash = "sha256:dd8e754c76d34a07485308d7117221acf63990e7de8f46ddf5fe256b0a22e6c5", size = 495092, upload-time = "2026-01-12T20:33:30.778Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9a/72/b0d7fc1007821a08dfc03ce232f39f209aa4aa46414ea3d125b24e35093a/langgraph-1.0.8-py3-none-any.whl", hash = 
"sha256:da737177c024caad7e5262642bece4f54edf4cba2c905a1d1338963f41cf0904", size = 158144, upload-time = "2026-02-06T12:31:12.489Z" }, + { url = "https://files.pythonhosted.org/packages/10/45/9960747781416bed4e531ed0c6b2f2c739bc7b5397d8e92155463735a40e/langgraph-1.0.6-py3-none-any.whl", hash = "sha256:bcfce190974519c72e29f6e5b17f0023914fd6f936bfab8894083215b271eb89", size = 157356, upload-time = "2026-01-12T20:33:29.191Z" }, ] [[package]] @@ -1136,15 +1127,15 @@ wheels = [ [[package]] name = "langgraph-prebuilt" -version = "1.0.7" +version = "1.0.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-core" }, { name = "langgraph-checkpoint" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a7/59/711aecd1a50999456850dc328f3cad72b4372d8218838d8d5326f80cb76f/langgraph_prebuilt-1.0.7.tar.gz", hash = "sha256:38e097e06de810de4d0e028ffc0e432bb56d1fb417620fb1dfdc76c5e03e4bf9", size = 163692, upload-time = "2026-01-22T16:45:22.801Z" } +sdist = { url = "https://files.pythonhosted.org/packages/3c/f5/8c75dace0d729561dce2966e630c5e312193df7e5df41a7e10cd7378c3a7/langgraph_prebuilt-1.0.6.tar.gz", hash = "sha256:c5f6cf0f5a0ac47643d2e26ae6faa38cb28885ecde67911190df9e30c4f72361", size = 162623, upload-time = "2026-01-12T20:31:28.425Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/47/49/5e37abb3f38a17a3487634abc2a5da87c208cc1d14577eb8d7184b25c886/langgraph_prebuilt-1.0.7-py3-none-any.whl", hash = "sha256:e14923516504405bb5edc3977085bc9622c35476b50c1808544490e13871fe7c", size = 35324, upload-time = "2026-01-22T16:45:21.784Z" }, + { url = "https://files.pythonhosted.org/packages/26/6c/4045822b0630cfc0f8624c4499ceaf90644142143c063a8dc385a7424fc3/langgraph_prebuilt-1.0.6-py3-none-any.whl", hash = "sha256:9fdc35048ff4ac985a55bd2a019a86d45b8184551504aff6780d096c678b39ae", size = 35322, upload-time = "2026-01-12T20:31:27.161Z" }, ] [[package]] @@ -1459,7 +1450,7 @@ wheels = [ [[package]] name = "openai" -version = "2.21.0" +version = "1.109.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1471,9 +1462,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/92/e5/3d197a0947a166649f566706d7a4c8f7fe38f1fa7b24c9bcffe4c7591d44/openai-2.21.0.tar.gz", hash = "sha256:81b48ce4b8bbb2cc3af02047ceb19561f7b1dc0d4e52d1de7f02abfd15aa59b7", size = 644374, upload-time = "2026-02-14T00:12:01.577Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c6/a1/a303104dc55fc546a3f6914c842d3da471c64eec92043aef8f652eb6c524/openai-1.109.1.tar.gz", hash = "sha256:d173ed8dbca665892a6db099b4a2dfac624f94d20a93f46eb0b56aae940ed869", size = 564133, upload-time = "2025-09-24T13:00:53.075Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cc/56/0a89092a453bb2c676d66abee44f863e742b2110d4dbb1dbcca3f7e5fc33/openai-2.21.0-py3-none-any.whl", hash = "sha256:0bc1c775e5b1536c294eded39ee08f8407656537ccc71b1004104fe1602e267c", size = 1103065, upload-time = "2026-02-14T00:11:59.603Z" }, + { url = "https://files.pythonhosted.org/packages/1d/2a/7dd3d207ec669cacc1f186fd856a0f61dbc255d24f6fdc1a6715d6051b0f/openai-1.109.1-py3-none-any.whl", hash = "sha256:6bcaf57086cf59159b8e27447e4e7dd019db5d29a438072fbd49c290c7e65315", size = 948627, upload-time = "2025-09-24T13:00:50.754Z" }, ] [[package]] @@ -1506,7 +1497,7 @@ wheels = [ [[package]] name = "opentelemetry-instrumentation-bedrock" -version = "0.52.4" +version = "0.50.1" source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "anthropic" }, @@ -1516,9 +1507,9 @@ dependencies = [ { name = "opentelemetry-semantic-conventions-ai" }, { name = "tokenizers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/7d/ea/1cdf7a8caa624043fe02eb90248e73269fae9ab275816f6fabcc34f60538/opentelemetry_instrumentation_bedrock-0.52.4.tar.gz", hash = "sha256:d785b14338d475e85e3d2074840e71771c430f55efbad7ed27a9856e6d835771", size = 149890, upload-time = "2026-02-19T13:21:42.301Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/19/d26f2fb88228f3b3eee70e216fe708859dc540a2fbd3ced3cee36135c663/opentelemetry_instrumentation_bedrock-0.50.1.tar.gz", hash = "sha256:78e863e2d75dbe1cc5f1f15a69625de989968d8beee07c35a9e746502b7795f6", size = 15327, upload-time = "2025-12-16T08:26:53.873Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1a/8a/02435bcdc4c45ff5b0816547fa5b4613ccce2671ec1deb23818f6f284346/opentelemetry_instrumentation_bedrock-0.52.4-py3-none-any.whl", hash = "sha256:ef5cc6f0ba78c500476f2781134beac19bc83279aeaa827dd90c5b09c20d18ae", size = 19361, upload-time = "2026-02-19T13:20:58.869Z" }, + { url = "https://files.pythonhosted.org/packages/3b/fb/8c04c2b33c6973f8cb7706d7a787dfbcf7111a718516282c8c24ba3a2558/opentelemetry_instrumentation_bedrock-0.50.1-py3-none-any.whl", hash = "sha256:dc341fde7b45f83f093ad70dea611d5d0439e74cfafab02a80c17c1113376e38", size = 19045, upload-time = "2025-12-16T08:26:17.389Z" }, ] [[package]] @@ -1576,7 +1567,7 @@ requires-dist = [ { name = "opentelemetry-api", specifier = ">=1.38.0,<2" }, { name = "opentelemetry-instrumentation", specifier = ">=0.59b0" }, { name = "opentelemetry-semantic-conventions", specifier = ">=0.59b0" }, - { name = "opentelemetry-semantic-conventions-ai", specifier = ">=0.4.13,<0.5.0" }, + { name = "opentelemetry-semantic-conventions-ai", editable = "../opentelemetry-semantic-conventions-ai" }, ] provides-extras = ["instruments"] @@ -1615,7 +1606,7 @@ test = [ [[package]] name = "opentelemetry-instrumentation-openai" -version = "0.52.3" +version = "0.50.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-api" }, @@ -1623,9 +1614,9 @@ dependencies = [ { name = "opentelemetry-semantic-conventions" }, { name = "opentelemetry-semantic-conventions-ai" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/0c/0c/01ed8d35ffc9a2279ed983a46283520904d47577a895c3072dad7dde932d/opentelemetry_instrumentation_openai-0.52.3.tar.gz", hash = "sha256:6f88c48538b0829b8cb62d4a80aa0f3f73e22bfab59c98f67ef251a1e5bd1d32", size = 6978364, upload-time = "2026-02-10T14:55:13.452Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ef/f4/2cd7698431474102e85466fe197ffa440d8309e59a1260b62d85602e472a/opentelemetry_instrumentation_openai-0.50.1.tar.gz", hash = "sha256:43eea552ca80cc31f0197fac3458b53c4dbc2cff8f80aa9aa9d3fe899dba9190", size = 32262, upload-time = "2025-12-16T08:27:06.986Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/da/3f/af4f73cd29b9fdf27dac9f0a76c5e037308f7f37ba3af24b2fdadd76cb30/opentelemetry_instrumentation_openai-0.52.3-py3-none-any.whl", hash = "sha256:7cd786fc2d6663c0a02bc0b9fdc54e54492107f50436a0bef3a610ecaf55afbc", size = 43082, upload-time = "2026-02-10T14:54:28.563Z" }, + { url = "https://files.pythonhosted.org/packages/ae/62/909c5d0674f24d36388ca0b2454d84f9e5b09765b0294ba5b9a8f6b7d9e7/opentelemetry_instrumentation_openai-0.50.1-py3-none-any.whl", hash = 
"sha256:ea0ca70f09f1bcfd6a188d5122327386386dcc9b85fbf7187dd34e38d32c126d", size = 43002, upload-time = "2025-12-16T08:26:36.569Z" }, ] [[package]] @@ -1657,11 +1648,25 @@ wheels = [ [[package]] name = "opentelemetry-semantic-conventions-ai" -version = "0.4.13" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ba/e6/40b59eda51ac47009fb47afcdf37c6938594a0bd7f3b9fadcbc6058248e3/opentelemetry_semantic_conventions_ai-0.4.13.tar.gz", hash = "sha256:94efa9fb4ffac18c45f54a3a338ffeb7eedb7e1bb4d147786e77202e159f0036", size = 5368, upload-time = "2025-08-22T10:14:17.387Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/35/b5/cf25da2218910f0d6cdf7f876a06bed118c4969eacaf60a887cbaef44f44/opentelemetry_semantic_conventions_ai-0.4.13-py3-none-any.whl", hash = "sha256:883a30a6bb5deaec0d646912b5f9f6dcbb9f6f72557b73d0f2560bf25d13e2d5", size = 6080, upload-time = "2025-08-22T10:14:16.477Z" }, +version = "0.4.14" +source = { editable = "../opentelemetry-semantic-conventions-ai" } +dependencies = [ + { name = "opentelemetry-sdk" }, + { name = "opentelemetry-semantic-conventions" }, +] + +[package.metadata] +requires-dist = [ + { name = "opentelemetry-sdk", specifier = ">=1.38.0,<2" }, + { name = "opentelemetry-semantic-conventions", specifier = ">=0.59b0" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "autopep8", specifier = ">=2.2.0,<3" }, + { name = "pytest", specifier = ">=8.2.2,<9" }, + { name = "pytest-sugar", specifier = "==1.0.0" }, + { name = "ruff", specifier = ">=0.4.0" }, ] [[package]] @@ -2117,16 +2122,14 @@ wheels = [ [[package]] name = "pytest-asyncio" -version = "1.3.0" +version = "0.24.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "backports-asyncio-runner", marker = "python_full_version < '3.11'" }, { name = "pytest" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/90/2c/8af215c0f776415f3590cac4f9086ccefd6fd463befeae41cd4d3f193e5a/pytest_asyncio-1.3.0.tar.gz", hash = "sha256:d7f52f36d231b80ee124cd216ffb19369aa168fc10095013c6b014a34d3ee9e5", size = 50087, upload-time = "2025-11-10T16:07:47.256Z" } +sdist = { url = "https://files.pythonhosted.org/packages/52/6d/c6cf50ce320cf8611df7a1254d86233b3df7cc07f9b5f5cbcb82e08aa534/pytest_asyncio-0.24.0.tar.gz", hash = "sha256:d081d828e576d85f875399194281e92bf8a68d60d72d1a2faf2feddb6c46b276", size = 49855, upload-time = "2024-08-22T08:03:18.145Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e5/35/f8b19922b6a25bc0880171a2f1a003eaeb93657475193ab516fd87cac9da/pytest_asyncio-1.3.0-py3-none-any.whl", hash = "sha256:611e26147c7f77640e6d0a92a38ed17c3e9848063698d5c93d5aa7aa11cebff5", size = 15075, upload-time = "2025-11-10T16:07:45.537Z" }, + { url = "https://files.pythonhosted.org/packages/96/31/6607dab48616902f76885dfcf62c08d929796fc3b2d2318faf9fd54dbed9/pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b", size = 18024, upload-time = "2024-08-22T08:03:15.536Z" }, ] [[package]] @@ -2390,27 +2393,28 @@ wheels = [ [[package]] name = "ruff" -version = "0.15.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/04/dc/4e6ac71b511b141cf626357a3946679abeba4cf67bc7cc5a17920f31e10d/ruff-0.15.1.tar.gz", hash = "sha256:c590fe13fb57c97141ae975c03a1aedb3d3156030cabd740d6ff0b0d601e203f", size = 4540855, upload-time = 
"2026-02-12T23:09:09.998Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/23/bf/e6e4324238c17f9d9120a9d60aa99a7daaa21204c07fcd84e2ef03bb5fd1/ruff-0.15.1-py3-none-linux_armv6l.whl", hash = "sha256:b101ed7cf4615bda6ffe65bdb59f964e9f4a0d3f85cbf0e54f0ab76d7b90228a", size = 10367819, upload-time = "2026-02-12T23:09:03.598Z" }, - { url = "https://files.pythonhosted.org/packages/b3/ea/c8f89d32e7912269d38c58f3649e453ac32c528f93bb7f4219258be2e7ed/ruff-0.15.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:939c995e9277e63ea632cc8d3fae17aa758526f49a9a850d2e7e758bfef46602", size = 10798618, upload-time = "2026-02-12T23:09:22.928Z" }, - { url = "https://files.pythonhosted.org/packages/5e/0f/1d0d88bc862624247d82c20c10d4c0f6bb2f346559d8af281674cf327f15/ruff-0.15.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:1d83466455fdefe60b8d9c8df81d3c1bbb2115cede53549d3b522ce2bc703899", size = 10148518, upload-time = "2026-02-12T23:08:58.339Z" }, - { url = "https://files.pythonhosted.org/packages/f5/c8/291c49cefaa4a9248e986256df2ade7add79388fe179e0691be06fae6f37/ruff-0.15.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9457e3c3291024866222b96108ab2d8265b477e5b1534c7ddb1810904858d16", size = 10518811, upload-time = "2026-02-12T23:09:31.865Z" }, - { url = "https://files.pythonhosted.org/packages/c3/1a/f5707440e5ae43ffa5365cac8bbb91e9665f4a883f560893829cf16a606b/ruff-0.15.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92c92b003e9d4f7fbd33b1867bb15a1b785b1735069108dfc23821ba045b29bc", size = 10196169, upload-time = "2026-02-12T23:09:17.306Z" }, - { url = "https://files.pythonhosted.org/packages/2a/ff/26ddc8c4da04c8fd3ee65a89c9fb99eaa5c30394269d424461467be2271f/ruff-0.15.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fe5c41ab43e3a06778844c586251eb5a510f67125427625f9eb2b9526535779", size = 10990491, upload-time = "2026-02-12T23:09:25.503Z" }, - { url = "https://files.pythonhosted.org/packages/fc/00/50920cb385b89413f7cdb4bb9bc8fc59c1b0f30028d8bccc294189a54955/ruff-0.15.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66a6dd6df4d80dc382c6484f8ce1bcceb55c32e9f27a8b94c32f6c7331bf14fb", size = 11843280, upload-time = "2026-02-12T23:09:19.88Z" }, - { url = "https://files.pythonhosted.org/packages/5d/6d/2f5cad8380caf5632a15460c323ae326f1e1a2b5b90a6ee7519017a017ca/ruff-0.15.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a4a42cbb8af0bda9bcd7606b064d7c0bc311a88d141d02f78920be6acb5aa83", size = 11274336, upload-time = "2026-02-12T23:09:14.907Z" }, - { url = "https://files.pythonhosted.org/packages/a3/1d/5f56cae1d6c40b8a318513599b35ea4b075d7dc1cd1d04449578c29d1d75/ruff-0.15.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ab064052c31dddada35079901592dfba2e05f5b1e43af3954aafcbc1096a5b2", size = 11137288, upload-time = "2026-02-12T23:09:07.475Z" }, - { url = "https://files.pythonhosted.org/packages/cd/20/6f8d7d8f768c93b0382b33b9306b3b999918816da46537d5a61635514635/ruff-0.15.1-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:5631c940fe9fe91f817a4c2ea4e81f47bee3ca4aa646134a24374f3c19ad9454", size = 11070681, upload-time = "2026-02-12T23:08:55.43Z" }, - { url = "https://files.pythonhosted.org/packages/9a/67/d640ac76069f64cdea59dba02af2e00b1fa30e2103c7f8d049c0cff4cafd/ruff-0.15.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:68138a4ba184b4691ccdc39f7795c66b3c68160c586519e7e8444cf5a53e1b4c", size = 10486401, upload-time = 
"2026-02-12T23:09:27.927Z" }, - { url = "https://files.pythonhosted.org/packages/65/3d/e1429f64a3ff89297497916b88c32a5cc88eeca7e9c787072d0e7f1d3e1e/ruff-0.15.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:518f9af03bfc33c03bdb4cb63fabc935341bb7f54af500f92ac309ecfbba6330", size = 10197452, upload-time = "2026-02-12T23:09:12.147Z" }, - { url = "https://files.pythonhosted.org/packages/78/83/e2c3bade17dad63bf1e1c2ffaf11490603b760be149e1419b07049b36ef2/ruff-0.15.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:da79f4d6a826caaea95de0237a67e33b81e6ec2e25fc7e1993a4015dffca7c61", size = 10693900, upload-time = "2026-02-12T23:09:34.418Z" }, - { url = "https://files.pythonhosted.org/packages/a1/27/fdc0e11a813e6338e0706e8b39bb7a1d61ea5b36873b351acee7e524a72a/ruff-0.15.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:3dd86dccb83cd7d4dcfac303ffc277e6048600dfc22e38158afa208e8bf94a1f", size = 11227302, upload-time = "2026-02-12T23:09:36.536Z" }, - { url = "https://files.pythonhosted.org/packages/f6/58/ac864a75067dcbd3b95be5ab4eb2b601d7fbc3d3d736a27e391a4f92a5c1/ruff-0.15.1-py3-none-win32.whl", hash = "sha256:660975d9cb49b5d5278b12b03bb9951d554543a90b74ed5d366b20e2c57c2098", size = 10462555, upload-time = "2026-02-12T23:09:29.899Z" }, - { url = "https://files.pythonhosted.org/packages/e0/5e/d4ccc8a27ecdb78116feac4935dfc39d1304536f4296168f91ed3ec00cd2/ruff-0.15.1-py3-none-win_amd64.whl", hash = "sha256:c820fef9dd5d4172a6570e5721704a96c6679b80cf7be41659ed439653f62336", size = 11599956, upload-time = "2026-02-12T23:09:01.157Z" }, - { url = "https://files.pythonhosted.org/packages/2a/07/5bda6a85b220c64c65686bc85bd0bbb23b29c62b3a9f9433fa55f17cda93/ruff-0.15.1-py3-none-win_arm64.whl", hash = "sha256:5ff7d5f0f88567850f45081fac8f4ec212be8d0b963e385c3f7d0d2eb4899416", size = 10874604, upload-time = "2026-02-12T23:09:05.515Z" }, +version = "0.14.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d4/77/9a7fe084d268f8855d493e5031ea03fa0af8cc05887f638bf1c4e3363eb8/ruff-0.14.11.tar.gz", hash = "sha256:f6dc463bfa5c07a59b1ff2c3b9767373e541346ea105503b4c0369c520a66958", size = 5993417, upload-time = "2026-01-08T19:11:58.322Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f0/a6/a4c40a5aaa7e331f245d2dc1ac8ece306681f52b636b40ef87c88b9f7afd/ruff-0.14.11-py3-none-linux_armv6l.whl", hash = "sha256:f6ff2d95cbd335841a7217bdfd9c1d2e44eac2c584197ab1385579d55ff8830e", size = 12951208, upload-time = "2026-01-08T19:12:09.218Z" }, + { url = "https://files.pythonhosted.org/packages/5c/5c/360a35cb7204b328b685d3129c08aca24765ff92b5a7efedbdd6c150d555/ruff-0.14.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f6eb5c1c8033680f4172ea9c8d3706c156223010b8b97b05e82c59bdc774ee6", size = 13330075, upload-time = "2026-01-08T19:12:02.549Z" }, + { url = "https://files.pythonhosted.org/packages/1b/9e/0cc2f1be7a7d33cae541824cf3f95b4ff40d03557b575912b5b70273c9ec/ruff-0.14.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f2fc34cc896f90080fca01259f96c566f74069a04b25b6205d55379d12a6855e", size = 12257809, upload-time = "2026-01-08T19:12:00.366Z" }, + { url = "https://files.pythonhosted.org/packages/a7/e5/5faab97c15bb75228d9f74637e775d26ac703cc2b4898564c01ab3637c02/ruff-0.14.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53386375001773ae812b43205d6064dae49ff0968774e6befe16a994fc233caa", size = 12678447, upload-time = "2026-01-08T19:12:13.899Z" }, + { url = 
"https://files.pythonhosted.org/packages/1b/33/e9767f60a2bef779fb5855cab0af76c488e0ce90f7bb7b8a45c8a2ba4178/ruff-0.14.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a697737dce1ca97a0a55b5ff0434ee7205943d4874d638fe3ae66166ff46edbe", size = 12758560, upload-time = "2026-01-08T19:11:42.55Z" }, + { url = "https://files.pythonhosted.org/packages/eb/84/4c6cf627a21462bb5102f7be2a320b084228ff26e105510cd2255ea868e5/ruff-0.14.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6845ca1da8ab81ab1dce755a32ad13f1db72e7fba27c486d5d90d65e04d17b8f", size = 13599296, upload-time = "2026-01-08T19:11:30.371Z" }, + { url = "https://files.pythonhosted.org/packages/88/e1/92b5ed7ea66d849f6157e695dc23d5d6d982bd6aa8d077895652c38a7cae/ruff-0.14.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e36ce2fd31b54065ec6f76cb08d60159e1b32bdf08507862e32f47e6dde8bcbf", size = 15048981, upload-time = "2026-01-08T19:12:04.742Z" }, + { url = "https://files.pythonhosted.org/packages/61/df/c1bd30992615ac17c2fb64b8a7376ca22c04a70555b5d05b8f717163cf9f/ruff-0.14.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:590bcc0e2097ecf74e62a5c10a6b71f008ad82eb97b0a0079e85defe19fe74d9", size = 14633183, upload-time = "2026-01-08T19:11:40.069Z" }, + { url = "https://files.pythonhosted.org/packages/04/e9/fe552902f25013dd28a5428a42347d9ad20c4b534834a325a28305747d64/ruff-0.14.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:53fe71125fc158210d57fe4da26e622c9c294022988d08d9347ec1cf782adafe", size = 14050453, upload-time = "2026-01-08T19:11:37.555Z" }, + { url = "https://files.pythonhosted.org/packages/ae/93/f36d89fa021543187f98991609ce6e47e24f35f008dfe1af01379d248a41/ruff-0.14.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a35c9da08562f1598ded8470fcfef2afb5cf881996e6c0a502ceb61f4bc9c8a3", size = 13757889, upload-time = "2026-01-08T19:12:07.094Z" }, + { url = "https://files.pythonhosted.org/packages/b7/9f/c7fb6ecf554f28709a6a1f2a7f74750d400979e8cd47ed29feeaa1bd4db8/ruff-0.14.11-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:0f3727189a52179393ecf92ec7057c2210203e6af2676f08d92140d3e1ee72c1", size = 13955832, upload-time = "2026-01-08T19:11:55.064Z" }, + { url = "https://files.pythonhosted.org/packages/db/a0/153315310f250f76900a98278cf878c64dfb6d044e184491dd3289796734/ruff-0.14.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:eb09f849bd37147a789b85995ff734a6c4a095bed5fd1608c4f56afc3634cde2", size = 12586522, upload-time = "2026-01-08T19:11:35.356Z" }, + { url = "https://files.pythonhosted.org/packages/2f/2b/a73a2b6e6d2df1d74bf2b78098be1572191e54bec0e59e29382d13c3adc5/ruff-0.14.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:c61782543c1231bf71041461c1f28c64b961d457d0f238ac388e2ab173d7ecb7", size = 12724637, upload-time = "2026-01-08T19:11:47.796Z" }, + { url = "https://files.pythonhosted.org/packages/f0/41/09100590320394401cd3c48fc718a8ba71c7ddb1ffd07e0ad6576b3a3df2/ruff-0.14.11-py3-none-musllinux_1_2_i686.whl", hash = "sha256:82ff352ea68fb6766140381748e1f67f83c39860b6446966cff48a315c3e2491", size = 13145837, upload-time = "2026-01-08T19:11:32.87Z" }, + { url = "https://files.pythonhosted.org/packages/3b/d8/e035db859d1d3edf909381eb8ff3e89a672d6572e9454093538fe6f164b0/ruff-0.14.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:728e56879df4ca5b62a9dde2dd0eb0edda2a55160c0ea28c4025f18c03f86984", size = 13850469, upload-time = "2026-01-08T19:12:11.694Z" }, + { url = 
"https://files.pythonhosted.org/packages/4e/02/bb3ff8b6e6d02ce9e3740f4c17dfbbfb55f34c789c139e9cd91985f356c7/ruff-0.14.11-py3-none-win32.whl", hash = "sha256:337c5dd11f16ee52ae217757d9b82a26400be7efac883e9e852646f1557ed841", size = 12851094, upload-time = "2026-01-08T19:11:45.163Z" }, + { url = "https://files.pythonhosted.org/packages/58/f1/90ddc533918d3a2ad628bc3044cdfc094949e6d4b929220c3f0eb8a1c998/ruff-0.14.11-py3-none-win_amd64.whl", hash = "sha256:f981cea63d08456b2c070e64b79cb62f951aa1305282974d4d5216e6e0178ae6", size = 14001379, upload-time = "2026-01-08T19:11:52.591Z" }, + { url = "https://files.pythonhosted.org/packages/c4/1c/1dbe51782c0e1e9cfce1d1004752672d2d4629ea46945d19d731ad772b3b/ruff-0.14.11-py3-none-win_arm64.whl", hash = "sha256:649fb6c9edd7f751db276ef42df1f3df41c38d67d199570ae2a7bd6cbc3590f0", size = 12938644, upload-time = "2026-01-08T19:11:50.027Z" }, ] [[package]] diff --git a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py index 46539007b3..8bb286d0e9 100644 --- a/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py +++ b/packages/opentelemetry-semantic-conventions-ai/opentelemetry/semconv_ai/__init__.py @@ -260,6 +260,24 @@ class SpanAttributes: MCP_SESSION_INIT_OPTIONS = "mcp.session.init_options" MCP_RESPONSE_VALUE = "mcp.response.value" + # GenAI Task Attributes (custom - not yet in official OTel semconv) + GEN_AI_TASK_ID = "gen_ai.task.id" + GEN_AI_TASK_NAME = "gen_ai.task.name" + GEN_AI_TASK_PARENT_ID = "gen_ai.task.parent.id" + GEN_AI_TASK_INPUT = "gen_ai.task.input" + GEN_AI_TASK_OUTPUT = "gen_ai.task.output" + GEN_AI_TASK_STATUS = "gen_ai.task.status" + GEN_AI_TASK_KIND = "gen_ai.task.kind" + + # GenAI Workflow Attributes (custom - not yet in official OTel semconv) + GEN_AI_WORKFLOW_NODES = "gen_ai.workflow.nodes" + GEN_AI_WORKFLOW_EDGES = "gen_ai.workflow.edges" + + # LangGraph-specific Attributes (vendor namespace) + LANGGRAPH_COMMAND_SOURCE_NODE = "langgraph.command.source_node" + LANGGRAPH_COMMAND_GOTO_NODE = "langgraph.command.goto_node" + LANGGRAPH_COMMAND_GOTO_NODES = "langgraph.command.goto_nodes" + class Events(Enum): DB_QUERY_EMBEDDINGS = "db.query.embeddings" @@ -304,3 +322,26 @@ class TraceloopSpanKindValues(Enum): AGENT = "agent" TOOL = "tool" UNKNOWN = "unknown" + + +class GenAICustomOperationName(Enum): + """ + Custom operation names extending the official OpenTelemetry GenAI semantic conventions. + + For standard operations (create_agent, invoke_agent, execute_tool, chat, embeddings), + use the official GenAiOperationNameValues from: + opentelemetry.semconv._incubating.attributes.gen_ai_attributes + + These are agent workflow extensions not yet in the official spec. + """ + + EXECUTE_TASK = "execute_task" + LLM_REQUEST = "llm_request" + VECTOR_DB_RETRIEVE = "vector_db_retrieve" + + +class GenAITaskStatus(Enum): + """Task execution status values for gen_ai.task.status attribute.""" + + SUCCESS = "success" + FAILURE = "failure"