Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -242,8 +242,13 @@ def __call__(

# In legacy chains like LLMChain, suppressing model instrumentations
# within create_llm_span doesn't work, so this should help as a fallback
context_api.attach(
context_api.set_value(SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, True)
)
try:
context_api.attach(
context_api.set_value(SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, True)
)
except Exception:
# If context setting fails, continue without suppression
# This is not critical for core functionality
pass

return wrapped(*args, **kwargs)
Original file line number Diff line number Diff line change
Expand Up @@ -187,18 +187,57 @@ def _end_span(self, span: Span, run_id: UUID) -> None:
span.end()
token = self.spans[run_id].token
if token:
try:
# Use the runtime context directly to avoid logging from context_api.detach()
from opentelemetry.context import _RUNTIME_CONTEXT
_RUNTIME_CONTEXT.detach(token)
except (ValueError, RuntimeError, Exception):
# Context detach can fail in async scenarios when tokens are created in different contexts
# This includes ValueError, RuntimeError, and other context-related exceptions
# This is expected behavior and doesn't affect the correct span hierarchy
pass
self._safe_detach_context(token)

del self.spans[run_id]

def _safe_attach_context(self, span: Span):
    """Make *span* the active span in the OpenTelemetry context.

    Attachment can fail in complex async scenarios or when the runtime
    context is corrupted; in that case the failure is swallowed and no
    token is produced, signalling that there is nothing to detach later.

    Returns:
        The context token required for a later detach, or ``None`` when
        attachment failed.
    """
    token = None
    try:
        ctx = set_span_in_context(span)
        token = context_api.attach(ctx)
    except Exception:
        # Deliberately broad: a failed attach must never break tracing.
        # A None token tells _end_span that no detach is needed.
        pass
    return token

def _safe_detach_context(self, token):
    """Detach a context *token* without ever raising.

    Detaching can legitimately fail in async/concurrent scenarios:

    1. The token was created in one async task/thread and is detached in
       another.
    2. The context was already detached elsewhere.
    3. The token became invalid due to context switching.
    4. Race conditions under high concurrency.

    These are expected and safe to ignore — the span itself has already
    been ended by the caller, so the tracing data is correctly captured.

    The private ``_RUNTIME_CONTEXT`` is preferred because the public
    ``context_api.detach()`` logs an error on every failure, which would
    be noisy for the expected cases above. Since the private attribute
    may move or disappear across OpenTelemetry versions, fall back to
    the public API when it cannot be imported, rather than silently
    turning every detach into a no-op.

    Args:
        token: The token returned by ``_safe_attach_context``; ``None``
            or falsy means nothing was attached and there is nothing to
            detach.
    """
    if not token:
        return

    try:
        try:
            # Fast path: detach via the runtime context directly to avoid
            # the error logging performed by context_api.detach().
            from opentelemetry.context import _RUNTIME_CONTEXT

            detach = _RUNTIME_CONTEXT.detach
        except ImportError:
            # Private API unavailable in this OTel version — use the
            # public detach as a last resort (it may log on failure,
            # which is acceptable compared to leaking the context).
            detach = context_api.detach
        detach(token)
    except Exception:
        # Expected failure modes (see docstring); the span hierarchy is
        # unaffected, so swallowing is the correct behavior here.
        pass

Comment on lines +208 to +240
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Prefer contextlib.suppress and avoid relying solely on private _RUNTIME_CONTEXT.

Using a private API may break across OTel versions. Try context_api.detach first, then fall back to _RUNTIME_CONTEXT only if needed. Also adopt contextlib.suppress to satisfy linting and reduce noise.

-    def _safe_detach_context(self, token):
+    def _safe_detach_context(self, token: Optional[object]) -> None:
@@
-        try:
-            # Use the runtime context directly to avoid error logging from context_api.detach()
-            from opentelemetry.context import _RUNTIME_CONTEXT
-
-            _RUNTIME_CONTEXT.detach(token)
-        except Exception:
-            # Context detach can fail in async scenarios when tokens are created in different contexts
-            # This includes ValueError, RuntimeError, and other context-related exceptions
-            # This is expected behavior and doesn't affect the correct span hierarchy
-            #
-            # Common scenarios where this happens:
-            # 1. Token created in one async task/thread, detached in another
-            # 2. Context was already detached by another process
-            # 3. Token became invalid due to context switching
-            # 4. Race conditions in highly concurrent scenarios
-            #
-            # This is safe to ignore as the span itself was properly ended
-            # and the tracing data is correctly captured.
-            pass
+        # Attempt a normal detach first; fall back to runtime context if needed.
+        with contextlib.suppress(Exception):
+            context_api.detach(token)
+            return
+        with contextlib.suppress(Exception):
+            from opentelemetry.context import _RUNTIME_CONTEXT  # type: ignore
+            _RUNTIME_CONTEXT.detach(token)

Committable suggestion skipped: line range outside the PR's diff.

def _create_span(
self,
run_id: UUID,
Expand All @@ -220,12 +259,17 @@ def _create_span(
for k, v in metadata.items()
if v is not None
}
context_api.attach(
context_api.set_value(
"association_properties",
{**current_association_properties, **sanitized_metadata},
try:
context_api.attach(
context_api.set_value(
"association_properties",
{**current_association_properties, **sanitized_metadata},
)
)
)
except Exception:
# If setting association properties fails, continue without them
# This doesn't affect the core span functionality
pass
Comment on lines +262 to +272
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Replace try/except/pass with contextlib.suppress and retain the token for later detach (Ruff SIM105).

This aligns with lint guidance and prevents leaking association_properties in the runtime context beyond the span lifecycle.

Apply this diff:

-            try:
-                context_api.attach(
-                    context_api.set_value(
-                        "association_properties",
-                        {**current_association_properties, **sanitized_metadata},
-                    )
-                )
-            except Exception:
-                # If setting association properties fails, continue without them
-                # This doesn't affect the core span functionality
-                pass
+            with contextlib.suppress(Exception):
+                assoc_token = context_api.attach(
+                    context_api.set_value(
+                        "association_properties",
+                        {**current_association_properties, **sanitized_metadata},
+                    )
+                )
+                if assoc_token is not None:
+                    self._association_tokens[run_id] = assoc_token
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
try:
context_api.attach(
context_api.set_value(
"association_properties",
{**current_association_properties, **sanitized_metadata},
)
)
)
except Exception:
# If setting association properties fails, continue without them
# This doesn't affect the core span functionality
pass
with contextlib.suppress(Exception):
assoc_token = context_api.attach(
context_api.set_value(
"association_properties",
{**current_association_properties, **sanitized_metadata},
)
)
if assoc_token is not None:
self._association_tokens[run_id] = assoc_token
🧰 Tools
🪛 Ruff (0.12.2)

262-272: Use contextlib.suppress(Exception) instead of try-except-pass

(SIM105)

🤖 Prompt for AI Agents
In
packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py
around lines 262-272, replace the bare try/except/pass with contextlib.suppress
while capturing the token returned by context_api.attach so it can be detached
later; specifically, import contextlib, use contextlib.suppress(Exception)
around the context_api.attach(...) call and assign its return to a variable
(e.g., token = context_api.attach(...)) and ensure that elsewhere after the span
lifecycle you call context_api.detach(token) to avoid leaking
association_properties in the runtime context beyond the span.


if parent_run_id is not None and parent_run_id in self.spans:
span = self.tracer.start_span(
Expand All @@ -236,7 +280,7 @@ def _create_span(
else:
span = self.tracer.start_span(span_name, kind=kind)

token = context_api.attach(set_span_in_context(span))
token = self._safe_attach_context(span)

_set_span_attribute(span, SpanAttributes.TRACELOOP_WORKFLOW_NAME, workflow_name)
_set_span_attribute(span, SpanAttributes.TRACELOOP_ENTITY_PATH, entity_path)
Expand Down Expand Up @@ -317,9 +361,13 @@ def _create_llm_span(

# we already have an LLM span by this point,
# so skip any downstream instrumentation from here
token = context_api.attach(
context_api.set_value(SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, True)
)
try:
token = context_api.attach(
context_api.set_value(SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, True)
)
except Exception:
# If context setting fails, continue without suppression token
token = None

self.spans[run_id] = SpanHolder(
span, token, None, [], workflow_name, None, entity_path
Expand Down Expand Up @@ -411,11 +459,15 @@ def on_chain_end(

self._end_span(span, run_id)
if parent_run_id is None:
context_api.attach(
context_api.set_value(
SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, False
try:
context_api.attach(
context_api.set_value(
SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, False
)
)
)
except Exception:
# If context reset fails, it's not critical for functionality
pass

@dont_throw
def on_chat_model_start(
Expand Down Expand Up @@ -513,8 +565,12 @@ def on_llm_end(
if model_name is None:
model_name = extract_model_name_from_response_metadata(response)
if model_name is None and hasattr(context_api, "get_value"):
association_properties = context_api.get_value("association_properties") or {}
model_name = _extract_model_name_from_association_metadata(association_properties)
association_properties = (
context_api.get_value("association_properties") or {}
)
model_name = _extract_model_name_from_association_metadata(
association_properties
)
token_usage = (response.llm_output or {}).get("token_usage") or (
response.llm_output or {}
).get("usage")
Expand Down