sentry_sdk/integrations/langgraph.py (62 additions, 4 deletions)
@@ -62,7 +62,13 @@ def _normalize_langgraph_message(message):

    parsed = {"role": getattr(message, "type", None), "content": message.content}

    for attr in ["name", "tool_calls", "function_call", "tool_call_id"]:
    for attr in [
        "name",
        "tool_calls",
        "function_call",
        "tool_call_id",
        "response_metadata",
    ]:
        if hasattr(message, attr):
            value = getattr(message, attr)
            if value is not None:
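For illustration only (not part of the diff), a message normalized by `_normalize_langgraph_message` once `response_metadata` is kept might look roughly like the sketch below; the `model_name` and `token_usage` keys are assumptions based on the handling added further down, and the actual metadata contents vary by provider:

```python
# Hypothetical example; field names and values are illustrative only.
normalized_message = {
    "role": "ai",
    "content": "Hello!",
    "response_metadata": {
        "model_name": "gpt-4o-mini",
        "token_usage": {
            "prompt_tokens": 12,
            "completion_tokens": 5,
            "total_tokens": 17,
        },
    },
}
```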
@@ -311,14 +317,66 @@ def _extract_tool_calls(messages):
    return tool_calls if tool_calls else None


def _set_response_attributes(span, input_messages, result, integration):
    # type: (Any, Optional[List[Any]], Any, LanggraphIntegration) -> None
    if not (should_send_default_pii() and integration.include_prompts):
def _set_usage_data(span, messages):
    # type: (sentry_sdk.tracing.Span, Any) -> None
    input_tokens = 0
    output_tokens = 0
    total_tokens = 0

    for message in messages:
        response_metadata = message.get("response_metadata")
        if response_metadata is None:
            continue

        token_usage = response_metadata.get("token_usage")
        if not token_usage:
            continue

        input_tokens += int(token_usage.get("prompt_tokens", 0))
        output_tokens += int(token_usage.get("completion_tokens", 0))
        total_tokens += int(token_usage.get("total_tokens", 0))

    if input_tokens > 0:
        span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)

    if output_tokens > 0:
        span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)

    if total_tokens > 0:
        span.set_data(
            SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS,
            total_tokens,
        )


def _set_response_model_name(span, messages):
    # type: (sentry_sdk.tracing.Span, Any) -> None
    last_message = messages[-1]
    response_metadata = last_message.get("response_metadata")
    if response_metadata is None:
        return

    model_name = response_metadata.get("model_name")
    if model_name is None:
        return

    set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_MODEL, model_name)


def _set_response_attributes(span, input_messages, result, integration):
    # type: (Any, Optional[List[Any]], Any, LanggraphIntegration) -> None
    parsed_response_messages = _parse_langgraph_messages(result)
    new_messages = _get_new_messages(input_messages, parsed_response_messages)

    if new_messages is None:
        return

    _set_usage_data(span, new_messages)
    _set_response_model_name(span, new_messages)

    if not (should_send_default_pii() and integration.include_prompts):
        return

    llm_response_text = _extract_llm_response_text(new_messages)
    if llm_response_text:
        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, llm_response_text)
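As a rough usage sketch (assuming this patch is applied so the private helpers are importable, and that an active span is at hand; the span op/name and message values below are illustrative only): token counts are summed across all new messages, while the response model name is read from the last message only.

```python
import sentry_sdk

# Assumes the patched module is installed; these are private helpers.
from sentry_sdk.integrations.langgraph import (
    _set_response_model_name,
    _set_usage_data,
)

messages = [
    {
        "role": "ai",
        "content": "partial answer",
        "response_metadata": {
            "model_name": "gpt-4o-mini",
            "token_usage": {"prompt_tokens": 10, "completion_tokens": 4, "total_tokens": 14},
        },
    },
    {
        "role": "ai",
        "content": "final answer",
        "response_metadata": {
            "model_name": "gpt-4o-mini",
            "token_usage": {"prompt_tokens": 20, "completion_tokens": 6, "total_tokens": 26},
        },
    },
]

with sentry_sdk.start_span(op="gen_ai.invoke_agent", name="example agent") as span:
    _set_usage_data(span, messages)            # records 30 input, 10 output, 40 total tokens
    _set_response_model_name(span, messages)   # records "gpt-4o-mini" from the last message
```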