
Commit 987d252

adding halting
1 parent 14bb1e6 commit 987d252

File tree

  • traceloop-sdk/lib/traceloop/sdk.rb

1 file changed: +17 −29 lines

traceloop-sdk/lib/traceloop/sdk.rb

Lines changed: 17 additions & 29 deletions
@@ -8,7 +8,7 @@ class Traceloop
       def initialize
         OpenTelemetry::SDK.configure do |c|
           c.add_span_processor(
-            OpenTelemetry::SDK::Trace::Export::SimpleSpanProcessor.new(
+            OpenTelemetry::SDK::Trace::Export::BatchSpanProcessor.new(
              OpenTelemetry::Exporter::OTLP::Exporter.new(
                endpoint: "#{ENV.fetch("TRACELOOP_BASE_URL", "https://api.traceloop.com")}/v1/traces",
                headers: {
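
Since the exporter now hangs off a BatchSpanProcessor, spans are buffered and exported asynchronously instead of being sent one by one as they finish. A minimal sketch of what that implies for short-lived scripts; the Traceloop::SDK::Traceloop entry point is assumed from the gem's usual setup, and the at_exit flush is not part of this diff:

require "traceloop/sdk"

traceloop = Traceloop::SDK::Traceloop.new  # assumed entry point; runs the configure block above

# BatchSpanProcessor ships spans from a background thread, so flush the
# tracer provider before the process exits to avoid dropping the last batch.
at_exit { OpenTelemetry.tracer_provider.force_flush }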
@@ -61,7 +61,9 @@ def log_response(response)
           log_bedrock_response(response)
         # Check for RubyLLM::Message objects
         elsif response.instance_of?(::RubyLLM::Message)
-          log_ruby_llm_response(response)
+          log_ruby_llm_message(response)
+        elsif response.instance_of?(::RubyLLM::Tool::Halt)
+          log_ruby_llm_halt(response)
         # This is Gemini specific, see -
         # https://github.com/gbaptista/gemini-ai?tab=readme-ov-file#generate_content
         elsif response.respond_to?(:has_key?) && response.has_key?("candidates")
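
For context on the new elsif branch: a RubyLLM tool can end the tool-call loop early, and the resulting ::RubyLLM::Tool::Halt object (rather than a RubyLLM::Message) is what then gets handed to log_response. A hypothetical tool sketch, assuming RubyLLM's halt helper inside Tool#execute; the tool name and description are invented for illustration:

class StopSearch < RubyLLM::Tool
  description "Stops the agent loop once an answer has been found"

  def execute
    # Assumed to return a ::RubyLLM::Tool::Halt carrying this content;
    # log_response routes such objects to log_ruby_llm_halt instead of log_ruby_llm_message.
    halt("final answer found")
  end
end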
@@ -82,36 +84,22 @@ def log_gemini_response(response)
           })
         end
 
-        def log_ruby_llm_response(response)
-          model = response.respond_to?(:model_id) ? response.model_id : @model
+        def log_ruby_llm_message(response)
           @span.add_attributes({
-            OpenTelemetry::SemanticConventionsAi::SpanAttributes::GEN_AI_RESPONSE_MODEL => model,
+            OpenTelemetry::SemanticConventionsAi::SpanAttributes::GEN_AI_RESPONSE_MODEL => response.model_id,
+            OpenTelemetry::SemanticConventionsAi::SpanAttributes::GEN_AI_USAGE_COMPLETION_TOKENS => response.output_tokens,
+            OpenTelemetry::SemanticConventionsAi::SpanAttributes::GEN_AI_USAGE_PROMPT_TOKENS => response.input_tokens,
+            "#{OpenTelemetry::SemanticConventionsAi::SpanAttributes::GEN_AI_COMPLETIONS}.0.role" => response.role.to_s,
+            "#{OpenTelemetry::SemanticConventionsAi::SpanAttributes::GEN_AI_COMPLETIONS}.0.content" => response.content
           })
+        end
 
-          if response.respond_to?(:input_tokens) && response.input_tokens &&
-             response.respond_to?(:output_tokens) && response.output_tokens
-            @span.add_attributes({
-              OpenTelemetry::SemanticConventionsAi::SpanAttributes::GEN_AI_USAGE_COMPLETION_TOKENS => response.output_tokens,
-              OpenTelemetry::SemanticConventionsAi::SpanAttributes::GEN_AI_USAGE_PROMPT_TOKENS => response.input_tokens,
-            })
-          end
-
-          if response.respond_to?(:content) && response.content
-            content_text = ""
-            role = response.respond_to?(:role) ? response.role.to_s : "assistant"
-
-            # Handle RubyLLM::Content object
-            if response.content.respond_to?(:text)
-              content_text = response.content.text
-            elsif response.content.respond_to?(:to_s)
-              content_text = response.content.to_s
-            end
-
-            @span.add_attributes({
-              "#{OpenTelemetry::SemanticConventionsAi::SpanAttributes::GEN_AI_COMPLETIONS}.0.role" => role,
-              "#{OpenTelemetry::SemanticConventionsAi::SpanAttributes::GEN_AI_COMPLETIONS}.0.content" => content_text
-            })
-          end
+        def log_ruby_llm_halt(response)
+          @span.add_attributes({
+            OpenTelemetry::SemanticConventionsAi::SpanAttributes::LLM_RESPONSE_MODEL => @model,
+            "#{OpenTelemetry::SemanticConventionsAi::SpanAttributes::GEN_AI_COMPLETIONS}.0.role" => "tool",
+            "#{OpenTelemetry::SemanticConventionsAi::SpanAttributes::GEN_AI_COMPLETIONS}.0.content" => response.content
+          })
         end
 
         def log_bedrock_response(response)
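
Taken together, log_response now covers both ordinary RubyLLM messages and halting tool results. A hedged usage sketch; the llm_call block follows the shape of the SDK README, and the RubyLLM chat calls (plus the StopSearch tool above) are assumptions rather than part of this diff:

traceloop.llm_call(provider: "ruby_llm", model: "gpt-4o-mini") do |tracer|
  chat = RubyLLM.chat.with_tool(StopSearch)
  response = chat.ask("Search until you find the answer, then stop.")

  # log_response dispatches on the object it receives:
  # a RubyLLM::Message goes to log_ruby_llm_message (model, token usage, role, content);
  # a RubyLLM::Tool::Halt goes to log_ruby_llm_halt (role "tool", the halt content, @model as the model).
  tracer.log_response(response)
end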
