Skip to content

Commit 94d9ea3

Browse files
fix(usage): Normalize None token detail objects on Usage initialization
Extends #2034 to handle providers that return None for entire input_tokens_details and output_tokens_details objects (not just the fields within them). This affects non-streaming responses. Related to #1179 (which fixed the streaming case). Some providers like llama-stack return null for these optional fields in their JSON responses. The OpenAI SDK maps these to None in Python. Previously, passing None to the Usage constructor would fail Pydantic validation before __post_init__ could normalize them. This PR uses Pydantic's BeforeValidator to normalize None values at the field level, before Pydantic's type validation runs. Signed-off-by: Adrian Cole <[email protected]>
1 parent 0d2d771 commit 94d9ea3

File tree

3 files changed

+38
-19
lines changed

3 files changed

+38
-19
lines changed

src/agents/models/openai_chatcompletions.py

Lines changed: 2 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -102,18 +102,8 @@ async def get_response(
102102
input_tokens=response.usage.prompt_tokens,
103103
output_tokens=response.usage.completion_tokens,
104104
total_tokens=response.usage.total_tokens,
105-
input_tokens_details=InputTokensDetails(
106-
cached_tokens=getattr(
107-
response.usage.prompt_tokens_details, "cached_tokens", 0
108-
)
109-
or 0,
110-
),
111-
output_tokens_details=OutputTokensDetails(
112-
reasoning_tokens=getattr(
113-
response.usage.completion_tokens_details, "reasoning_tokens", 0
114-
)
115-
or 0,
116-
),
105+
input_tokens_details=response.usage.prompt_tokens_details,
106+
output_tokens_details=response.usage.completion_tokens_details,
117107
)
118108
if response.usage
119109
else Usage()

src/agents/usage.py

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,21 @@
11
from dataclasses import field
2+
from typing import Annotated, Any
23

34
from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
5+
from pydantic import BeforeValidator
46
from pydantic.dataclasses import dataclass
57

68

9+
def _normalize_input_tokens_details(v: Any) -> InputTokensDetails:
10+
"""Normalize None to default InputTokensDetails for providers that omit this field."""
11+
return v if v is not None else InputTokensDetails(cached_tokens=0)
12+
13+
14+
def _normalize_output_tokens_details(v: Any) -> OutputTokensDetails:
15+
"""Normalize None to default OutputTokensDetails for providers that omit this field."""
16+
return v if v is not None else OutputTokensDetails(reasoning_tokens=0)
17+
18+
719
@dataclass
820
class RequestUsage:
921
"""Usage details for a single API request."""
@@ -32,16 +44,16 @@ class Usage:
3244
input_tokens: int = 0
3345
"""Total input tokens sent, across all requests."""
3446

35-
input_tokens_details: InputTokensDetails = field(
36-
default_factory=lambda: InputTokensDetails(cached_tokens=0)
37-
)
47+
input_tokens_details: Annotated[
48+
InputTokensDetails, BeforeValidator(_normalize_input_tokens_details)
49+
] = field(default_factory=lambda: InputTokensDetails(cached_tokens=0))
3850
"""Details about the input tokens, matching responses API usage details."""
3951
output_tokens: int = 0
4052
"""Total output tokens received, across all requests."""
4153

42-
output_tokens_details: OutputTokensDetails = field(
43-
default_factory=lambda: OutputTokensDetails(reasoning_tokens=0)
44-
)
54+
output_tokens_details: Annotated[
55+
OutputTokensDetails, BeforeValidator(_normalize_output_tokens_details)
56+
] = field(default_factory=lambda: OutputTokensDetails(reasoning_tokens=0))
4557
"""Details about the output tokens, matching responses API usage details."""
4658

4759
total_tokens: int = 0

tests/test_usage.py

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -270,7 +270,24 @@ def test_anthropic_cost_calculation_scenario():
270270

271271

272272
def test_usage_normalizes_none_token_details():
273-
# Some providers don't populate optional fields, resulting in None values
273+
# Some providers don't populate optional token detail fields
274+
# (cached_tokens, reasoning_tokens), and the OpenAI SDK's generated
275+
# code can bypass Pydantic validation (e.g., via model_construct),
276+
# allowing None values. We normalize these to 0 to prevent TypeErrors.
277+
278+
# Test entire objects being None (BeforeValidator)
279+
usage = Usage(
280+
requests=1,
281+
input_tokens=100,
282+
input_tokens_details=None,
283+
output_tokens=50,
284+
output_tokens_details=None,
285+
total_tokens=150,
286+
)
287+
assert usage.input_tokens_details.cached_tokens == 0
288+
assert usage.output_tokens_details.reasoning_tokens == 0
289+
290+
# Test fields within objects being None (__post_init__)
274291
input_details = InputTokensDetails(cached_tokens=0)
275292
input_details.__dict__["cached_tokens"] = None
276293

0 commit comments

Comments (0)