Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pydantic_ai_slim/pydantic_ai/profiles/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ class OpenAIModelProfile(ModelProfile):

def openai_model_profile(model_name: str) -> ModelProfile:
"""Get the model profile for an OpenAI model."""
is_reasoning_model = model_name.startswith('o')
is_reasoning_model = model_name.startswith('o') or model_name.startswith('gpt-5')
# Structured Outputs (output mode 'native') is only supported with the gpt-4o-mini, gpt-4o-mini-2024-07-18, and gpt-4o-2024-08-06 model snapshots and later.
# We leave it in here for all models because the `default_structured_output_mode` is `'tool'`, so `native` is only used
# when the user specifically uses the `NativeOutput` marker, so an error from the API is acceptable.
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
# VCR cassette: one recorded HTTP interaction with the OpenAI Chat Completions API.
# Request: a single user message ("What is the capital of France?") sent to model gpt-5.
# Response: 200 OK from gpt-5-2025-08-07 answering "Paris." with finish_reason 'stop'
# and zero reasoning tokens reported in usage details.
# Replayed by the test suite (pytest-recording) so tests run without a live API key.
interactions:
- request:
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '104'
      content-type:
      - application/json
      host:
      - api.openai.com
    method: POST
    parsed_body:
      messages:
      - content: What is the capital of France?
        role: user
      model: gpt-5
      stream: false
    uri: https://api.openai.com/v1/chat/completions
  response:
    headers:
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      connection:
      - keep-alive
      content-length:
      - '772'
      content-type:
      - application/json
      openai-organization:
      - pydantic-28gund
      openai-processing-ms:
      - '2671'
      openai-project:
      - proj_dKobscVY9YJxeEaDJen54e3d
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      transfer-encoding:
      - chunked
    parsed_body:
      choices:
      - finish_reason: stop
        index: 0
        message:
          annotations: []
          content: Paris.
          refusal: null
          role: assistant
      created: 1754902196
      id: chatcmpl-C3IW4xlMbxWk92VDDKNyaEJjJrTmh
      model: gpt-5-2025-08-07
      object: chat.completion
      service_tier: default
      system_fingerprint: null
      usage:
        completion_tokens: 11
        completion_tokens_details:
          accepted_prediction_tokens: 0
          audio_tokens: 0
          reasoning_tokens: 0
          rejected_prediction_tokens: 0
        prompt_tokens: 13
        prompt_tokens_details:
          audio_tokens: 0
          cached_tokens: 0
        total_tokens: 24
    status:
      code: 200
      message: OK
version: 1
31 changes: 8 additions & 23 deletions tests/models/test_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -776,7 +776,6 @@ async def test_image_url_input(allow_model_requests: None):
)


@pytest.mark.vcr()
async def test_openai_audio_url_input(allow_model_requests: None, openai_api_key: str):
m = OpenAIModel('gpt-4o-audio-preview', provider=OpenAIProvider(api_key=openai_api_key))
agent = Agent(m)
Expand All @@ -787,7 +786,6 @@ async def test_openai_audio_url_input(allow_model_requests: None, openai_api_key
)


@pytest.mark.vcr()
async def test_document_url_input(allow_model_requests: None, openai_api_key: str):
m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))
agent = Agent(m)
Expand Down Expand Up @@ -879,7 +877,6 @@ async def get_image() -> ImageUrl:
)


@pytest.mark.vcr()
async def test_image_as_binary_content_tool_response(
allow_model_requests: None, image_content: BinaryContent, openai_api_key: str
):
Expand Down Expand Up @@ -960,7 +957,6 @@ async def get_image() -> BinaryContent:
)


@pytest.mark.vcr()
async def test_image_as_binary_content_input(
allow_model_requests: None, image_content: BinaryContent, openai_api_key: str
):
Expand All @@ -981,7 +977,6 @@ async def test_audio_as_binary_content_input(
assert result.output == snapshot('The name mentioned in the audio is Marcelo.')


@pytest.mark.vcr()
async def test_document_as_binary_content_input(
allow_model_requests: None, document_content: BinaryContent, openai_api_key: str
):
Expand Down Expand Up @@ -1045,15 +1040,13 @@ async def get_capital(country: str) -> str:
assert result.output == snapshot('The capital of England is London.')


@pytest.mark.vcr()
async def test_extra_headers(allow_model_requests: None, openai_api_key: str):
# This test doesn't do anything, it's just here to ensure that calls with `extra_headers` don't cause errors, including type.
m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))
agent = Agent(m, model_settings=OpenAIModelSettings(extra_headers={'Extra-Header-Key': 'Extra-Header-Value'}))
await agent.run('hello')


@pytest.mark.vcr()
async def test_user_id(allow_model_requests: None, openai_api_key: str):
# This test doesn't do anything, it's just here to ensure that calls with `user` don't cause errors, including type.
# Since we use VCR, creating tests with an `httpx.Transport` is not possible.
Expand Down Expand Up @@ -1924,7 +1917,6 @@ async def get_temperature(city: str) -> float:
)


@pytest.mark.vcr()
async def test_openai_responses_model_thinking_part(allow_model_requests: None, openai_api_key: str):
m = OpenAIResponsesModel('o3-mini', provider=OpenAIProvider(api_key=openai_api_key))
settings = OpenAIResponsesModelSettings(openai_reasoning_effort='high', openai_reasoning_summary='detailed')
Expand Down Expand Up @@ -2009,7 +2001,6 @@ async def test_openai_responses_model_thinking_part(allow_model_requests: None,
)


@pytest.mark.vcr()
async def test_openai_model_thinking_part(allow_model_requests: None, openai_api_key: str):
provider = OpenAIProvider(api_key=openai_api_key)
responses_model = OpenAIResponsesModel('o3-mini', provider=provider)
Expand Down Expand Up @@ -2098,7 +2089,6 @@ async def test_openai_model_thinking_part(allow_model_requests: None, openai_api
)


@pytest.mark.vcr()
async def test_openai_model_thinking_part_iter(allow_model_requests: None, openai_api_key: str):
provider = OpenAIProvider(api_key=openai_api_key)
responses_model = OpenAIResponsesModel('o3-mini', provider=provider)
Expand Down Expand Up @@ -2127,7 +2117,6 @@ async def test_openai_model_thinking_part_iter(allow_model_requests: None, opena
)


@pytest.mark.vcr()
async def test_openai_instructions_with_logprobs(allow_model_requests: None):
# Create a mock response with logprobs
c = completion_message(
Expand Down Expand Up @@ -2164,7 +2153,6 @@ async def test_openai_instructions_with_logprobs(allow_model_requests: None):
]


@pytest.mark.vcr()
async def test_openai_web_search_tool_model_not_supported(allow_model_requests: None, openai_api_key: str):
m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))
agent = Agent(
Expand All @@ -2175,7 +2163,6 @@ async def test_openai_web_search_tool_model_not_supported(allow_model_requests:
await agent.run('What day is today?')


@pytest.mark.vcr()
async def test_openai_web_search_tool(allow_model_requests: None, openai_api_key: str):
m = OpenAIModel('gpt-4o-search-preview', provider=OpenAIProvider(api_key=openai_api_key))
agent = Agent(
Expand All @@ -2186,7 +2173,6 @@ async def test_openai_web_search_tool(allow_model_requests: None, openai_api_key
assert result.output == snapshot('May 14, 2025, 8:51:29 AM ')


@pytest.mark.vcr()
async def test_openai_web_search_tool_with_user_location(allow_model_requests: None, openai_api_key: str):
m = OpenAIModel('gpt-4o-search-preview', provider=OpenAIProvider(api_key=openai_api_key))
agent = Agent(
Expand Down Expand Up @@ -2214,7 +2200,6 @@ async def test_openai_web_search_tool_with_user_location(allow_model_requests: N
""")


@pytest.mark.vcr()
async def test_reasoning_model_with_temperature(allow_model_requests: None, openai_api_key: str):
m = OpenAIModel('o3-mini', provider=OpenAIProvider(api_key=openai_api_key))
agent = Agent(m, model_settings=OpenAIModelSettings(temperature=0.5))
Expand Down Expand Up @@ -2319,7 +2304,6 @@ def test_model_profile_strict_not_supported():
)


@pytest.mark.vcr
async def test_compatible_api_with_tool_calls_without_id(allow_model_requests: None, gemini_api_key: str):
provider = OpenAIProvider(
openai_client=AsyncOpenAI(
Expand Down Expand Up @@ -2358,7 +2342,6 @@ def test_openai_response_timestamp_milliseconds(allow_model_requests: None):
assert response.timestamp == snapshot(datetime(2025, 6, 1, 3, 7, 48, tzinfo=timezone.utc))


@pytest.mark.vcr()
async def test_openai_tool_output(allow_model_requests: None, openai_api_key: str):
m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))

Expand Down Expand Up @@ -2453,7 +2436,6 @@ async def get_user_country() -> str:
)


@pytest.mark.vcr()
async def test_openai_text_output_function(allow_model_requests: None, openai_api_key: str):
m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))

Expand Down Expand Up @@ -2533,7 +2515,6 @@ async def get_user_country() -> str:
)


@pytest.mark.vcr()
async def test_openai_native_output(allow_model_requests: None, openai_api_key: str):
m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))

Expand Down Expand Up @@ -2616,7 +2597,6 @@ async def get_user_country() -> str:
)


@pytest.mark.vcr()
async def test_openai_native_output_multiple(allow_model_requests: None, openai_api_key: str):
m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))

Expand Down Expand Up @@ -2705,7 +2685,6 @@ async def get_user_country() -> str:
)


@pytest.mark.vcr()
async def test_openai_prompted_output(allow_model_requests: None, openai_api_key: str):
m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))

Expand Down Expand Up @@ -2800,7 +2779,6 @@ async def get_user_country() -> str:
)


@pytest.mark.vcr()
async def test_openai_prompted_output_multiple(allow_model_requests: None, openai_api_key: str):
m = OpenAIModel('gpt-4o', provider=OpenAIProvider(api_key=openai_api_key))

Expand Down Expand Up @@ -2959,7 +2937,6 @@ async def test_process_response_no_created_timestamp(allow_model_requests: None)
assert response_message.timestamp == IsNow(tz=timezone.utc)


@pytest.mark.anyio()
async def test_tool_choice_fallback(allow_model_requests: None) -> None:
profile = OpenAIModelProfile(openai_supports_tool_choice_required=False).update(openai_model_profile('stub'))

Expand All @@ -2976,3 +2953,11 @@ async def test_tool_choice_fallback(allow_model_requests: None) -> None:
)

assert get_mock_chat_completion_kwargs(mock_client)[0]['tool_choice'] == 'auto'


async def test_openai_model_settings_temperature_ignored_on_gpt_5(allow_model_requests: None, openai_api_key: str):
    """Run a plain prompt against gpt-5 with an explicit `temperature` setting.

    gpt-5 is treated as a reasoning model; this verifies that supplying
    `temperature` via `ModelSettings` doesn't cause the request to fail.
    """
    model = OpenAIModel('gpt-5', provider=OpenAIProvider(api_key=openai_api_key))
    agent = Agent(model)

    settings = ModelSettings(temperature=0.0)
    result = await agent.run('What is the capital of France?', model_settings=settings)
    assert result.output == snapshot('Paris.')