diff --git a/litellm/llms/azure/chat/gpt_5_transformation.py b/litellm/llms/azure/chat/gpt_5_transformation.py index a70e008d66..ffdd52c30a 100644 --- a/litellm/llms/azure/chat/gpt_5_transformation.py +++ b/litellm/llms/azure/chat/gpt_5_transformation.py @@ -46,7 +46,7 @@ def get_supported_openai_params(self, model: str) -> List[str]: # Only gpt-5.2+ has been verified to support logprobs on Azure. # The base OpenAI class includes logprobs for gpt-5.1+, but Azure # hasn't verified support for gpt-5.1, so remove them unless gpt-5.2/5.4+. - if self.is_model_gpt_5_1_model(model) and not self.is_model_gpt_5_2_model(model): + if self._supports_reasoning_effort_level(model, "none") and not self.is_model_gpt_5_2_model(model): params = [p for p in params if p not in ["logprobs", "top_logprobs"]] elif self.is_model_gpt_5_2_model(model): azure_supported_params = ["logprobs", "top_logprobs"] @@ -69,9 +69,9 @@ def map_openai_params( # gpt-5.1/5.2/5.4 support reasoning_effort='none', but other gpt-5 models don't # See: https://learn.microsoft.com/en-us/azure/ai-foundry/openai/how-to/reasoning - is_gpt_5_1 = self.is_model_gpt_5_1_model(model) + supports_none = self._supports_reasoning_effort_level(model, "none") - if reasoning_effort_value == "none" and not is_gpt_5_1: + if reasoning_effort_value == "none" and not supports_none: if litellm.drop_params is True or ( drop_params is not None and drop_params is True ): @@ -101,8 +101,8 @@ def map_openai_params( drop_params=drop_params, ) - # Only drop reasoning_effort='none' for non-gpt-5.1/5.2/5.4 models - if result.get("reasoning_effort") == "none" and not is_gpt_5_1: + # Only drop reasoning_effort='none' for models that don't support it + if result.get("reasoning_effort") == "none" and not supports_none: result.pop("reasoning_effort") return result diff --git a/litellm/llms/openai/chat/gpt_5_transformation.py b/litellm/llms/openai/chat/gpt_5_transformation.py index 5f0c58e78a..358a94016a 100644 --- 
a/litellm/llms/openai/chat/gpt_5_transformation.py +++ b/litellm/llms/openai/chat/gpt_5_transformation.py @@ -3,6 +3,7 @@ from typing import Optional import litellm +from litellm.utils import _supports_factory from .gpt_transformation import OpenAIGPTConfig @@ -40,47 +41,26 @@ def is_model_gpt_5_codex_model(cls, model: str) -> bool: """Check if the model is specifically a GPT-5 Codex variant.""" return "gpt-5-codex" in model - @classmethod - def is_model_gpt_5_1_codex_max_model(cls, model: str) -> bool: - """Check if the model is the gpt-5.1-codex-max variant.""" - model_name = model.split("/")[-1] # handle provider prefixes - return model_name == "gpt-5.1-codex-max" - - @classmethod - def is_model_gpt_5_1_model(cls, model: str) -> bool: - """Check if the model is a gpt-5.1, gpt-5.2, or gpt-5.4 chat variant. - - gpt-5.1/5.2/5.4 support temperature when reasoning_effort="none", - unlike base gpt-5 which only supports temperature=1. Excludes - pro variants which keep stricter knobs and chat-only variants - which only support temperature=1. 
- """ - model_name = model.split("/")[-1] - is_gpt_5_1 = model_name.startswith("gpt-5.1") - is_gpt_5_2 = ( - model_name.startswith("gpt-5.2") - and "pro" not in model_name - and not model_name.startswith("gpt-5.2-chat") - ) - is_gpt_5_4 = ( - model_name.startswith("gpt-5.4") - and "pro" not in model_name - and not model_name.startswith("gpt-5.4-chat") - ) - return is_gpt_5_1 or is_gpt_5_2 or is_gpt_5_4 - - @classmethod - def is_model_gpt_5_2_pro_model(cls, model: str) -> bool: - """Check if the model is the gpt-5.2-pro snapshot/alias.""" - model_name = model.split("/")[-1] - return model_name.startswith("gpt-5.2-pro") - @classmethod def is_model_gpt_5_2_model(cls, model: str) -> bool: """Check if the model is a gpt-5.2 variant (including pro).""" model_name = model.split("/")[-1] return model_name.startswith("gpt-5.2") or model_name.startswith("gpt-5.4") + @classmethod + def _supports_reasoning_effort_level(cls, model: str, level: str) -> bool: + """Check if the model supports a specific reasoning_effort level. + + Looks up ``supports_{level}_reasoning_effort`` in the model map via + the shared ``_supports_factory`` helper. + Returns False for unknown models (safe fallback). 
+ """ + return _supports_factory( + model=model, + custom_llm_provider=None, + key=f"supports_{level}_reasoning_effort", + ) + def get_supported_openai_params(self, model: str) -> list: if self.is_model_gpt_5_search_model(model): return [ @@ -118,8 +98,8 @@ def get_supported_openai_params(self, model: str) -> list: "web_search_options", ] - # gpt-5.1/5.2/5.4 support logprobs, top_p, top_logprobs when reasoning_effort="none" - if not self.is_model_gpt_5_1_model(model): + # gpt-5.1/5.2 support logprobs, top_p, top_logprobs when reasoning_effort="none" + if not self._supports_reasoning_effort_level(model, "none"): non_supported_params.extend(["logprobs", "top_p", "top_logprobs"]) return [ @@ -152,10 +132,7 @@ def map_openai_params( or optional_params.get("reasoning_effort") ) if reasoning_effort is not None and reasoning_effort == "xhigh": - if not ( - self.is_model_gpt_5_1_codex_max_model(model) - or self.is_model_gpt_5_2_model(model) - ): + if not self._supports_reasoning_effort_level(model, "xhigh"): if litellm.drop_params or drop_params: non_default_params.pop("reasoning_effort", None) else: @@ -175,8 +152,9 @@ def map_openai_params( "max_tokens" ) - # gpt-5.1/5.2/5.4 support logprobs, top_p, top_logprobs only when reasoning_effort="none" - if self.is_model_gpt_5_1_model(model): + # gpt-5.1/5.2 support logprobs, top_p, top_logprobs only when reasoning_effort="none" + supports_none = self._supports_reasoning_effort_level(model, "none") + if supports_none: sampling_params = ["logprobs", "top_logprobs", "top_p"] has_sampling = any(p in non_default_params for p in sampling_params) if has_sampling and reasoning_effort not in (None, "none"): @@ -196,10 +174,8 @@ def map_openai_params( if "temperature" in non_default_params: temperature_value: Optional[float] = non_default_params.pop("temperature") if temperature_value is not None: - is_gpt_5_1 = self.is_model_gpt_5_1_model(model) - - # gpt-5.1 supports any temperature when reasoning_effort="none" (or not specified, as 
it defaults to "none") - if is_gpt_5_1 and (reasoning_effort == "none" or reasoning_effort is None): + # models supporting reasoning_effort="none" also support flexible temperature + if supports_none and (reasoning_effort == "none" or reasoning_effort is None): optional_params["temperature"] = temperature_value elif temperature_value == 1: optional_params["temperature"] = temperature_value diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 88ca8bd87f..1237d963b1 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -20593,7 +20593,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_service_tier": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5.1": { "cache_read_input_token_cost": 1.25e-07, @@ -20629,7 +20631,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_service_tier": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": true, + "supports_xhigh_reasoning_effort": false }, "gpt-5.1-2025-11-13": { "cache_read_input_token_cost": 1.25e-07, @@ -20665,7 +20669,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_service_tier": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": true, + "supports_xhigh_reasoning_effort": false }, "gpt-5.1-chat-latest": { "cache_read_input_token_cost": 1.25e-07, @@ -20700,7 +20706,9 @@ "supports_response_schema": true, "supports_system_messages": true, "supports_tool_choice": false, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": true, + "supports_xhigh_reasoning_effort": false }, "gpt-5.2": { "cache_read_input_token_cost": 1.75e-07, @@ -20737,7 +20745,9 @@ "supports_system_messages": 
true, "supports_tool_choice": true, "supports_service_tier": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": true, + "supports_xhigh_reasoning_effort": true }, "gpt-5.2-2025-12-11": { "cache_read_input_token_cost": 1.75e-07, @@ -20774,7 +20784,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_service_tier": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": true, + "supports_xhigh_reasoning_effort": true }, "gpt-5.2-chat-latest": { "cache_read_input_token_cost": 1.75e-07, @@ -20808,7 +20820,9 @@ "supports_response_schema": true, "supports_system_messages": true, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5.3-chat-latest": { "cache_read_input_token_cost": 1.75e-07, @@ -20842,7 +20856,9 @@ "supports_response_schema": true, "supports_system_messages": true, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5.2-pro": { "input_cost_per_token": 2.1e-05, @@ -20873,7 +20889,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": true }, "gpt-5.2-pro-2025-12-11": { "input_cost_per_token": 2.1e-05, @@ -20904,7 +20922,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": true }, "gpt-5.4": { "cache_read_input_token_cost": 2.5e-07, @@ -20940,7 +20960,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_service_tier": true, - 
"supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": true, + "supports_xhigh_reasoning_effort": true }, "gpt-5.4-2026-03-05": { "cache_read_input_token_cost": 2.5e-07, @@ -20976,7 +20998,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_service_tier": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": true, + "supports_xhigh_reasoning_effort": true }, "gpt-5.4-pro": { "cache_read_input_token_cost": 2e-06, @@ -21149,7 +21173,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-pro-2025-10-06": { "input_cost_per_token": 1.5e-05, @@ -21182,7 +21208,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-2025-08-07": { "cache_read_input_token_cost": 1.25e-07, @@ -21221,7 +21249,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_service_tier": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-chat": { "cache_read_input_token_cost": 1.25e-07, @@ -21253,7 +21283,9 @@ "supports_response_schema": true, "supports_system_messages": true, "supports_tool_choice": false, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-chat-latest": { "cache_read_input_token_cost": 1.25e-07, @@ -21285,7 +21317,9 @@ "supports_response_schema": true, "supports_system_messages": true, "supports_tool_choice": false, - "supports_vision": true + "supports_vision": true, + 
"supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-codex": { "cache_read_input_token_cost": 1.25e-07, @@ -21315,7 +21349,9 @@ "supports_response_schema": true, "supports_system_messages": false, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5.1-codex": { "cache_read_input_token_cost": 1.25e-07, @@ -21348,7 +21384,9 @@ "supports_response_schema": true, "supports_system_messages": false, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5.1-codex-max": { "cache_read_input_token_cost": 1.25e-07, @@ -21378,7 +21416,9 @@ "supports_response_schema": true, "supports_system_messages": false, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": true }, "gpt-5.1-codex-mini": { "cache_read_input_token_cost": 2.5e-08, @@ -21411,7 +21451,9 @@ "supports_response_schema": true, "supports_system_messages": false, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5.2-codex": { "cache_read_input_token_cost": 1.75e-07, @@ -21444,7 +21486,9 @@ "supports_response_schema": true, "supports_system_messages": false, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": true }, "gpt-5.3-codex": { "cache_read_input_token_cost": 1.75e-07, @@ -21477,7 +21521,9 @@ "supports_response_schema": true, "supports_system_messages": false, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + 
"supports_xhigh_reasoning_effort": false }, "gpt-5-mini": { "cache_read_input_token_cost": 2.5e-08, @@ -21516,7 +21562,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_service_tier": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-mini-2025-08-07": { "cache_read_input_token_cost": 2.5e-08, @@ -21555,7 +21603,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_service_tier": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-nano": { "cache_read_input_token_cost": 5e-09, @@ -21591,7 +21641,9 @@ "supports_response_schema": true, "supports_system_messages": true, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-nano-2025-08-07": { "cache_read_input_token_cost": 5e-09, @@ -21626,7 +21678,9 @@ "supports_response_schema": true, "supports_system_messages": true, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-image-1": { "cache_read_input_image_token_cost": 2.5e-06, @@ -38786,7 +38840,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-search-api-2025-10-14": { "cache_read_input_token_cost": 1.25e-07, @@ -38805,7 +38861,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, 
"gpt-realtime-mini-2025-10-06": { "cache_creation_input_audio_token_cost": 3e-07, diff --git a/litellm/types/utils.py b/litellm/types/utils.py index 3becc6b41d..4a7be0d677 100644 --- a/litellm/types/utils.py +++ b/litellm/types/utils.py @@ -136,6 +136,8 @@ class ProviderSpecificModelInfo(TypedDict, total=False): supports_web_search: Optional[bool] supports_reasoning: Optional[bool] supports_url_context: Optional[bool] + supports_none_reasoning_effort: Optional[bool] + supports_xhigh_reasoning_effort: Optional[bool] class SearchContextCostPerQuery(TypedDict, total=False): diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 46aae4b789..03e03bf51e 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -20593,7 +20593,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_service_tier": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5.1": { "cache_read_input_token_cost": 1.25e-07, @@ -20630,7 +20632,8 @@ "supports_tool_choice": true, "supports_service_tier": true, "supports_vision": true, - "supports_web_search": true + "supports_none_reasoning_effort": true, + "supports_xhigh_reasoning_effort": false }, "gpt-5.1-2025-11-13": { "cache_read_input_token_cost": 1.25e-07, @@ -20667,7 +20670,8 @@ "supports_tool_choice": true, "supports_service_tier": true, "supports_vision": true, - "supports_web_search": true + "supports_none_reasoning_effort": true, + "supports_xhigh_reasoning_effort": false }, "gpt-5.1-chat-latest": { "cache_read_input_token_cost": 1.25e-07, @@ -20703,7 +20707,8 @@ "supports_system_messages": true, "supports_tool_choice": false, "supports_vision": true, - "supports_web_search": true + "supports_none_reasoning_effort": true, + "supports_xhigh_reasoning_effort": false }, "gpt-5.2": { "cache_read_input_token_cost": 1.75e-07, @@ -20741,6 +20746,8 
@@ "supports_tool_choice": true, "supports_service_tier": true, "supports_vision": true, + "supports_none_reasoning_effort": true, + "supports_xhigh_reasoning_effort": true, "supports_web_search": true }, "gpt-5.2-2025-12-11": { @@ -20779,6 +20786,8 @@ "supports_tool_choice": true, "supports_service_tier": true, "supports_vision": true, + "supports_none_reasoning_effort": true, + "supports_xhigh_reasoning_effort": true, "supports_web_search": true }, "gpt-5.2-chat-latest": { @@ -20814,6 +20823,8 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false, "supports_web_search": true }, "gpt-5.3-chat-latest": { @@ -20849,6 +20860,8 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false, "supports_web_search": true }, "gpt-5.2-pro": { @@ -20880,7 +20893,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": true }, "gpt-5.2-pro-2025-12-11": { "input_cost_per_token": 2.1e-05, @@ -20911,7 +20926,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": true }, "gpt-5.4": { "cache_read_input_token_cost": 2.5e-07, @@ -20947,7 +20964,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_service_tier": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": true, + "supports_xhigh_reasoning_effort": true }, "gpt-5.4-2026-03-05": { "cache_read_input_token_cost": 2.5e-07, @@ -20985,68 +21004,6 @@ "supports_service_tier": 
true, "supports_vision": true }, - "gpt-5.4-pro": { - "cache_read_input_token_cost": 2e-06, - "input_cost_per_token": 2e-05, - "litellm_provider": "openai", - "max_input_tokens": 1050000, - "max_output_tokens": 128000, - "max_tokens": 128000, - "mode": "responses", - "output_cost_per_token": 0.00012, - "supported_endpoints": [ - "/v1/responses" - ], - "supported_modalities": [ - "text", - "image" - ], - "supported_output_modalities": [ - "text" - ], - "supports_function_calling": true, - "supports_native_streaming": true, - "supports_parallel_function_calling": true, - "supports_pdf_input": true, - "supports_prompt_caching": true, - "supports_reasoning": true, - "supports_response_schema": true, - "supports_system_messages": true, - "supports_tool_choice": true, - "supports_vision": true, - "supports_web_search": true - }, - "gpt-5.4-pro-2026-03-05": { - "cache_read_input_token_cost": 2e-06, - "input_cost_per_token": 2e-05, - "litellm_provider": "openai", - "max_input_tokens": 1050000, - "max_output_tokens": 128000, - "max_tokens": 128000, - "mode": "responses", - "output_cost_per_token": 0.00012, - "supported_endpoints": [ - "/v1/responses" - ], - "supported_modalities": [ - "text", - "image" - ], - "supported_output_modalities": [ - "text" - ], - "supports_function_calling": true, - "supports_native_streaming": true, - "supports_parallel_function_calling": true, - "supports_pdf_input": true, - "supports_prompt_caching": true, - "supports_reasoning": true, - "supports_response_schema": true, - "supports_system_messages": true, - "supports_tool_choice": true, - "supports_vision": true, - "supports_web_search": true - }, "gpt-5.4-pro": { "cache_read_input_token_cost": 3e-06, "cache_read_input_token_cost_priority": 6e-06, @@ -21156,7 +21113,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_none_reasoning_effort": false, + 
"supports_xhigh_reasoning_effort": false }, "gpt-5-pro-2025-10-06": { "input_cost_per_token": 1.5e-05, @@ -21189,7 +21148,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-2025-08-07": { "cache_read_input_token_cost": 1.25e-07, @@ -21228,7 +21189,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_service_tier": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-chat": { "cache_read_input_token_cost": 1.25e-07, @@ -21260,7 +21223,9 @@ "supports_response_schema": true, "supports_system_messages": true, "supports_tool_choice": false, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-chat-latest": { "cache_read_input_token_cost": 1.25e-07, @@ -21292,7 +21257,9 @@ "supports_response_schema": true, "supports_system_messages": true, "supports_tool_choice": false, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-codex": { "cache_read_input_token_cost": 1.25e-07, @@ -21322,7 +21289,9 @@ "supports_response_schema": true, "supports_system_messages": false, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5.1-codex": { "cache_read_input_token_cost": 1.25e-07, @@ -21355,7 +21324,9 @@ "supports_response_schema": true, "supports_system_messages": false, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, 
"gpt-5.1-codex-max": { "cache_read_input_token_cost": 1.25e-07, @@ -21385,7 +21356,9 @@ "supports_response_schema": true, "supports_system_messages": false, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": true }, "gpt-5.1-codex-mini": { "cache_read_input_token_cost": 2.5e-08, @@ -21418,7 +21391,9 @@ "supports_response_schema": true, "supports_system_messages": false, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5.2-codex": { "cache_read_input_token_cost": 1.75e-07, @@ -21451,7 +21426,9 @@ "supports_response_schema": true, "supports_system_messages": false, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": true }, "gpt-5.3-codex": { "cache_read_input_token_cost": 1.75e-07, @@ -21484,7 +21461,9 @@ "supports_response_schema": true, "supports_system_messages": false, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-mini": { "cache_read_input_token_cost": 2.5e-08, @@ -21523,7 +21502,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_service_tier": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-mini-2025-08-07": { "cache_read_input_token_cost": 2.5e-08, @@ -21562,7 +21543,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_service_tier": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-nano": { "cache_read_input_token_cost": 
5e-09, @@ -21598,7 +21581,9 @@ "supports_response_schema": true, "supports_system_messages": true, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-nano-2025-08-07": { "cache_read_input_token_cost": 5e-09, @@ -21633,7 +21618,9 @@ "supports_response_schema": true, "supports_system_messages": true, "supports_tool_choice": true, - "supports_vision": true + "supports_vision": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-image-1": { "cache_read_input_image_token_cost": 2.5e-06, @@ -38793,7 +38780,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-5-search-api-2025-10-14": { "cache_read_input_token_cost": 1.25e-07, @@ -38812,7 +38801,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_vision": true, - "supports_web_search": true + "supports_web_search": true, + "supports_none_reasoning_effort": false, + "supports_xhigh_reasoning_effort": false }, "gpt-realtime-mini-2025-10-06": { "cache_creation_input_audio_token_cost": 3e-07, diff --git a/tests/test_litellm/llms/openai/test_gpt5_transformation.py b/tests/test_litellm/llms/openai/test_gpt5_transformation.py index 70ed96c91a..4b64239d0e 100644 --- a/tests/test_litellm/llms/openai/test_gpt5_transformation.py +++ b/tests/test_litellm/llms/openai/test_gpt5_transformation.py @@ -1,8 +1,8 @@ import pytest import litellm -from litellm.llms.openai.openai import OpenAIConfig from litellm.llms.openai.chat.gpt_5_transformation import OpenAIGPT5Config +from litellm.llms.openai.openai import OpenAIConfig @pytest.fixture() @@ -260,23 +260,21 @@ def test_gpt5_drops_reasoning_effort_xhigh_when_requested(config: OpenAIConfig): # 
GPT-5.1 temperature handling tests def test_gpt5_1_model_detection(gpt5_config: OpenAIGPT5Config): - """Test that GPT-5.1 models are correctly detected.""" - assert gpt5_config.is_model_gpt_5_1_model("gpt-5.1") - assert gpt5_config.is_model_gpt_5_1_model("gpt-5.1-codex") - assert gpt5_config.is_model_gpt_5_1_model("gpt-5.1-codex-max") - assert gpt5_config.is_model_gpt_5_1_model("gpt-5.1-chat") - assert gpt5_config.is_model_gpt_5_1_model("gpt-5.2") - assert gpt5_config.is_model_gpt_5_1_model("gpt-5.2-2025-12-11") - assert gpt5_config.is_model_gpt_5_1_model("gpt-5.4") - assert gpt5_config.is_model_gpt_5_1_model("gpt-5.4-2026-03-05") - assert not gpt5_config.is_model_gpt_5_1_model("gpt-5.2-chat") - assert not gpt5_config.is_model_gpt_5_1_model("gpt-5.2-chat-latest") - assert not gpt5_config.is_model_gpt_5_1_model("gpt-5.3-chat-latest") - assert not gpt5_config.is_model_gpt_5_1_model("gpt-5.4-pro") - assert not gpt5_config.is_model_gpt_5_1_model("gpt-5.2-pro") - assert not gpt5_config.is_model_gpt_5_1_model("gpt-5") - assert not gpt5_config.is_model_gpt_5_1_model("gpt-5-mini") - assert not gpt5_config.is_model_gpt_5_1_model("gpt-5-codex") + """Test that models supporting reasoning_effort='none' are correctly detected via model map.""" + # gpt-5.1 family and gpt-5.2 base/dated snapshots support reasoning_effort='none' + assert gpt5_config._supports_reasoning_effort_level("gpt-5.1", "none") + assert gpt5_config._supports_reasoning_effort_level("gpt-5.1-2025-11-13", "none") + assert gpt5_config._supports_reasoning_effort_level("gpt-5.1-chat-latest", "none") + assert gpt5_config._supports_reasoning_effort_level("gpt-5.2", "none") + assert gpt5_config._supports_reasoning_effort_level("gpt-5.2-2025-12-11", "none") + # codex/pro/chat variants do not support none + assert not gpt5_config._supports_reasoning_effort_level("gpt-5.1-codex", "none") + assert not gpt5_config._supports_reasoning_effort_level("gpt-5.1-codex-max", "none") + assert not gpt5_config._supports_reasoning_effort_level("gpt-5.2-chat-latest",
"none") + assert not gpt5_config._supports_reasoning_effort_level("gpt-5.2-pro", "none") + assert not gpt5_config._supports_reasoning_effort_level("gpt-5", "none") + assert not gpt5_config._supports_reasoning_effort_level("gpt-5-mini", "none") + assert not gpt5_config._supports_reasoning_effort_level("gpt-5-codex", "none") def test_gpt5_1_temperature_with_reasoning_effort_none(config: OpenAIConfig): @@ -436,7 +434,7 @@ def test_gpt5_2_chat_temperature_restricted(config: OpenAIConfig): Regression test for https://github.com/BerriAI/litellm/issues/21911 """ # gpt-5.2-chat should reject non-1 temperature when drop_params=False - for model in ["gpt-5.2-chat", "gpt-5.2-chat-latest", "gpt-5.3-chat-latest"]: + for model in ["gpt-5.2-chat", "gpt-5.2-chat-latest"]: with pytest.raises(litellm.utils.UnsupportedParamsError): config.map_openai_params( non_default_params={"temperature": 0.7}, @@ -650,4 +648,4 @@ def test_gpt5_1_logprobs_dropped_with_reasoning_effort(config: OpenAIConfig): ) assert "logprobs" not in params assert "top_p" not in params - assert params["reasoning_effort"] == "high" + assert params["reasoning_effort"] == "high" \ No newline at end of file