Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions litellm/llms/azure/chat/gpt_5_transformation.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def get_supported_openai_params(self, model: str) -> List[str]:
# Only gpt-5.2+ has been verified to support logprobs on Azure.
# The base OpenAI class includes logprobs for gpt-5.1+, but Azure
# hasn't verified support for gpt-5.1, so remove them unless gpt-5.2/5.4+.
if self.is_model_gpt_5_1_model(model) and not self.is_model_gpt_5_2_model(model):
if self._supports_reasoning_effort_level(model, "none") and not self.is_model_gpt_5_2_model(model):
params = [p for p in params if p not in ["logprobs", "top_logprobs"]]
elif self.is_model_gpt_5_2_model(model):
azure_supported_params = ["logprobs", "top_logprobs"]
Expand All @@ -69,9 +69,9 @@ def map_openai_params(

# Whether a model supports reasoning_effort='none' is looked up via the model map;
# base gpt-5 models don't support it.
# See: https://learn.microsoft.com/en-us/azure/ai-foundry/openai/how-to/reasoning
is_gpt_5_1 = self.is_model_gpt_5_1_model(model)
supports_none = self._supports_reasoning_effort_level(model, "none")

if reasoning_effort_value == "none" and not is_gpt_5_1:
if reasoning_effort_value == "none" and not supports_none:
if litellm.drop_params is True or (
drop_params is not None and drop_params is True
):
Expand Down Expand Up @@ -101,8 +101,8 @@ def map_openai_params(
drop_params=drop_params,
)

# Only drop reasoning_effort='none' for non-gpt-5.1/5.2/5.4 models
if result.get("reasoning_effort") == "none" and not is_gpt_5_1:
# Only drop reasoning_effort='none' for models that don't support it
if result.get("reasoning_effort") == "none" and not supports_none:
result.pop("reasoning_effort")

return result
Expand Down
70 changes: 23 additions & 47 deletions litellm/llms/openai/chat/gpt_5_transformation.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from typing import Optional

import litellm
from litellm.utils import _supports_factory

from .gpt_transformation import OpenAIGPTConfig

Expand Down Expand Up @@ -40,47 +41,26 @@ def is_model_gpt_5_codex_model(cls, model: str) -> bool:
"""Check if the model is specifically a GPT-5 Codex variant."""
return "gpt-5-codex" in model

@classmethod
def is_model_gpt_5_1_codex_max_model(cls, model: str) -> bool:
"""Check if the model is the gpt-5.1-codex-max variant."""
model_name = model.split("/")[-1] # handle provider prefixes
return model_name == "gpt-5.1-codex-max"

@classmethod
def is_model_gpt_5_1_model(cls, model: str) -> bool:
"""Check if the model is a gpt-5.1, gpt-5.2, or gpt-5.4 chat variant.

gpt-5.1/5.2/5.4 support temperature when reasoning_effort="none",
unlike base gpt-5 which only supports temperature=1. Excludes
pro variants which keep stricter knobs and chat-only variants
which only support temperature=1.
"""
model_name = model.split("/")[-1]
is_gpt_5_1 = model_name.startswith("gpt-5.1")
is_gpt_5_2 = (
model_name.startswith("gpt-5.2")
and "pro" not in model_name
and not model_name.startswith("gpt-5.2-chat")
)
is_gpt_5_4 = (
model_name.startswith("gpt-5.4")
and "pro" not in model_name
and not model_name.startswith("gpt-5.4-chat")
)
return is_gpt_5_1 or is_gpt_5_2 or is_gpt_5_4

@classmethod
def is_model_gpt_5_2_pro_model(cls, model: str) -> bool:
"""Check if the model is the gpt-5.2-pro snapshot/alias."""
model_name = model.split("/")[-1]
return model_name.startswith("gpt-5.2-pro")

@classmethod
def is_model_gpt_5_2_model(cls, model: str) -> bool:
"""Check if the model is a gpt-5.2 variant (including pro)."""
model_name = model.split("/")[-1]
return model_name.startswith("gpt-5.2") or model_name.startswith("gpt-5.4")

@classmethod
def _supports_reasoning_effort_level(cls, model: str, level: str) -> bool:
"""Check if the model supports a specific reasoning_effort level.

Looks up ``supports_{level}_reasoning_effort`` in the model map via
the shared ``_supports_factory`` helper.
Returns False for unknown models (safe fallback).
"""
return _supports_factory(
model=model,
custom_llm_provider=None,
key=f"supports_{level}_reasoning_effort",
)

def get_supported_openai_params(self, model: str) -> list:
if self.is_model_gpt_5_search_model(model):
return [
Expand Down Expand Up @@ -118,8 +98,8 @@ def get_supported_openai_params(self, model: str) -> list:
"web_search_options",
]

# gpt-5.1/5.2/5.4 support logprobs, top_p, top_logprobs when reasoning_effort="none"
if not self.is_model_gpt_5_1_model(model):
# Models that support reasoning_effort="none" (per the model map) also support logprobs, top_p, top_logprobs
if not self._supports_reasoning_effort_level(model, "none"):
non_supported_params.extend(["logprobs", "top_p", "top_logprobs"])

return [
Expand Down Expand Up @@ -152,10 +132,7 @@ def map_openai_params(
or optional_params.get("reasoning_effort")
)
if reasoning_effort is not None and reasoning_effort == "xhigh":
if not (
self.is_model_gpt_5_1_codex_max_model(model)
or self.is_model_gpt_5_2_model(model)
):
if not self._supports_reasoning_effort_level(model, "xhigh"):
if litellm.drop_params or drop_params:
non_default_params.pop("reasoning_effort", None)
else:
Expand All @@ -175,8 +152,9 @@ def map_openai_params(
"max_tokens"
)

# gpt-5.1/5.2/5.4 support logprobs, top_p, top_logprobs only when reasoning_effort="none"
if self.is_model_gpt_5_1_model(model):
# Models that support reasoning_effort="none" allow logprobs, top_p, top_logprobs only when reasoning_effort="none"
supports_none = self._supports_reasoning_effort_level(model, "none")
if supports_none:
sampling_params = ["logprobs", "top_logprobs", "top_p"]
has_sampling = any(p in non_default_params for p in sampling_params)
if has_sampling and reasoning_effort not in (None, "none"):
Expand All @@ -196,10 +174,8 @@ def map_openai_params(
if "temperature" in non_default_params:
temperature_value: Optional[float] = non_default_params.pop("temperature")
if temperature_value is not None:
is_gpt_5_1 = self.is_model_gpt_5_1_model(model)

# gpt-5.1 supports any temperature when reasoning_effort="none" (or not specified, as it defaults to "none")
if is_gpt_5_1 and (reasoning_effort == "none" or reasoning_effort is None):
# models supporting reasoning_effort="none" also support flexible temperature
if supports_none and (reasoning_effort == "none" or reasoning_effort is None):
optional_params["temperature"] = temperature_value
elif temperature_value == 1:
optional_params["temperature"] = temperature_value
Expand Down
Loading
Loading