Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion litellm/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -578,7 +578,6 @@
"thinking",
"web_search_options",
"service_tier",
"store",
"prompt_cache_key",
Comment on lines 578 to 581
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Removing store from OPENAI_CHAT_COMPLETION_PARAMS will break the existing test test_store_in_openai_chat_completion_params() added in PR #21195 (litellm/constants.py:581). This test explicitly checks assert "store" in OPENAI_CHAT_COMPLETION_PARAMS and will fail.

Additionally, get_standard_openai_params() relies on this list to identify standard OpenAI parameters. Removing store means it will be filtered out when calling OpenAI/Azure, breaking the functionality that PR #21195 was supposed to fix.

"prompt_cache_retention",
"safety_identifier",
Expand Down
3 changes: 1 addition & 2 deletions litellm/llms/azure/chat/gpt_transformation.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,7 @@ def get_supported_openai_params(self, model: str) -> List[str]:
"audio",
"web_search_options",
"prompt_cache_key",
"store",
]

def _is_response_format_supported_model(self, model: str) -> bool:
Expand Down Expand Up @@ -158,7 +159,6 @@ def map_openai_params(
api_version: str = "",
) -> dict:
supported_openai_params = self.get_supported_openai_params(model)

api_version_times = api_version.split("-")

if len(api_version_times) >= 3:
Expand Down Expand Up @@ -245,7 +245,6 @@ def map_openai_params(
optional_params["tools"].extend(value)
elif param in supported_openai_params:
optional_params[param] = value

return optional_params

def transform_request(
Expand Down
1 change: 1 addition & 0 deletions litellm/llms/openai/chat/gpt_transformation.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,7 @@ def get_supported_openai_params(self, model: str) -> list:
"service_tier",
"safety_identifier",
"prompt_cache_key",
"store",
] # works across all models

model_specific_params = []
Expand Down
39 changes: 39 additions & 0 deletions tests/llm_translation/test_optional_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -2045,3 +2045,42 @@ def test_store_in_openai_chat_completion_params():
result = get_standard_openai_params({"store": True, "temperature": 0.7})
assert "store" in result
assert result["store"] is True


def test_store_param_passed_through_openai_azure():
    """
    Verify that the `store` parameter survives get_optional_params() for
    both the OpenAI and Azure OpenAI providers, for True and False values.

    Guards against the regression where `store` was filtered out by
    get_non_default_completion_params() due to architectural issues in the
    parameter processing pipeline.

    Ref: https://github.com/BerriAI/litellm/issues/19700
    """
    # (model, provider, store value) — one case per original scenario:
    # OpenAI with store=True, Azure with store=True, OpenAI with store=False.
    cases = [
        ("gpt-4o", "openai", True),
        ("gpt-4.1-2025-04-14", "azure", True),
        ("gpt-4o", "openai", False),
    ]
    for model_name, provider, store_value in cases:
        params = get_optional_params(
            model=model_name,
            custom_llm_provider=provider,
            store=store_value,
        )
        # `store` must be present and must be the exact boolean passed in.
        assert "store" in params
        assert params["store"] is store_value
Loading