Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 20 additions & 4 deletions litellm/llms/azure/responses/transformation.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple, Union
from copy import deepcopy

import httpx
from openai.types.responses import ResponseReasoningItem
Expand Down Expand Up @@ -43,7 +44,7 @@ def _handle_reasoning_item(self, item: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle reasoning items to filter out the status field.
Issue: https://github.com/BerriAI/litellm/issues/13484

Azure OpenAI API does not accept 'status' field in reasoning input items.
"""
if item.get("type") == "reasoning":
Expand Down Expand Up @@ -78,7 +79,7 @@ def _handle_reasoning_item(self, item: Dict[str, Any]) -> Dict[str, Any]:
}
return filtered_item
return item

def _validate_input_param(
self, input: Union[str, ResponseInputParam]
) -> Union[str, ResponseInputParam]:
Expand All @@ -90,7 +91,7 @@ def _validate_input_param(

# First call parent's validation
validated_input = super()._validate_input_param(input)

# Then filter out status from message items
if isinstance(validated_input, list):
filtered_input: List[Any] = []
Expand All @@ -102,7 +103,7 @@ def _validate_input_param(
else:
filtered_input.append(item)
return cast(ResponseInputParam, filtered_input)

return validated_input

def transform_responses_api_request(
Expand All @@ -116,6 +117,21 @@ def transform_responses_api_request(
"""No transform applied since inputs are in OpenAI spec already"""
stripped_model_name = self.get_stripped_model_name(model)

# Azure Responses API requires flattened tools (params at top level, not nested in 'function')
if "tools" in response_api_optional_request_params and isinstance(
response_api_optional_request_params["tools"], list
):
new_tools: List[Dict[str, Any]] = []
for tool in response_api_optional_request_params["tools"]:
if isinstance(tool, dict) and "function" in tool:
new_tool: Dict[str, Any] = deepcopy(tool)
function_data = new_tool.pop("function")
new_tool.update(function_data)
new_tools.append(new_tool)
else:
new_tools.append(tool)
response_api_optional_request_params["tools"] = new_tools

return super().transform_responses_api_request(
model=stripped_model_name,
input=input,
Expand Down
173 changes: 160 additions & 13 deletions tests/test_litellm/llms/azure/response/test_azure_transformation.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import os
import sys
from copy import deepcopy
from unittest.mock import patch

import pytest
Expand Down Expand Up @@ -191,12 +192,12 @@ def test_o_series_model_detection():
config = AzureOpenAIOSeriesResponsesAPIConfig()

# Test explicit o_series naming
assert config.is_o_series_model("o_series/gpt-o1") == True
assert config.is_o_series_model("azure/o_series/gpt-o3") == True
assert config.is_o_series_model("o_series/gpt-o1")
assert config.is_o_series_model("azure/o_series/gpt-o3")

# Test regular models
assert config.is_o_series_model("gpt-4o") == False
assert config.is_o_series_model("gpt-3.5-turbo") == False
assert not config.is_o_series_model("gpt-4o")
assert not config.is_o_series_model("gpt-3.5-turbo")


@pytest.mark.serial
Expand Down Expand Up @@ -297,19 +298,19 @@ def test_azure_get_complete_url_with_default_api_version(self):
def test_azure_cancel_response_api_request(self):
"""Test Azure cancel response API request transformation"""
from litellm.types.router import GenericLiteLLMParams

response_id = "resp_test123"
api_base = "https://test.openai.azure.com/openai/responses?api-version=2024-05-01-preview"
litellm_params = GenericLiteLLMParams(api_version="2024-05-01-preview")
headers = {"Authorization": "Bearer test-key"}

url, data = self.config.transform_cancel_response_api_request(
response_id=response_id,
api_base=api_base,
litellm_params=litellm_params,
headers=headers,
)

expected_url = "https://test.openai.azure.com/openai/responses/resp_test123/cancel?api-version=2024-05-01-preview"
assert url == expected_url
assert data == {}
Expand All @@ -318,7 +319,7 @@ def test_azure_cancel_response_api_response(self):
"""Test Azure cancel response API response transformation"""
from unittest.mock import Mock
from litellm.types.llms.openai import ResponsesAPIResponse

# Mock response
mock_response = Mock()
mock_response.json.return_value = {
Expand All @@ -330,18 +331,164 @@ def test_azure_cancel_response_api_response(self):
"tool_choice": "auto",
"tools": [],
"top_p": 1.0,
"status": "cancelled"
"status": "cancelled",
}
mock_response.text = "test response"
mock_response.status_code = 200

# Mock logging object
mock_logging_obj = Mock()

result = self.config.transform_cancel_response_api_response(
raw_response=mock_response,
logging_obj=mock_logging_obj,
)

assert isinstance(result, ResponsesAPIResponse)
assert result.id == "resp_test123"
assert result.id == "resp_test123"

def test_azure_responses_api_tool_flattening_nested_to_flat(self):
    """A tool in the nested chat-completions shape gets its 'function'
    params lifted to the tool's top level for the Azure Responses API."""
    from litellm.types.router import GenericLiteLLMParams

    # Tool in the nested shape: params live under the "function" key.
    request_params = {
        "tools": [
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get weather for a location",
                    "parameters": {"type": "object", "properties": {}},
                },
            }
        ]
    }

    self.config.transform_responses_api_request(
        model=self.model,
        input="test input",
        response_api_optional_request_params=request_params,
        litellm_params=GenericLiteLLMParams(),
        headers={},
    )

    # After transformation the same fields sit at the top level of the tool.
    assert request_params["tools"] == [
        {
            "type": "function",
            "name": "get_weather",
            "description": "Get weather for a location",
            "parameters": {"type": "object", "properties": {}},
        }
    ]

def test_azure_responses_api_tool_flattening_already_flat(self):
    """A tool that is already flat (no nested 'function' key) must pass
    through the Azure transform unchanged."""
    from litellm.types.router import GenericLiteLLMParams

    flat_tools = [
        {
            "type": "function",
            "name": "get_weather",
            "description": "Get weather for a location",
            "parameters": {"type": "object", "properties": {}},
        }
    ]

    # Hand the transform a shallow copy so the expected value stays pristine.
    request_params = {"tools": list(flat_tools)}

    self.config.transform_responses_api_request(
        model=self.model,
        input="test input",
        response_api_optional_request_params=request_params,
        litellm_params=GenericLiteLLMParams(),
        headers={},
    )

    # No flattening needed, so the tools list is identical to the input.
    assert request_params["tools"] == flat_tools

def test_azure_responses_api_tool_flattening_preserves_original(self):
    """Flattening must not mutate the caller-supplied tool dict; the
    transform should work on its own copy."""
    from litellm.types.router import GenericLiteLLMParams

    caller_tool = {
        "type": "function",
        "function": {"name": "get_weather", "parameters": {}},
    }
    # Snapshot the tool before the transform touches it.
    snapshot = deepcopy(caller_tool)

    self.config.transform_responses_api_request(
        model=self.model,
        input="test input",
        response_api_optional_request_params={"tools": [caller_tool]},
        litellm_params=GenericLiteLLMParams(),
        headers={},
    )

    # The original dict is byte-for-byte what it was before the call.
    assert caller_tool == snapshot

def test_azure_responses_api_tool_flattening_mixed_tools(self):
    """With a mix of nested and flat tools, only the nested one is
    flattened; the flat one is forwarded untouched."""
    from litellm.types.router import GenericLiteLLMParams

    nested = {
        "type": "function",
        "function": {"name": "nested", "parameters": {}},
    }
    already_flat = {"type": "function", "name": "flat", "parameters": {}}
    request_params = {"tools": [nested, already_flat]}

    self.config.transform_responses_api_request(
        model=self.model,
        input="test input",
        response_api_optional_request_params=request_params,
        litellm_params=GenericLiteLLMParams(),
        headers={},
    )

    result_tools = request_params["tools"]
    assert len(result_tools) == 2

    # The nested tool lost its "function" wrapper and kept its name.
    first = result_tools[0]
    assert "function" not in first
    assert first["name"] == "nested"

    # The flat tool came through unchanged.
    assert result_tools[1] == already_flat

def test_azure_responses_api_tool_flattening_no_tools(self):
    """When the request carries no 'tools' key, the transform neither
    crashes nor injects one."""
    from litellm.types.router import GenericLiteLLMParams

    request_params = {}

    # Must complete without raising even though "tools" is absent.
    self.config.transform_responses_api_request(
        model=self.model,
        input="test input",
        response_api_optional_request_params=request_params,
        litellm_params=GenericLiteLLMParams(),
        headers={},
    )

    # The transform did not add a "tools" entry as a side effect.
    assert "tools" not in request_params
Loading