Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
135 changes: 135 additions & 0 deletions tests/entrypoints/openai/test_responses_function_call_parsing.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,135 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for ResponsesRequest function call parsing.

This test module ensures that function call dictionaries are properly parsed
as ResponseFunctionToolCall objects when creating ResponsesRequest instances,
preventing regression of the function call parsing issue.
"""

import pytest
from openai.types.responses.response_function_tool_call import (
ResponseFunctionToolCall)

from vllm.entrypoints.openai.protocol import ResponsesRequest


def test_function_call_dict_parsing():
    """Verify that a raw function-call dict in ``input`` is coerced into a
    ``ResponseFunctionToolCall`` instance with every field intact."""
    # Payload shaped exactly like the JSON an API client would send.
    raw_call = {
        'arguments': '{"location": "Tokyo"}',
        'call_id': 'call_a90e65e81cd84553855dde1733e77749',
        'name': 'get_weather',
        'type': 'function_call',
        'id': 'ft_a90e65e81cd84553855dde1733e77749',
        'status': 'completed',
    }

    request = ResponsesRequest(input=[raw_call], model='test-model')
    parsed = request.input[0]

    # The dict must have been promoted to a typed object.
    assert isinstance(parsed, ResponseFunctionToolCall), (
        f"Expected ResponseFunctionToolCall, got {type(parsed)}")

    # Every field of the payload must survive the conversion unchanged.
    for field, expected in raw_call.items():
        assert getattr(parsed, field) == expected


def test_direct_function_call_object_preservation():
    """A ``ResponseFunctionToolCall`` passed in directly must come back
    untouched — validation must not rebuild it."""
    tool_call = ResponseFunctionToolCall(
        arguments='{"city": "New York"}',
        call_id='call_12345',
        name='get_temperature',
        type='function_call',
        id='ft_12345',
        status='completed',
    )

    request = ResponsesRequest(input=[tool_call], model='test-model')
    result = request.input[0]

    assert isinstance(result, ResponseFunctionToolCall)
    # Identity check: the exact same object, not a re-validated copy.
    assert result is tool_call


def test_mixed_input_types_with_function_calls():
    """Function-call dicts are converted while other inputs pass through."""
    message = {'role': 'user', 'content': 'Hello, how can I help?'}
    tool_call = {
        'arguments': '{"query": "weather"}',
        'call_id': 'call_mixed_test',
        'name': 'search',
        'type': 'function_call',
        'id': 'ft_mixed_test',
        'status': 'completed',
    }

    request = ResponsesRequest(input=[message, tool_call],
                               model='test-model')

    assert len(request.input) == 2

    # The plain chat-message dict is left exactly as supplied.
    first = request.input[0]
    assert isinstance(first, dict)
    assert first['role'] == 'user'
    assert first['content'] == 'Hello, how can I help?'

    # The function-call dict becomes a typed object.
    second = request.input[1]
    assert isinstance(second, ResponseFunctionToolCall)
    assert second.name == 'search'
    assert second.arguments == '{"query": "weather"}'


@pytest.mark.parametrize("arguments", [
    '{"location": "Tokyo"}',
    '{"location": {"city": "Tokyo", "country": "Japan"}, "units": "celsius"}',
    '{"query": "weather", "filters": ["temperature", "humidity"], "count": 5}',
    '{"complex": {"nested": {"data": true}}, "array": [1, 2, 3]}',
])
def test_function_call_with_complex_arguments(arguments):
    """Test function call parsing with various argument complexities.

    The ``arguments`` string is opaque to the request parser, so arbitrarily
    nested JSON must round-trip through validation unchanged.
    """
    complex_function_call = {
        'arguments': arguments,
        'call_id': 'call_complex',
        'name': 'get_detailed_weather',
        'type': 'function_call',
        'id': 'ft_complex',
        'status': 'completed',
    }

    request = ResponsesRequest(input=[complex_function_call],
                               model='test-model')
    parsed_function_call = request.input[0]

    assert isinstance(parsed_function_call, ResponseFunctionToolCall)
    assert parsed_function_call.name == 'get_detailed_weather'
    assert parsed_function_call.arguments == arguments
24 changes: 24 additions & 0 deletions vllm/entrypoints/openai/protocol.py
Original file line number Diff line number Diff line change
Expand Up @@ -960,6 +960,30 @@ def check_cache_salt_support(cls, data):
"non-empty string if provided.")
return data

@model_validator(mode="before")
def function_call_parsing(cls, data):
"""Function call parsing to ensure ResponseFunctionToolCall objects
are created."""
input_data = data.get("input")
Copy link
Collaborator

@aarnphm aarnphm Sep 9, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm confused this seems to live under ChatCompletionRequest, but the PR seems to be only Responses related.

I haven't looked at the responses code yet, but do we use this object in the responses API as well?

Copy link
Contributor Author

@JasonZhu1313 JasonZhu1313 Sep 9, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Last week tool calling was not supported so I have to use the response API which supports tool call

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

c29fb54 Looks like it's supported

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this is a great improvement.

I also encountered this issue here: https://github.com/vllm-project/vllm/pull/20874/files#diff-31e6bd0df09a47b5587701203d558701ac46e4f85bf7db83632da9990eaef198R1546.
I wanted to use isinstance(input, ResponseFunctionToolCall) for the check, but since input is a dict, isinstance cannot be used — I had to resort to input.get().

However, I’m confused: why is function_call_parsing located under ChatCompletionRequest rather than ResponsesRequest? Shouldn’t it belong to ChatCompletionRequest?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@JasonZhu1313 could your full client code. this change doesn't seem to impact. why do we depend on ChatCompletionRequest?

if isinstance(input_data, list):
fixed_input = []
for item in input_data:
if isinstance(item,
dict) and item.get('type') == 'function_call':
# Convert dict to ResponseFunctionToolCall object
try:
from openai.types.responses.response_function_tool_call import ( # noqa: E501
ResponseFunctionToolCall)
function_call_obj = ResponseFunctionToolCall(**item)
fixed_input.append(function_call_obj)
except Exception:
# If conversion fails, keep the original dict
fixed_input.append(item)
else:
fixed_input.append(item)
data["input"] = fixed_input
return data


class CompletionRequest(OpenAIBaseModel):
# Ordered by official OpenAI API documentation
Expand Down