diff --git a/python/DEV_SETUP.md b/python/DEV_SETUP.md index 2d4b9b92b1..fc261bdde0 100644 --- a/python/DEV_SETUP.md +++ b/python/DEV_SETUP.md @@ -154,6 +154,14 @@ Example: chat_completion = OpenAIChatClient(env_file_path="openai.env") ``` +## Method naming inside connectors + +When naming methods inside connectors, we have a loose preference for the following conventions: +- Use the pattern `_prepare_<data>_for_<service>` for methods that prepare data for sending to the external service. +- Use the pattern `_parse_<data>_from_<service>` for methods that process data received from the external service. + +This is not a strict rule, but a guideline to help maintain consistency across the codebase. + ## Tests All the tests are located in the `tests` folder of each package. There are tests that are marked with a `@skip_if_..._integration_tests_disabled` decorator, these are integration tests that require an external service to be running, like OpenAI or Azure OpenAI. diff --git a/python/packages/a2a/agent_framework_a2a/_agent.py b/python/packages/a2a/agent_framework_a2a/_agent.py index f90d7214a0..cd85509a40 100644 --- a/python/packages/a2a/agent_framework_a2a/_agent.py +++ b/python/packages/a2a/agent_framework_a2a/_agent.py @@ -237,14 +237,14 @@ async def run_stream( An agent response item. """ messages = self._normalize_messages(messages) - a2a_message = self._chat_message_to_a2a_message(messages[-1]) + a2a_message = self._prepare_message_for_a2a(messages[-1]) response_stream = self.client.send_message(a2a_message) async for item in response_stream: if isinstance(item, Message): # Process A2A Message - contents = self._a2a_parts_to_contents(item.parts) + contents = self._parse_contents_from_a2a(item.parts) yield AgentRunResponseUpdate( contents=contents, role=Role.ASSISTANT if item.role == A2ARole.agent else Role.USER, @@ -255,7 +255,7 @@ task, _update_event = item if isinstance(task, Task) and task.status.state in TERMINAL_TASK_STATES: # Convert Task artifacts to ChatMessages and yield as separate updates - task_messages = self._task_to_chat_messages(task) + task_messages = self._parse_messages_from_task(task) if task_messages: for message in task_messages: # Use the artifact's ID from raw_representation as message_id for unique identification @@ -280,8 +280,8 @@ msg = f"Only Message and Task responses are supported from A2A agents. Received: {type(item)}" raise NotImplementedError(msg) - def _chat_message_to_a2a_message(self, message: ChatMessage) -> A2AMessage: - """Convert a ChatMessage to an A2A Message. + def _prepare_message_for_a2a(self, message: ChatMessage) -> A2AMessage: + """Prepare a ChatMessage for the A2A protocol. Transforms Agent Framework ChatMessage objects into A2A protocol Messages by: - Converting all message contents to appropriate A2A Part types @@ -361,8 +361,8 @@ def _chat_message_to_a2a_message(self, message: ChatMessage) -> A2AMessage: metadata=cast(dict[str, Any], message.additional_properties), ) - def _a2a_parts_to_contents(self, parts: Sequence[A2APart]) -> list[Contents]: - """Convert A2A Parts to Agent Framework Contents. + def _parse_contents_from_a2a(self, parts: Sequence[A2APart]) -> list[Contents]: + """Parse A2A Parts into Agent Framework Contents. Transforms A2A protocol Parts into framework-native Content objects, handling text, file (URI/bytes), and data parts with metadata preservation.
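To make the DEV_SETUP.md convention above concrete, here is a minimal sketch of a prepare/parse pair for a hypothetical connector. All names below (`AcmeChatClient`, `_prepare_message_for_acme`, `_parse_message_from_acme`, and the reduced `ChatMessage`) are invented for illustration; this is not framework code.

```python
from dataclasses import dataclass
from typing import Any


@dataclass
class ChatMessage:
    """Stand-in for the framework's ChatMessage, reduced to what the sketch needs."""

    role: str
    text: str


class AcmeChatClient:
    """Hypothetical connector showing the `_prepare_<data>_for_<service>` / `_parse_<data>_from_<service>` pairing."""

    def _prepare_message_for_acme(self, message: ChatMessage) -> dict[str, Any]:
        # prepare: framework type -> wire format sent to the external service
        return {"role": message.role, "content": [{"type": "text", "text": message.text}]}

    def _parse_message_from_acme(self, payload: dict[str, Any]) -> ChatMessage:
        # parse: wire format received from the external service -> framework type
        text = "".join(block["text"] for block in payload["content"] if block["type"] == "text")
        return ChatMessage(role=payload["role"], text=text)


client = AcmeChatClient()
wire = client._prepare_message_for_acme(ChatMessage(role="user", text="hi"))
assert client._parse_message_from_acme(wire).text == "hi"
```

The point of the pairing is that each external service ends up with exactly two well-named seams: one method per direction of the wire format, which is the shape the renames in this diff converge on.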
@@ -410,17 +410,17 @@ def _a2a_parts_to_contents(self, parts: Sequence[A2APart]) -> list[Contents]: raise ValueError(f"Unknown Part kind: {inner_part.kind}") return contents - def _task_to_chat_messages(self, task: Task) -> list[ChatMessage]: - """Convert A2A Task artifacts to ChatMessages with ASSISTANT role.""" + def _parse_messages_from_task(self, task: Task) -> list[ChatMessage]: + """Parse A2A Task artifacts into ChatMessages with ASSISTANT role.""" messages: list[ChatMessage] = [] if task.artifacts is not None: for artifact in task.artifacts: - messages.append(self._artifact_to_chat_message(artifact)) + messages.append(self._parse_message_from_artifact(artifact)) elif task.history is not None and len(task.history) > 0: # Include the last history item as the agent response history_item = task.history[-1] - contents = self._a2a_parts_to_contents(history_item.parts) + contents = self._parse_contents_from_a2a(history_item.parts) messages.append( ChatMessage( role=Role.ASSISTANT if history_item.role == A2ARole.agent else Role.USER, @@ -431,9 +431,9 @@ def _task_to_chat_messages(self, task: Task) -> list[ChatMessage]: return messages - def _artifact_to_chat_message(self, artifact: Artifact) -> ChatMessage: - """Convert A2A Artifact to ChatMessage using part contents.""" - contents = self._a2a_parts_to_contents(artifact.parts) + def _parse_message_from_artifact(self, artifact: Artifact) -> ChatMessage: + """Parse A2A Artifact into ChatMessage using part contents.""" + contents = self._parse_contents_from_a2a(artifact.parts) return ChatMessage( role=Role.ASSISTANT, contents=contents, diff --git a/python/packages/a2a/tests/test_a2a_agent.py b/python/packages/a2a/tests/test_a2a_agent.py index 82d3d02875..58ab18fee4 100644 --- a/python/packages/a2a/tests/test_a2a_agent.py +++ b/python/packages/a2a/tests/test_a2a_agent.py @@ -197,18 +197,18 @@ async def test_run_with_unknown_response_type_raises_error(a2a_agent: A2AAgent, await a2a_agent.run("Test message") -def test_task_to_chat_messages_empty_artifacts(a2a_agent: A2AAgent) -> None: - """Test _task_to_chat_messages with task containing no artifacts.""" +def test_parse_messages_from_task_empty_artifacts(a2a_agent: A2AAgent) -> None: + """Test _parse_messages_from_task with task containing no artifacts.""" task = MagicMock() task.artifacts = None - result = a2a_agent._task_to_chat_messages(task) + result = a2a_agent._parse_messages_from_task(task) assert len(result) == 0 -def test_task_to_chat_messages_with_artifacts(a2a_agent: A2AAgent) -> None: - """Test _task_to_chat_messages with task containing artifacts.""" +def test_parse_messages_from_task_with_artifacts(a2a_agent: A2AAgent) -> None: + """Test _parse_messages_from_task with task containing artifacts.""" task = MagicMock() # Create mock artifacts @@ -232,7 +232,7 @@ def test_task_to_chat_messages_with_artifacts(a2a_agent: A2AAgent) -> None: task.artifacts = [artifact1, artifact2] - result = a2a_agent._task_to_chat_messages(task) + result = a2a_agent._parse_messages_from_task(task) assert len(result) == 2 assert result[0].text == "Content 1" @@ -240,8 +240,8 @@ def test_task_to_chat_messages_with_artifacts(a2a_agent: A2AAgent) -> None: assert all(msg.role == Role.ASSISTANT for msg in result) -def test_artifact_to_chat_message(a2a_agent: A2AAgent) -> None: - """Test _artifact_to_chat_message conversion.""" +def test_parse_message_from_artifact(a2a_agent: A2AAgent) -> None: + """Test _parse_message_from_artifact conversion.""" artifact = MagicMock() artifact.artifact_id = "test-artifact" @@ 
-253,7 +253,7 @@ def test_artifact_to_chat_message(a2a_agent: A2AAgent) -> None: artifact.parts = [text_part] - result = a2a_agent._artifact_to_chat_message(artifact) + result = a2a_agent._parse_message_from_artifact(artifact) assert isinstance(result, ChatMessage) assert result.role == Role.ASSISTANT @@ -276,7 +276,7 @@ def test_get_uri_data_invalid_uri() -> None: _get_uri_data("not-a-valid-data-uri") -def test_a2a_parts_to_contents_conversion(a2a_agent: A2AAgent) -> None: +def test_parse_contents_from_a2a_conversion(a2a_agent: A2AAgent) -> None: """Test A2A parts to contents conversion.""" agent = A2AAgent(name="Test Agent", client=MockA2AClient(), _http_client=None) @@ -285,7 +285,7 @@ def test_a2a_parts_to_contents_conversion(a2a_agent: A2AAgent) -> None: parts = [Part(root=TextPart(text="First part")), Part(root=TextPart(text="Second part"))] # Convert to contents - contents = agent._a2a_parts_to_contents(parts) + contents = agent._parse_contents_from_a2a(parts) # Verify conversion assert len(contents) == 2 @@ -295,30 +295,30 @@ def test_a2a_parts_to_contents_conversion(a2a_agent: A2AAgent) -> None: assert contents[1].text == "Second part" -def test_chat_message_to_a2a_message_with_error_content(a2a_agent: A2AAgent) -> None: - """Test _chat_message_to_a2a_message with ErrorContent.""" +def test_prepare_message_for_a2a_with_error_content(a2a_agent: A2AAgent) -> None: + """Test _prepare_message_for_a2a with ErrorContent.""" # Create ChatMessage with ErrorContent error_content = ErrorContent(message="Test error message") message = ChatMessage(role=Role.USER, contents=[error_content]) # Convert to A2A message - a2a_message = a2a_agent._chat_message_to_a2a_message(message) + a2a_message = a2a_agent._prepare_message_for_a2a(message) # Verify conversion assert len(a2a_message.parts) == 1 assert a2a_message.parts[0].root.text == "Test error message" -def test_chat_message_to_a2a_message_with_uri_content(a2a_agent: A2AAgent) -> None: - """Test _chat_message_to_a2a_message with UriContent.""" +def test_prepare_message_for_a2a_with_uri_content(a2a_agent: A2AAgent) -> None: + """Test _prepare_message_for_a2a with UriContent.""" # Create ChatMessage with UriContent uri_content = UriContent(uri="http://example.com/file.pdf", media_type="application/pdf") message = ChatMessage(role=Role.USER, contents=[uri_content]) # Convert to A2A message - a2a_message = a2a_agent._chat_message_to_a2a_message(message) + a2a_message = a2a_agent._prepare_message_for_a2a(message) # Verify conversion assert len(a2a_message.parts) == 1 @@ -326,15 +326,15 @@ def test_chat_message_to_a2a_message_with_uri_content(a2a_agent: A2AAgent) -> No assert a2a_message.parts[0].root.file.mime_type == "application/pdf" -def test_chat_message_to_a2a_message_with_data_content(a2a_agent: A2AAgent) -> None: - """Test _chat_message_to_a2a_message with DataContent.""" +def test_prepare_message_for_a2a_with_data_content(a2a_agent: A2AAgent) -> None: + """Test _prepare_message_for_a2a with DataContent.""" # Create ChatMessage with DataContent (base64 data URI) data_content = DataContent(uri="data:text/plain;base64,SGVsbG8gV29ybGQ=", media_type="text/plain") message = ChatMessage(role=Role.USER, contents=[data_content]) # Convert to A2A message - a2a_message = a2a_agent._chat_message_to_a2a_message(message) + a2a_message = a2a_agent._prepare_message_for_a2a(message) # Verify conversion assert len(a2a_message.parts) == 1 @@ -342,14 +342,14 @@ def test_chat_message_to_a2a_message_with_data_content(a2a_agent: A2AAgent) -> N assert 
a2a_message.parts[0].root.file.mime_type == "text/plain" -def test_chat_message_to_a2a_message_empty_contents_raises_error(a2a_agent: A2AAgent) -> None: - """Test _chat_message_to_a2a_message with empty contents raises ValueError.""" +def test_prepare_message_for_a2a_empty_contents_raises_error(a2a_agent: A2AAgent) -> None: + """Test _prepare_message_for_a2a with empty contents raises ValueError.""" # Create ChatMessage with no contents message = ChatMessage(role=Role.USER, contents=[]) # Should raise ValueError for empty contents with raises(ValueError, match="ChatMessage.contents is empty"): - a2a_agent._chat_message_to_a2a_message(message) + a2a_agent._prepare_message_for_a2a(message) async def test_run_stream_with_message_response(a2a_agent: A2AAgent, mock_a2a_client: MockA2AClient) -> None: @@ -405,7 +405,7 @@ async def test_context_manager_no_cleanup_when_no_http_client() -> None: pass -def test_chat_message_to_a2a_message_with_multiple_contents() -> None: +def test_prepare_message_for_a2a_with_multiple_contents() -> None: """Test conversion of ChatMessage with multiple contents.""" agent = A2AAgent(client=MagicMock(), _http_client=None) @@ -421,7 +421,7 @@ def test_chat_message_to_a2a_message_with_multiple_contents() -> None: ], ) - result = agent._chat_message_to_a2a_message(message) + result = agent._prepare_message_for_a2a(message) # Should have converted all 4 contents to parts assert len(result.parts) == 4 @@ -433,7 +433,7 @@ def test_chat_message_to_a2a_message_with_multiple_contents() -> None: assert result.parts[3].root.kind == "text" # JSON text remains as text (no parsing) -def test_a2a_parts_to_contents_with_data_part() -> None: +def test_parse_contents_from_a2a_with_data_part() -> None: """Test conversion of A2A DataPart.""" agent = A2AAgent(client=MagicMock(), _http_client=None) @@ -441,7 +441,7 @@ def test_a2a_parts_to_contents_with_data_part() -> None: # Create DataPart data_part = Part(root=DataPart(data={"key": "value", "number": 42}, metadata={"source": "test"})) - contents = agent._a2a_parts_to_contents([data_part]) + contents = agent._parse_contents_from_a2a([data_part]) assert len(contents) == 1 @@ -450,7 +450,7 @@ def test_a2a_parts_to_contents_with_data_part() -> None: assert contents[0].additional_properties == {"source": "test"} -def test_a2a_parts_to_contents_unknown_part_kind() -> None: +def test_parse_contents_from_a2a_unknown_part_kind() -> None: """Test error handling for unknown A2A part kind.""" agent = A2AAgent(client=MagicMock(), _http_client=None) @@ -459,10 +459,10 @@ def test_a2a_parts_to_contents_unknown_part_kind() -> None: mock_part.root.kind = "unknown_kind" with raises(ValueError, match="Unknown Part kind: unknown_kind"): - agent._a2a_parts_to_contents([mock_part]) + agent._parse_contents_from_a2a([mock_part]) -def test_chat_message_to_a2a_message_with_hosted_file() -> None: +def test_prepare_message_for_a2a_with_hosted_file() -> None: """Test conversion of ChatMessage with HostedFileContent to A2A message.""" agent = A2AAgent(client=MagicMock(), _http_client=None) @@ -473,7 +473,7 @@ def test_chat_message_to_a2a_message_with_hosted_file() -> None: contents=[HostedFileContent(file_id="hosted://storage/document.pdf")], ) - result = agent._chat_message_to_a2a_message(message) # noqa: SLF001 + result = agent._prepare_message_for_a2a(message) # noqa: SLF001 # Verify the conversion assert len(result.parts) == 1 @@ -488,7 +488,7 @@ def test_chat_message_to_a2a_message_with_hosted_file() -> None: assert part.root.file.mime_type is None # 
HostedFileContent doesn't specify media_type -def test_a2a_parts_to_contents_with_hosted_file_uri() -> None: +def test_parse_contents_from_a2a_with_hosted_file_uri() -> None: """Test conversion of A2A FilePart with hosted file URI back to UriContent.""" agent = A2AAgent(client=MagicMock(), _http_client=None) @@ -503,7 +503,7 @@ def test_a2a_parts_to_contents_with_hosted_file_uri() -> None: ) ) - contents = agent._a2a_parts_to_contents([file_part]) # noqa: SLF001 + contents = agent._parse_contents_from_a2a([file_part]) # noqa: SLF001 assert len(contents) == 1 diff --git a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py index e4eca2d005..a5b169fbbf 100644 --- a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py +++ b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py @@ -25,7 +25,6 @@ TextContent, TextReasoningContent, TextSpanRegion, - ToolProtocol, UsageContent, UsageDetails, get_logger, @@ -214,9 +213,11 @@ async def _inner_get_response( chat_options: ChatOptions, **kwargs: Any, ) -> ChatResponse: - # Extract necessary state from messages and options - run_options = self._create_run_options(messages, chat_options, **kwargs) + # prepare + run_options = self._prepare_options(messages, chat_options, **kwargs) + # execute message = await self.anthropic_client.beta.messages.create(**run_options, stream=False) + # process return self._process_message(message) async def _inner_get_streaming_response( @@ -226,16 +227,17 @@ async def _inner_get_streaming_response( chat_options: ChatOptions, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: - # Extract necessary state from messages and options - run_options = self._create_run_options(messages, chat_options, **kwargs) + # prepare + run_options = self._prepare_options(messages, chat_options, **kwargs) + # execute and process async for chunk in await self.anthropic_client.beta.messages.create(**run_options, stream=True): parsed_chunk = self._process_stream_event(chunk) if parsed_chunk: yield parsed_chunk - # region Create Run Options and Helpers + # region Prep methods - def _create_run_options( + def _prepare_options( self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, @@ -251,78 +253,91 @@ def _create_run_options( Returns: A dictionary of run options for the Anthropic client. 
""" - if chat_options.additional_properties and "additional_beta_flags" in chat_options.additional_properties: - betas = chat_options.additional_properties.pop("additional_beta_flags") - else: - betas = [] - run_options: dict[str, Any] = { - "model": chat_options.model_id or self.model_id, - "messages": self._convert_messages_to_anthropic_format(messages), - "max_tokens": chat_options.max_tokens or ANTHROPIC_DEFAULT_MAX_TOKENS, - "extra_headers": {"User-Agent": AGENT_FRAMEWORK_USER_AGENT}, - "betas": {*BETA_FLAGS, *self.additional_beta_flags, *betas}, + run_options: dict[str, Any] = chat_options.to_dict( + exclude={ + "type", + "instructions", # handled via system message + "tool_choice", # handled separately + "allow_multiple_tool_calls", # handled via tool_choice + "additional_properties", # handled separately + } + ) + + # translations between ChatOptions and Anthropic API + translations = { + "model_id": "model", + "stop": "stop_sequences", } + for old_key, new_key in translations.items(): + if old_key in run_options and old_key != new_key: + run_options[new_key] = run_options.pop(old_key) + + # model id + if not run_options.get("model"): + if not self.model_id: + raise ValueError("model_id must be a non-empty string") + run_options["model"] = self.model_id + + # max_tokens - Anthropic requires this, default if not provided + if not run_options.get("max_tokens"): + run_options["max_tokens"] = ANTHROPIC_DEFAULT_MAX_TOKENS - # Add any additional options from chat_options or kwargs - if chat_options.temperature is not None: - run_options["temperature"] = chat_options.temperature - if chat_options.top_p is not None: - run_options["top_p"] = chat_options.top_p - if chat_options.stop is not None: - run_options["stop_sequences"] = chat_options.stop + # messages + run_options["messages"] = self._prepare_messages_for_anthropic(messages) + + # system message - first system message is passed as instructions if messages and isinstance(messages[0], ChatMessage) and messages[0].role == Role.SYSTEM: - # first system message is passed as instructions run_options["system"] = messages[0].text - if chat_options.tool_choice is not None: - match ( - chat_options.tool_choice if isinstance(chat_options.tool_choice, str) else chat_options.tool_choice.mode - ): - case "auto": - run_options["tool_choice"] = {"type": "auto"} - if chat_options.allow_multiple_tool_calls is not None: - run_options["tool_choice"][ # type:ignore[reportArgumentType] - "disable_parallel_tool_use" - ] = not chat_options.allow_multiple_tool_calls - case "required": - if chat_options.tool_choice.required_function_name: - run_options["tool_choice"] = { - "type": "tool", - "name": chat_options.tool_choice.required_function_name, - } - if chat_options.allow_multiple_tool_calls is not None: - run_options["tool_choice"][ # type:ignore[reportArgumentType] - "disable_parallel_tool_use" - ] = not chat_options.allow_multiple_tool_calls - else: - run_options["tool_choice"] = {"type": "any"} - if chat_options.allow_multiple_tool_calls is not None: - run_options["tool_choice"][ # type:ignore[reportArgumentType] - "disable_parallel_tool_use" - ] = not chat_options.allow_multiple_tool_calls - case "none": - run_options["tool_choice"] = {"type": "none"} - case _: - logger.debug(f"Ignoring unsupported tool choice mode: {chat_options.tool_choice.mode} for now") - if tools_and_mcp := self._convert_tools_to_anthropic_format(chat_options.tools): - run_options.update(tools_and_mcp) - if chat_options.additional_properties: - 
run_options.update(chat_options.additional_properties) + + # betas + run_options["betas"] = self._prepare_betas(chat_options) + + # extra headers + run_options["extra_headers"] = {"User-Agent": AGENT_FRAMEWORK_USER_AGENT} + + # tools, mcp servers and tool choice + if tools_config := self._prepare_tools_for_anthropic(chat_options): + run_options.update(tools_config) + + # additional properties + additional_options = { + key: value + for key, value in chat_options.additional_properties.items() + if value is not None and key != "additional_beta_flags" + } + if additional_options: + run_options.update(additional_options) run_options.update(kwargs) return run_options - def _convert_messages_to_anthropic_format(self, messages: MutableSequence[ChatMessage]) -> list[dict[str, Any]]: - """Convert a list of ChatMessages to the format expected by the Anthropic client. + def _prepare_betas(self, chat_options: ChatOptions) -> set[str]: + """Prepare the beta flags for the Anthropic API request. + + Args: + chat_options: The chat options that may contain additional beta flags. + + Returns: + A set of beta flag strings to include in the request. + """ + return { + *BETA_FLAGS, + *self.additional_beta_flags, + *chat_options.additional_properties.get("additional_beta_flags", []), + } + + def _prepare_messages_for_anthropic(self, messages: MutableSequence[ChatMessage]) -> list[dict[str, Any]]: + """Prepare a list of ChatMessages for the Anthropic client. This skips the first message if it is a system message, as Anthropic expects system instructions as a separate parameter. """ # first system message is passed as instructions if messages and isinstance(messages[0], ChatMessage) and messages[0].role == Role.SYSTEM: - return [self._convert_message_to_anthropic_format(msg) for msg in messages[1:]] - return [self._convert_message_to_anthropic_format(msg) for msg in messages] + return [self._prepare_message_for_anthropic(msg) for msg in messages[1:]] + return [self._prepare_message_for_anthropic(msg) for msg in messages] - def _convert_message_to_anthropic_format(self, message: ChatMessage) -> dict[str, Any]: - """Convert a ChatMessage to the format expected by the Anthropic client. + def _prepare_message_for_anthropic(self, message: ChatMessage) -> dict[str, Any]: + """Prepare a ChatMessage for the Anthropic client. Args: message: The ChatMessage to convert. 
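The `_prepare_options` rewrite above centers on a small rename pass from framework option names to provider parameter names, plus a required default. A standalone sketch of that pattern follows; `translate_options`, the `TRANSLATIONS` table, and the 1024-token default are illustrative stand-ins, not the connector's actual `ChatOptions.to_dict` API.

```python
from typing import Any

# Framework option name -> provider parameter name (illustrative subset).
TRANSLATIONS = {"model_id": "model", "stop": "stop_sequences"}


def translate_options(options: dict[str, Any], default_max_tokens: int = 1024) -> dict[str, Any]:
    # Drop unset options, then rename keys the provider spells differently.
    run_options = {key: value for key, value in options.items() if value is not None}
    for old_key, new_key in TRANSLATIONS.items():
        if old_key in run_options and old_key != new_key:
            run_options[new_key] = run_options.pop(old_key)
    # The provider requires max_tokens, so fall back to a default when unset.
    run_options.setdefault("max_tokens", default_max_tokens)
    return run_options


assert translate_options({"model_id": "claude-x", "stop": ["END"], "top_p": None}) == {
    "model": "claude-x",
    "stop_sequences": ["END"],
    "max_tokens": 1024,
}
```

Keeping the renames in one table rather than scattered `if` statements is what lets `_prepare_options` shrink to a dump-translate-override pipeline.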
@@ -376,58 +391,96 @@ def _convert_message_to_anthropic_format(self, message: ChatMessage) -> dict[str "content": a_content, } - def _convert_tools_to_anthropic_format( - self, tools: list[ToolProtocol | MutableMapping[str, Any]] | None - ) -> dict[str, Any] | None: - if not tools: - return None - tool_list: list[MutableMapping[str, Any]] = [] - mcp_server_list: list[MutableMapping[str, Any]] = [] - for tool in tools: - match tool: - case MutableMapping(): - tool_list.append(tool) - case AIFunction(): - tool_list.append({ - "type": "custom", - "name": tool.name, - "description": tool.description, - "input_schema": tool.parameters(), - }) - case HostedWebSearchTool(): - search_tool: dict[str, Any] = { - "type": "web_search_20250305", - "name": "web_search", - } - if tool.additional_properties: - search_tool.update(tool.additional_properties) - tool_list.append(search_tool) - case HostedCodeInterpreterTool(): - code_tool: dict[str, Any] = { - "type": "code_execution_20250825", - "name": "code_execution", - } - tool_list.append(code_tool) - case HostedMCPTool(): - server_def: dict[str, Any] = { - "type": "url", - "name": tool.name, - "url": str(tool.url), - } - if tool.allowed_tools: - server_def["tool_configuration"] = {"allowed_tools": list(tool.allowed_tools)} - if tool.headers and (auth := tool.headers.get("authorization")): - server_def["authorization_token"] = auth - mcp_server_list.append(server_def) + def _prepare_tools_for_anthropic(self, chat_options: ChatOptions) -> dict[str, Any] | None: + """Prepare tools and tool choice configuration for the Anthropic API request. + + Args: + chat_options: The chat options containing tools and tool choice settings. + + Returns: + A dictionary with tools, mcp_servers, and tool_choice configuration, or None if empty. 
+ """ + result: dict[str, Any] = {} + + # Process tools + if chat_options.tools: + tool_list: list[MutableMapping[str, Any]] = [] + mcp_server_list: list[MutableMapping[str, Any]] = [] + for tool in chat_options.tools: + match tool: + case MutableMapping(): + tool_list.append(tool) + case AIFunction(): + tool_list.append({ + "type": "custom", + "name": tool.name, + "description": tool.description, + "input_schema": tool.parameters(), + }) + case HostedWebSearchTool(): + search_tool: dict[str, Any] = { + "type": "web_search_20250305", + "name": "web_search", + } + if tool.additional_properties: + search_tool.update(tool.additional_properties) + tool_list.append(search_tool) + case HostedCodeInterpreterTool(): + code_tool: dict[str, Any] = { + "type": "code_execution_20250825", + "name": "code_execution", + } + tool_list.append(code_tool) + case HostedMCPTool(): + server_def: dict[str, Any] = { + "type": "url", + "name": tool.name, + "url": str(tool.url), + } + if tool.allowed_tools: + server_def["tool_configuration"] = {"allowed_tools": list(tool.allowed_tools)} + if tool.headers and (auth := tool.headers.get("authorization")): + server_def["authorization_token"] = auth + mcp_server_list.append(server_def) + case _: + logger.debug(f"Ignoring unsupported tool type: {type(tool)} for now") + + if tool_list: + result["tools"] = tool_list + if mcp_server_list: + result["mcp_servers"] = mcp_server_list + + # Process tool choice + if chat_options.tool_choice is not None: + tool_choice_mode = ( + chat_options.tool_choice if isinstance(chat_options.tool_choice, str) else chat_options.tool_choice.mode + ) + match tool_choice_mode: + case "auto": + tool_choice: dict[str, Any] = {"type": "auto"} + if chat_options.allow_multiple_tool_calls is not None: + tool_choice["disable_parallel_tool_use"] = not chat_options.allow_multiple_tool_calls + result["tool_choice"] = tool_choice + case "required": + if ( + not isinstance(chat_options.tool_choice, str) + and chat_options.tool_choice.required_function_name + ): + tool_choice = { + "type": "tool", + "name": chat_options.tool_choice.required_function_name, + } + else: + tool_choice = {"type": "any"} + if chat_options.allow_multiple_tool_calls is not None: + tool_choice["disable_parallel_tool_use"] = not chat_options.allow_multiple_tool_calls + result["tool_choice"] = tool_choice + case "none": + result["tool_choice"] = {"type": "none"} case _: - logger.debug(f"Ignoring unsupported tool type: {type(tool)} for now") + logger.debug(f"Ignoring unsupported tool choice mode: {tool_choice_mode} for now") - all_tools: dict[str, list[MutableMapping[str, Any]]] = {} - if tool_list: - all_tools["tools"] = tool_list - if mcp_server_list: - all_tools["mcp_servers"] = mcp_server_list - return all_tools + return result or None # region Response Processing Methods @@ -445,11 +498,11 @@ def _process_message(self, message: BetaMessage) -> ChatResponse: messages=[ ChatMessage( role=Role.ASSISTANT, - contents=self._parse_message_contents(message.content), + contents=self._parse_contents_from_anthropic(message.content), raw_representation=message, ) ], - usage_details=self._parse_message_usage(message.usage), + usage_details=self._parse_usage_from_anthropic(message.usage), model_id=message.model, finish_reason=FINISH_REASON_MAP.get(message.stop_reason) if message.stop_reason else None, raw_response=message, @@ -467,12 +520,12 @@ def _process_stream_event(self, event: BetaRawMessageStreamEvent) -> ChatRespons match event.type: case "message_start": usage_details: list[UsageContent] 
= [] - if event.message.usage and (details := self._parse_message_usage(event.message.usage)): + if event.message.usage and (details := self._parse_usage_from_anthropic(event.message.usage)): usage_details.append(UsageContent(details=details)) return ChatResponseUpdate( response_id=event.message.id, - contents=[*self._parse_message_contents(event.message.content), *usage_details], + contents=[*self._parse_contents_from_anthropic(event.message.content), *usage_details], model_id=event.message.model, finish_reason=FINISH_REASON_MAP.get(event.message.stop_reason) if event.message.stop_reason @@ -480,7 +533,7 @@ def _process_stream_event(self, event: BetaRawMessageStreamEvent) -> ChatRespons raw_response=event, ) case "message_delta": - usage = self._parse_message_usage(event.usage) + usage = self._parse_usage_from_anthropic(event.usage) return ChatResponseUpdate( contents=[UsageContent(details=usage, raw_representation=event.usage)] if usage else [], raw_response=event, @@ -488,13 +541,13 @@ def _process_stream_event(self, event: BetaRawMessageStreamEvent) -> ChatRespons case "message_stop": logger.debug("Received message_stop event; no content to process.") case "content_block_start": - contents = self._parse_message_contents([event.content_block]) + contents = self._parse_contents_from_anthropic([event.content_block]) return ChatResponseUpdate( contents=contents, raw_response=event, ) case "content_block_delta": - contents = self._parse_message_contents([event.delta]) + contents = self._parse_contents_from_anthropic([event.delta]) return ChatResponseUpdate( contents=contents, raw_response=event, @@ -505,7 +558,7 @@ def _process_stream_event(self, event: BetaRawMessageStreamEvent) -> ChatRespons logger.debug(f"Ignoring unsupported event type: {event.type}") return None - def _parse_message_usage(self, usage: BetaUsage | BetaMessageDeltaUsage | None) -> UsageDetails | None: + def _parse_usage_from_anthropic(self, usage: BetaUsage | BetaMessageDeltaUsage | None) -> UsageDetails | None: """Parse usage details from the Anthropic message usage.""" if not usage: return None @@ -518,7 +571,7 @@ def _parse_message_usage(self, usage: BetaUsage | BetaMessageDeltaUsage | None) usage_details.additional_counts["anthropic.cache_read_input_tokens"] = usage.cache_read_input_tokens return usage_details - def _parse_message_contents( + def _parse_contents_from_anthropic( self, content: Sequence[BetaContentBlock | BetaRawContentBlockDelta | BetaTextBlock] ) -> list[Contents]: """Parse contents from the Anthropic message.""" @@ -530,7 +583,7 @@ def _parse_message_contents( TextContent( text=content_block.text, raw_representation=content_block, - annotations=self._parse_citations(content_block), + annotations=self._parse_citations_from_anthropic(content_block), ) ) case "tool_use" | "mcp_tool_use" | "server_tool_use": @@ -549,7 +602,7 @@ def _parse_message_contents( FunctionResultContent( call_id=content_block.tool_use_id, name=name if name and call_id == content_block.tool_use_id else "mcp_tool", - result=self._parse_message_contents(content_block.content) + result=self._parse_contents_from_anthropic(content_block.content) if isinstance(content_block.content, list) else content_block.content, raw_representation=content_block, @@ -608,7 +661,7 @@ def _parse_message_contents( logger.debug(f"Ignoring unsupported content type: {content_block.type} for now") return contents - def _parse_citations( + def _parse_citations_from_anthropic( self, content_block: BetaContentBlock | BetaRawContentBlockDelta | BetaTextBlock 
) -> list[Annotations] | None: content_citations = getattr(content_block, "citations", None) diff --git a/python/packages/anthropic/tests/test_anthropic_client.py b/python/packages/anthropic/tests/test_anthropic_client.py index fa6061a998..e8a3ac9cb0 100644 --- a/python/packages/anthropic/tests/test_anthropic_client.py +++ b/python/packages/anthropic/tests/test_anthropic_client.py @@ -151,12 +151,12 @@ def test_anthropic_client_service_url(mock_anthropic_client: MagicMock) -> None: # Message Conversion Tests -def test_convert_message_to_anthropic_format_text(mock_anthropic_client: MagicMock) -> None: +def test_prepare_message_for_anthropic_text(mock_anthropic_client: MagicMock) -> None: """Test converting text message to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) message = ChatMessage(role=Role.USER, text="Hello, world!") - result = chat_client._convert_message_to_anthropic_format(message) + result = chat_client._prepare_message_for_anthropic(message) assert result["role"] == "user" assert len(result["content"]) == 1 @@ -164,7 +164,7 @@ def test_convert_message_to_anthropic_format_text(mock_anthropic_client: MagicMo assert result["content"][0]["text"] == "Hello, world!" -def test_convert_message_to_anthropic_format_function_call(mock_anthropic_client: MagicMock) -> None: +def test_prepare_message_for_anthropic_function_call(mock_anthropic_client: MagicMock) -> None: """Test converting function call message to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) message = ChatMessage( @@ -178,7 +178,7 @@ def test_convert_message_to_anthropic_format_function_call(mock_anthropic_client ], ) - result = chat_client._convert_message_to_anthropic_format(message) + result = chat_client._prepare_message_for_anthropic(message) assert result["role"] == "assistant" assert len(result["content"]) == 1 @@ -188,7 +188,7 @@ def test_convert_message_to_anthropic_format_function_call(mock_anthropic_client assert result["content"][0]["input"] == {"location": "San Francisco"} -def test_convert_message_to_anthropic_format_function_result(mock_anthropic_client: MagicMock) -> None: +def test_prepare_message_for_anthropic_function_result(mock_anthropic_client: MagicMock) -> None: """Test converting function result message to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) message = ChatMessage( @@ -202,7 +202,7 @@ def test_convert_message_to_anthropic_format_function_result(mock_anthropic_clie ], ) - result = chat_client._convert_message_to_anthropic_format(message) + result = chat_client._prepare_message_for_anthropic(message) assert result["role"] == "user" assert len(result["content"]) == 1 @@ -214,7 +214,7 @@ def test_convert_message_to_anthropic_format_function_result(mock_anthropic_clie assert result["content"][0]["is_error"] is False -def test_convert_message_to_anthropic_format_text_reasoning(mock_anthropic_client: MagicMock) -> None: +def test_prepare_message_for_anthropic_text_reasoning(mock_anthropic_client: MagicMock) -> None: """Test converting text reasoning message to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) message = ChatMessage( @@ -222,7 +222,7 @@ def test_convert_message_to_anthropic_format_text_reasoning(mock_anthropic_clien contents=[TextReasoningContent(text="Let me think about this...")], ) - result = chat_client._convert_message_to_anthropic_format(message) + result = chat_client._prepare_message_for_anthropic(message) assert 
result["role"] == "assistant" assert len(result["content"]) == 1 @@ -230,7 +230,7 @@ def test_convert_message_to_anthropic_format_text_reasoning(mock_anthropic_clien assert result["content"][0]["thinking"] == "Let me think about this..." -def test_convert_messages_to_anthropic_format_with_system(mock_anthropic_client: MagicMock) -> None: +def test_prepare_messages_for_anthropic_with_system(mock_anthropic_client: MagicMock) -> None: """Test converting messages list with system message.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ @@ -238,7 +238,7 @@ def test_convert_messages_to_anthropic_format_with_system(mock_anthropic_client: ChatMessage(role=Role.USER, text="Hello!"), ] - result = chat_client._convert_messages_to_anthropic_format(messages) + result = chat_client._prepare_messages_for_anthropic(messages) # System message should be skipped assert len(result) == 1 @@ -246,7 +246,7 @@ def test_convert_messages_to_anthropic_format_with_system(mock_anthropic_client: assert result[0]["content"][0]["text"] == "Hello!" -def test_convert_messages_to_anthropic_format_without_system(mock_anthropic_client: MagicMock) -> None: +def test_prepare_messages_for_anthropic_without_system(mock_anthropic_client: MagicMock) -> None: """Test converting messages list without system message.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ @@ -254,7 +254,7 @@ def test_convert_messages_to_anthropic_format_without_system(mock_anthropic_clie ChatMessage(role=Role.ASSISTANT, text="Hi there!"), ] - result = chat_client._convert_messages_to_anthropic_format(messages) + result = chat_client._prepare_messages_for_anthropic(messages) assert len(result) == 2 assert result[0]["role"] == "user" @@ -264,7 +264,7 @@ def test_convert_messages_to_anthropic_format_without_system(mock_anthropic_clie # Tool Conversion Tests -def test_convert_tools_to_anthropic_format_ai_function(mock_anthropic_client: MagicMock) -> None: +def test_prepare_tools_for_anthropic_ai_function(mock_anthropic_client: MagicMock) -> None: """Test converting AIFunction to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) @@ -273,9 +273,8 @@ def get_weather(location: Annotated[str, Field(description="Location to get weat """Get weather for a location.""" return f"Weather for {location}" - tools = [get_weather] - - result = chat_client._convert_tools_to_anthropic_format(tools) + chat_options = ChatOptions(tools=[get_weather]) + result = chat_client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "tools" in result @@ -285,12 +284,12 @@ def get_weather(location: Annotated[str, Field(description="Location to get weat assert "Get weather for a location" in result["tools"][0]["description"] -def test_convert_tools_to_anthropic_format_web_search(mock_anthropic_client: MagicMock) -> None: +def test_prepare_tools_for_anthropic_web_search(mock_anthropic_client: MagicMock) -> None: """Test converting HostedWebSearchTool to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - tools = [HostedWebSearchTool()] + chat_options = ChatOptions(tools=[HostedWebSearchTool()]) - result = chat_client._convert_tools_to_anthropic_format(tools) + result = chat_client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "tools" in result @@ -299,12 +298,12 @@ def test_convert_tools_to_anthropic_format_web_search(mock_anthropic_client: Mag assert result["tools"][0]["name"] == "web_search" -def 
test_convert_tools_to_anthropic_format_code_interpreter(mock_anthropic_client: MagicMock) -> None: +def test_prepare_tools_for_anthropic_code_interpreter(mock_anthropic_client: MagicMock) -> None: """Test converting HostedCodeInterpreterTool to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - tools = [HostedCodeInterpreterTool()] + chat_options = ChatOptions(tools=[HostedCodeInterpreterTool()]) - result = chat_client._convert_tools_to_anthropic_format(tools) + result = chat_client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "tools" in result @@ -313,12 +312,12 @@ def test_convert_tools_to_anthropic_format_code_interpreter(mock_anthropic_clien assert result["tools"][0]["name"] == "code_execution" -def test_convert_tools_to_anthropic_format_mcp_tool(mock_anthropic_client: MagicMock) -> None: +def test_prepare_tools_for_anthropic_mcp_tool(mock_anthropic_client: MagicMock) -> None: """Test converting HostedMCPTool to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - tools = [HostedMCPTool(name="test-mcp", url="https://example.com/mcp")] + chat_options = ChatOptions(tools=[HostedMCPTool(name="test-mcp", url="https://example.com/mcp")]) - result = chat_client._convert_tools_to_anthropic_format(tools) + result = chat_client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "mcp_servers" in result @@ -328,18 +327,20 @@ def test_convert_tools_to_anthropic_format_mcp_tool(mock_anthropic_client: Magic assert result["mcp_servers"][0]["url"] == "https://example.com/mcp" -def test_convert_tools_to_anthropic_format_mcp_with_auth(mock_anthropic_client: MagicMock) -> None: +def test_prepare_tools_for_anthropic_mcp_with_auth(mock_anthropic_client: MagicMock) -> None: """Test converting HostedMCPTool with authorization headers.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - tools = [ - HostedMCPTool( - name="test-mcp", - url="https://example.com/mcp", - headers={"authorization": "Bearer token123"}, - ) - ] + chat_options = ChatOptions( + tools=[ + HostedMCPTool( + name="test-mcp", + url="https://example.com/mcp", + headers={"authorization": "Bearer token123"}, + ) + ] + ) - result = chat_client._convert_tools_to_anthropic_format(tools) + result = chat_client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "mcp_servers" in result @@ -348,12 +349,12 @@ def test_convert_tools_to_anthropic_format_mcp_with_auth(mock_anthropic_client: assert result["mcp_servers"][0]["authorization_token"] == "Bearer token123" -def test_convert_tools_to_anthropic_format_dict_tool(mock_anthropic_client: MagicMock) -> None: +def test_prepare_tools_for_anthropic_dict_tool(mock_anthropic_client: MagicMock) -> None: """Test converting dict tool to Anthropic format.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - tools = [{"type": "custom", "name": "custom_tool", "description": "A custom tool"}] + chat_options = ChatOptions(tools=[{"type": "custom", "name": "custom_tool", "description": "A custom tool"}]) - result = chat_client._convert_tools_to_anthropic_format(tools) + result = chat_client._prepare_tools_for_anthropic(chat_options) assert result is not None assert "tools" in result @@ -361,11 +362,12 @@ def test_convert_tools_to_anthropic_format_dict_tool(mock_anthropic_client: Magi assert result["tools"][0]["name"] == "custom_tool" -def test_convert_tools_to_anthropic_format_none(mock_anthropic_client: MagicMock) -> None: +def 
test_prepare_tools_for_anthropic_none(mock_anthropic_client: MagicMock) -> None: """Test converting None tools.""" chat_client = create_test_anthropic_client(mock_anthropic_client) + chat_options = ChatOptions() - result = chat_client._convert_tools_to_anthropic_format(None) + result = chat_client._prepare_tools_for_anthropic(chat_options) assert result is None @@ -373,14 +375,14 @@ def test_convert_tools_to_anthropic_format_none(mock_anthropic_client: MagicMock # Run Options Tests -async def test_create_run_options_basic(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with basic ChatOptions.""" +async def test_prepare_options_basic(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with basic ChatOptions.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ChatMessage(role=Role.USER, text="Hello")] chat_options = ChatOptions(max_tokens=100, temperature=0.7) - run_options = chat_client._create_run_options(messages, chat_options) + run_options = chat_client._prepare_options(messages, chat_options) assert run_options["model"] == chat_client.model_id assert run_options["max_tokens"] == 100 @@ -388,8 +390,8 @@ async def test_create_run_options_basic(mock_anthropic_client: MagicMock) -> Non assert "messages" in run_options -async def test_create_run_options_with_system_message(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with system message.""" +async def test_prepare_options_with_system_message(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with system message.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ @@ -398,52 +400,52 @@ async def test_create_run_options_with_system_message(mock_anthropic_client: Mag ] chat_options = ChatOptions() - run_options = chat_client._create_run_options(messages, chat_options) + run_options = chat_client._prepare_options(messages, chat_options) assert run_options["system"] == "You are helpful." 
assert len(run_options["messages"]) == 1 # System message not in messages list -async def test_create_run_options_with_tool_choice_auto(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with auto tool choice.""" +async def test_prepare_options_with_tool_choice_auto(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with auto tool choice.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ChatMessage(role=Role.USER, text="Hello")] chat_options = ChatOptions(tool_choice="auto") - run_options = chat_client._create_run_options(messages, chat_options) + run_options = chat_client._prepare_options(messages, chat_options) assert run_options["tool_choice"]["type"] == "auto" -async def test_create_run_options_with_tool_choice_required(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with required tool choice.""" +async def test_prepare_options_with_tool_choice_required(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with required tool choice.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ChatMessage(role=Role.USER, text="Hello")] # For required with specific function, need to pass as dict chat_options = ChatOptions(tool_choice={"mode": "required", "required_function_name": "get_weather"}) - run_options = chat_client._create_run_options(messages, chat_options) + run_options = chat_client._prepare_options(messages, chat_options) assert run_options["tool_choice"]["type"] == "tool" assert run_options["tool_choice"]["name"] == "get_weather" -async def test_create_run_options_with_tool_choice_none(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with none tool choice.""" +async def test_prepare_options_with_tool_choice_none(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with none tool choice.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ChatMessage(role=Role.USER, text="Hello")] chat_options = ChatOptions(tool_choice="none") - run_options = chat_client._create_run_options(messages, chat_options) + run_options = chat_client._prepare_options(messages, chat_options) assert run_options["tool_choice"]["type"] == "none" -async def test_create_run_options_with_tools(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with tools.""" +async def test_prepare_options_with_tools(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with tools.""" chat_client = create_test_anthropic_client(mock_anthropic_client) @ai_function @@ -454,32 +456,32 @@ def get_weather(location: str) -> str: messages = [ChatMessage(role=Role.USER, text="Hello")] chat_options = ChatOptions(tools=[get_weather]) - run_options = chat_client._create_run_options(messages, chat_options) + run_options = chat_client._prepare_options(messages, chat_options) assert "tools" in run_options assert len(run_options["tools"]) == 1 -async def test_create_run_options_with_stop_sequences(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with stop sequences.""" +async def test_prepare_options_with_stop_sequences(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with stop sequences.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ChatMessage(role=Role.USER, text="Hello")] chat_options = ChatOptions(stop=["STOP", "END"]) - run_options = chat_client._create_run_options(messages, chat_options) + run_options = 
chat_client._prepare_options(messages, chat_options) assert run_options["stop_sequences"] == ["STOP", "END"] -async def test_create_run_options_with_top_p(mock_anthropic_client: MagicMock) -> None: - """Test _create_run_options with top_p.""" +async def test_prepare_options_with_top_p(mock_anthropic_client: MagicMock) -> None: + """Test _prepare_options with top_p.""" chat_client = create_test_anthropic_client(mock_anthropic_client) messages = [ChatMessage(role=Role.USER, text="Hello")] chat_options = ChatOptions(top_p=0.9) - run_options = chat_client._create_run_options(messages, chat_options) + run_options = chat_client._prepare_options(messages, chat_options) assert run_options["top_p"] == 0.9 @@ -540,41 +542,41 @@ def test_process_message_with_tool_use(mock_anthropic_client: MagicMock) -> None assert response.finish_reason == FinishReason.TOOL_CALLS -def test_parse_message_usage_basic(mock_anthropic_client: MagicMock) -> None: - """Test _parse_message_usage with basic usage.""" +def test_parse_usage_from_anthropic_basic(mock_anthropic_client: MagicMock) -> None: + """Test _parse_usage_from_anthropic with basic usage.""" chat_client = create_test_anthropic_client(mock_anthropic_client) usage = BetaUsage(input_tokens=10, output_tokens=5) - result = chat_client._parse_message_usage(usage) + result = chat_client._parse_usage_from_anthropic(usage) assert result is not None assert result.input_token_count == 10 assert result.output_token_count == 5 -def test_parse_message_usage_none(mock_anthropic_client: MagicMock) -> None: - """Test _parse_message_usage with None usage.""" +def test_parse_usage_from_anthropic_none(mock_anthropic_client: MagicMock) -> None: + """Test _parse_usage_from_anthropic with None usage.""" chat_client = create_test_anthropic_client(mock_anthropic_client) - result = chat_client._parse_message_usage(None) + result = chat_client._parse_usage_from_anthropic(None) assert result is None -def test_parse_message_contents_text(mock_anthropic_client: MagicMock) -> None: - """Test _parse_message_contents with text content.""" +def test_parse_contents_from_anthropic_text(mock_anthropic_client: MagicMock) -> None: + """Test _parse_contents_from_anthropic with text content.""" chat_client = create_test_anthropic_client(mock_anthropic_client) content = [BetaTextBlock(type="text", text="Hello!")] - result = chat_client._parse_message_contents(content) + result = chat_client._parse_contents_from_anthropic(content) assert len(result) == 1 assert isinstance(result[0], TextContent) assert result[0].text == "Hello!" 
-def test_parse_message_contents_tool_use(mock_anthropic_client: MagicMock) -> None: - """Test _parse_message_contents with tool use.""" +def test_parse_contents_from_anthropic_tool_use(mock_anthropic_client: MagicMock) -> None: + """Test _parse_contents_from_anthropic with tool use.""" chat_client = create_test_anthropic_client(mock_anthropic_client) content = [ @@ -585,7 +587,7 @@ def test_parse_message_contents_tool_use(mock_anthropic_client: MagicMock) -> No input={"location": "SF"}, ) ] - result = chat_client._parse_message_contents(content) + result = chat_client._parse_contents_from_anthropic(content) assert len(result) == 1 assert isinstance(result[0], FunctionCallContent) diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py index 839687fbaf..50d18bbdc1 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py @@ -278,22 +278,13 @@ async def _inner_get_streaming_response( chat_options: ChatOptions, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: - # Extract necessary state from messages and options - run_options, required_action_results = await self._create_run_options(messages, chat_options, **kwargs) - - # Get the thread ID - thread_id: str | None = ( - chat_options.conversation_id - if chat_options.conversation_id is not None - else run_options.get("conversation_id", self.thread_id) - ) - - # Determine which agent to use and create if needed + # prepare + run_options, required_action_results = await self._prepare_options(messages, chat_options, **kwargs) agent_id = await self._get_agent_id_or_create(run_options) - # Process and yield each update from the stream + # execute and process async for update in self._process_stream( - *(await self._create_agent_stream(thread_id, agent_id, run_options, required_action_results)) + *(await self._create_agent_stream(agent_id, run_options, required_action_results)) ): yield update @@ -342,7 +333,6 @@ async def _get_agent_id_or_create(self, run_options: dict[str, Any] | None = Non async def _create_agent_stream( self, - thread_id: str | None, agent_id: str, run_options: dict[str, Any], required_action_results: list[FunctionResultContent | FunctionApprovalResponseContent] | None, @@ -352,14 +342,14 @@ async def _create_agent_stream( Returns: tuple: (stream, final_thread_id) """ + thread_id = run_options.pop("thread_id", None) + # Get any active run for this thread thread_run = await self._get_active_thread_run(thread_id) stream: AsyncAgentRunStream[AsyncAgentEventHandler[Any]] | AsyncAgentEventHandler[Any] handler: AsyncAgentEventHandler[Any] = AsyncAgentEventHandler() - tool_run_id, tool_outputs, tool_approvals = self._convert_required_action_to_tool_output( - required_action_results - ) + tool_run_id, tool_outputs, tool_approvals = self._prepare_tool_outputs_for_azure_ai(required_action_results) if ( thread_run is not None @@ -421,19 +411,11 @@ async def _prepare_thread( # No thread ID was provided, so create a new thread. 
thread = await self.agents_client.threads.create( - tool_resources=run_options.get("tool_resources"), metadata=run_options.get("metadata") + tool_resources=run_options.get("tool_resources"), + metadata=run_options.get("metadata"), + messages=run_options.get("additional_messages"), ) - thread_id = thread.id - # workaround for: https://github.com/Azure/azure-sdk-for-python/issues/42805 - # this occurs when otel is enabled - # once fixed, in the function above, readd: - # `messages=run_options.pop("additional_messages")` - for msg in run_options.pop("additional_messages", []): - await self.agents_client.messages.create( - thread_id=thread_id, role=msg.role, content=msg.content, metadata=msg.metadata - ) - # and remove until here. - return thread_id + return thread.id def _extract_url_citations( self, message_delta_chunk: MessageDeltaChunk, azure_search_tool_calls: list[dict[str, Any]] @@ -611,7 +593,7 @@ async def _process_stream( "submit_tool_outputs", "submit_tool_approval", ]: - function_call_contents = self._create_function_call_contents( + function_call_contents = self._parse_function_calls_from_azure_ai( event_data, response_id ) if function_call_contents: @@ -753,8 +735,8 @@ def _capture_azure_search_tool_calls( except Exception as ex: logger.debug(f"Failed to capture Azure AI Search tool call: {ex}") - def _create_function_call_contents(self, event_data: ThreadRun, response_id: str | None) -> list[Contents]: - """Create function call contents from a tool action event.""" + def _parse_function_calls_from_azure_ai(self, event_data: ThreadRun, response_id: str | None) -> list[Contents]: + """Parse function call contents from an Azure AI tool action event.""" if isinstance(event_data, ThreadRun) and event_data.required_action is not None: if isinstance(event_data.required_action, SubmitToolOutputsAction): return [ @@ -815,117 +797,197 @@ def _prepare_tool_choice(self, chat_options: ChatOptions) -> None: chat_options.tool_choice = chat_tool_mode - async def _create_run_options( + async def _prepare_options( self, messages: MutableSequence[ChatMessage], - chat_options: ChatOptions | None, + chat_options: ChatOptions, **kwargs: Any, ) -> tuple[dict[str, Any], list[FunctionResultContent | FunctionApprovalResponseContent] | None]: - run_options: dict[str, Any] = {**kwargs} - agent_definition = await self._load_agent_definition_if_needed() - if chat_options is not None: - run_options["max_completion_tokens"] = chat_options.max_tokens - if chat_options.model_id is not None: - run_options["model"] = chat_options.model_id - else: - run_options["model"] = self.model_id - run_options["top_p"] = chat_options.top_p - run_options["temperature"] = chat_options.temperature - run_options["parallel_tool_calls"] = chat_options.allow_multiple_tool_calls - - tool_definitions: list[ToolDefinition | dict[str, Any]] = [] - - # Add tools from existing agent - if agent_definition is not None: - # Don't include function tools, since they will be passed through chat_options.tools - agent_tools = [tool for tool in agent_definition.tools if not isinstance(tool, FunctionToolDefinition)] - if agent_tools: - tool_definitions.extend(agent_tools) - if agent_definition.tool_resources: - run_options["tool_resources"] = agent_definition.tool_resources - - if chat_options.tool_choice is not None: - if chat_options.tool_choice != "none" and chat_options.tools: - # Add run tools - tool_definitions.extend(await self._prep_tools(chat_options.tools, run_options)) - - # Handle MCP tool resources for approval mode - mcp_tools = [tool 
for tool in chat_options.tools if isinstance(tool, HostedMCPTool)] - if mcp_tools: - mcp_resources = [] - for mcp_tool in mcp_tools: - server_label = mcp_tool.name.replace(" ", "_") - mcp_resource: dict[str, Any] = {"server_label": server_label} - - # Add headers if they exist - if mcp_tool.headers: - mcp_resource["headers"] = mcp_tool.headers - - if mcp_tool.approval_mode is not None: - match mcp_tool.approval_mode: - case str(): - # Map agent framework approval modes to Azure AI approval modes - approval_mode = ( - "always" if mcp_tool.approval_mode == "always_require" else "never" - ) - mcp_resource["require_approval"] = approval_mode - case _: - if "always_require_approval" in mcp_tool.approval_mode: - mcp_resource["require_approval"] = { - "always": mcp_tool.approval_mode["always_require_approval"] - } - elif "never_require_approval" in mcp_tool.approval_mode: - mcp_resource["require_approval"] = { - "never": mcp_tool.approval_mode["never_require_approval"] - } - - mcp_resources.append(mcp_resource) - - # Add MCP resources to tool_resources - if "tool_resources" not in run_options: - run_options["tool_resources"] = {} - run_options["tool_resources"]["mcp"] = mcp_resources - - if chat_options.tool_choice == "none": - run_options["tool_choice"] = AgentsToolChoiceOptionMode.NONE - elif chat_options.tool_choice == "auto": - run_options["tool_choice"] = AgentsToolChoiceOptionMode.AUTO - elif ( - isinstance(chat_options.tool_choice, ToolMode) - and chat_options.tool_choice == "required" - and chat_options.tool_choice.required_function_name is not None - ): - run_options["tool_choice"] = AgentsNamedToolChoice( - type=AgentsNamedToolChoiceType.FUNCTION, - function=FunctionName(name=chat_options.tool_choice.required_function_name), - ) + # Use to_dict with exclusions for properties handled separately + run_options: dict[str, Any] = chat_options.to_dict( + exclude={ + "type", + "instructions", # handled via messages + "tools", # handled separately + "tool_choice", # handled separately + "response_format", # handled separately + "additional_properties", # handled separately + "frequency_penalty", # not supported + "presence_penalty", # not supported + "user", # not supported + "stop", # not supported + "logit_bias", # not supported + "seed", # not supported + "store", # not supported + } + ) - if tool_definitions: - run_options["tools"] = tool_definitions + # Translation between ChatOptions and Azure AI Agents API + translations = { + "model_id": "model", + "allow_multiple_tool_calls": "parallel_tool_calls", + "max_tokens": "max_completion_tokens", + } + for old_key, new_key in translations.items(): + if old_key in run_options and old_key != new_key: + run_options[new_key] = run_options.pop(old_key) + + # model id fallback + if not run_options.get("model"): + run_options["model"] = self.model_id + + # tools and tool_choice + if tool_definitions := await self._prepare_tool_definitions_and_resources( + chat_options, agent_definition, run_options + ): + run_options["tools"] = tool_definitions - if chat_options.response_format is not None: - run_options["response_format"] = ResponseFormatJsonSchemaType( - json_schema=ResponseFormatJsonSchema( - name=chat_options.response_format.__name__, - schema=chat_options.response_format.model_json_schema(), - ) + if tool_choice := self._prepare_tool_choice_mode(chat_options): + run_options["tool_choice"] = tool_choice + + # response format + if chat_options.response_format is not None: + run_options["response_format"] = ResponseFormatJsonSchemaType( + 
json_schema=ResponseFormatJsonSchema( + name=chat_options.response_format.__name__, + schema=chat_options.response_format.model_json_schema(), ) + ) + + # messages + additional_messages, instructions, required_action_results = self._prepare_messages(messages) + if additional_messages: + run_options["additional_messages"] = additional_messages + + # Add instruction from existing agent at the beginning + if ( + agent_definition is not None + and agent_definition.instructions + and agent_definition.instructions not in instructions + ): + instructions.insert(0, agent_definition.instructions) + + if instructions: + run_options["instructions"] = "\n".join(instructions) + + # thread_id resolution (conversation_id takes precedence, then kwargs, then instance default) + run_options["thread_id"] = chat_options.conversation_id or kwargs.get("conversation_id") or self.thread_id + + return run_options, required_action_results + + def _prepare_tool_choice_mode( + self, chat_options: ChatOptions + ) -> AgentsToolChoiceOptionMode | AgentsNamedToolChoice | None: + """Prepare the tool choice mode for Azure AI Agents API.""" + if chat_options.tool_choice is None: + return None + if chat_options.tool_choice == "none": + return AgentsToolChoiceOptionMode.NONE + if chat_options.tool_choice == "auto": + return AgentsToolChoiceOptionMode.AUTO + if ( + isinstance(chat_options.tool_choice, ToolMode) + and chat_options.tool_choice == "required" + and chat_options.tool_choice.required_function_name is not None + ): + return AgentsNamedToolChoice( + type=AgentsNamedToolChoiceType.FUNCTION, + function=FunctionName(name=chat_options.tool_choice.required_function_name), + ) + return None + + async def _prepare_tool_definitions_and_resources( + self, + chat_options: ChatOptions, + agent_definition: Agent | None, + run_options: dict[str, Any], + ) -> list[ToolDefinition | dict[str, Any]]: + """Prepare tool definitions and resources for the run options.""" + tool_definitions: list[ToolDefinition | dict[str, Any]] = [] + + # Add tools from existing agent (exclude function tools - passed via chat_options.tools) + if agent_definition is not None: + agent_tools = [tool for tool in agent_definition.tools if not isinstance(tool, FunctionToolDefinition)] + if agent_tools: + tool_definitions.extend(agent_tools) + if agent_definition.tool_resources: + run_options["tool_resources"] = agent_definition.tool_resources + + # Add run tools if tool_choice allows + if chat_options.tool_choice is not None and chat_options.tool_choice != "none" and chat_options.tools: + tool_definitions.extend(await self._prepare_tools_for_azure_ai(chat_options.tools, run_options)) + + # Handle MCP tool resources + mcp_resources = self._prepare_mcp_resources(chat_options.tools) + if mcp_resources: + if "tool_resources" not in run_options: + run_options["tool_resources"] = {} + run_options["tool_resources"]["mcp"] = mcp_resources + return tool_definitions + + def _prepare_mcp_resources( + self, tools: Sequence["ToolProtocol | MutableMapping[str, Any]"] + ) -> list[dict[str, Any]]: + """Prepare MCP tool resources for approval mode configuration.""" + mcp_tools = [tool for tool in tools if isinstance(tool, HostedMCPTool)] + if not mcp_tools: + return [] + + mcp_resources: list[dict[str, Any]] = [] + for mcp_tool in mcp_tools: + server_label = mcp_tool.name.replace(" ", "_") + mcp_resource: dict[str, Any] = {"server_label": server_label} + + if mcp_tool.headers: + mcp_resource["headers"] = mcp_tool.headers + + if mcp_tool.approval_mode is not None: + match 
mcp_tool.approval_mode: + case str(): + # Map agent framework approval modes to Azure AI approval modes + approval_mode = "always" if mcp_tool.approval_mode == "always_require" else "never" + mcp_resource["require_approval"] = approval_mode + case _: + if "always_require_approval" in mcp_tool.approval_mode: + mcp_resource["require_approval"] = { + "always": mcp_tool.approval_mode["always_require_approval"] + } + elif "never_require_approval" in mcp_tool.approval_mode: + mcp_resource["require_approval"] = { + "never": mcp_tool.approval_mode["never_require_approval"] + } + + mcp_resources.append(mcp_resource) + + return mcp_resources + + def _prepare_messages( + self, messages: MutableSequence[ChatMessage] + ) -> tuple[ + list[ThreadMessageOptions] | None, + list[str], + list[FunctionResultContent | FunctionApprovalResponseContent] | None, + ]: + """Prepare messages for the Azure AI Agents API. + + System/developer messages are turned into instructions, since there are no such message roles in Azure AI. + All other messages are added 1:1, treating assistant messages as agent messages + and everything else as user messages. + + Returns: + Tuple of (additional_messages, instructions, required_action_results) + """ instructions: list[str] = [] required_action_results: list[FunctionResultContent | FunctionApprovalResponseContent] | None = None - additional_messages: list[ThreadMessageOptions] | None = None - # System/developer messages are turned into instructions, since there is no such message roles in Azure AI. - # All other messages are added 1:1, treating assistant messages as agent messages - # and everything else as user messages. for chat_message in messages: if chat_message.role.value in ["system", "developer"]: for text_content in [content for content in chat_message.contents if isinstance(content, TextContent)]: instructions.append(text_content.text) - continue message_contents: list[MessageInputContentBlock] = [] @@ -942,7 +1004,7 @@ async def _create_run_options( elif isinstance(content.raw_representation, MessageInputContentBlock): message_contents.append(content.raw_representation) - if len(message_contents) > 0: + if message_contents: if additional_messages is None: additional_messages = [] additional_messages.append( @@ -952,26 +1014,12 @@ async def _create_run_options( ) ) - if additional_messages is not None: - run_options["additional_messages"] = additional_messages - - # Add instruction from existing agent at the beginning - if ( - agent_definition is not None - and agent_definition.instructions - and agent_definition.instructions not in instructions - ): - instructions.insert(0, agent_definition.instructions) - - if len(instructions) > 0: - run_options["instructions"] = "".join(instructions) - - return run_options, required_action_results + return additional_messages, instructions, required_action_results - async def _prep_tools( + async def _prepare_tools_for_azure_ai( self, tools: Sequence["ToolProtocol | MutableMapping[str, Any]"], run_options: dict[str, Any] | None = None ) -> list[ToolDefinition | dict[str, Any]]: - """Prepare tool definitions for the run options.""" + """Prepare tool definitions for the Azure AI Agents API.""" tool_definitions: list[ToolDefinition | dict[str, Any]] = [] for tool in tools: match tool: @@ -1044,10 +1092,11 @@ async def _prep_tools( raise ServiceInitializationError(f"Unsupported tool type: {type(tool)}") return tool_definitions - def _convert_required_action_to_tool_output( + def _prepare_tool_outputs_for_azure_ai( self,
required_action_results: list[FunctionResultContent | FunctionApprovalResponseContent] | None, ) -> tuple[str | None, list[ToolOutput] | None, list[ToolApproval] | None]: + """Prepare function results and approvals for submission to the Azure AI API.""" run_id: str | None = None tool_outputs: list[ToolOutput] | None = None tool_approvals: list[ToolApproval] | None = None diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_client.py index f4d5328f03..e10fc19068 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_client.py @@ -28,10 +28,6 @@ ) from azure.core.credentials_async import AsyncTokenCredential from azure.core.exceptions import ResourceNotFoundError -from openai.types.responses.parsed_response import ( - ParsedResponse, -) -from openai.types.responses.response import Response as OpenAIResponse from pydantic import BaseModel, ValidationError from ._shared import AzureAISettings @@ -41,6 +37,11 @@ else: from typing_extensions import Self # pragma: no cover +if sys.version_info >= (3, 12): + from typing import override # type: ignore # pragma: no cover +else: + from typing_extensions import override # type: ignore[import] # pragma: no cover + logger = get_logger("agent_framework.azure") @@ -368,53 +369,21 @@ async def _close_client_if_needed(self) -> None: if self._should_close_client: await self.project_client.close() - def _prepare_input(self, messages: MutableSequence[ChatMessage]) -> tuple[list[ChatMessage], str | None]: - """Prepare input from messages and convert system/developer messages to instructions.""" - result: list[ChatMessage] = [] - instructions_list: list[str] = [] - instructions: str | None = None - - # System/developer messages are turned into instructions, since there is no such message roles in Azure AI. - for message in messages: - if message.role.value in ["system", "developer"]: - for text_content in [content for content in message.contents if isinstance(content, TextContent)]: - instructions_list.append(text_content.text) - else: - result.append(message) - - if len(instructions_list) > 0: - instructions = "".join(instructions_list) - - return result, instructions - - async def prepare_options( + @override + async def _prepare_options( self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any, ) -> dict[str, Any]: """Take ChatOptions and create the specific options for Azure AI.""" - prepared_messages, instructions = self._prepare_input(messages) - run_options = await super().prepare_options(prepared_messages, chat_options, **kwargs) - + prepared_messages, instructions = self._prepare_messages_for_azure_ai(messages) + run_options = await super()._prepare_options(prepared_messages, chat_options, **kwargs) if not self._is_application_endpoint: # Application-scoped response APIs do not support "agent" property. 
agent_reference = await self._get_agent_reference_or_create(run_options, instructions) run_options["extra_body"] = {"agent": agent_reference} - conversation_id = chat_options.conversation_id or self.conversation_id - - # Handle different conversation ID formats - if conversation_id: - if conversation_id.startswith("resp_"): - # For response IDs, set previous_response_id and remove conversation property - run_options.pop("conversation", None) - run_options["previous_response_id"] = conversation_id - elif conversation_id.startswith("conv_"): - # For conversation IDs, set conversation and remove previous_response_id property - run_options.pop("previous_response_id", None) - run_options["conversation"] = conversation_id - # Remove properties that are not supported on request level # but were configured on agent level exclude = ["model", "tools", "response_format", "temperature", "top_p"] @@ -424,7 +393,33 @@ async def prepare_options( return run_options - async def initialize_client(self) -> None: + @override + def _get_current_conversation_id(self, chat_options: ChatOptions, **kwargs: Any) -> str | None: + """Get the current conversation ID from chat options or kwargs.""" + return chat_options.conversation_id or kwargs.get("conversation_id") or self.conversation_id + + def _prepare_messages_for_azure_ai( + self, messages: MutableSequence[ChatMessage] + ) -> tuple[list[ChatMessage], str | None]: + """Prepare input from messages and convert system/developer messages to instructions.""" + result: list[ChatMessage] = [] + instructions_list: list[str] = [] + instructions: str | None = None + + # System/developer messages are turned into instructions, since there are no such message roles in Azure AI. + for message in messages: + if message.role.value in ["system", "developer"]: + for text_content in [content for content in message.contents if isinstance(content, TextContent)]: + instructions_list.append(text_content.text) + else: + result.append(message) + + if len(instructions_list) > 0: + instructions = "".join(instructions_list) + + return result, instructions + + async def _initialize_client(self) -> None: """Initialize OpenAI client.""" self.client = self.project_client.get_openai_client() # type: ignore @@ -442,7 +437,8 @@ def _update_agent_name_and_description(self, agent_name: str | None, description if description and not self.agent_description: self.agent_description = description - def get_mcp_tool(self, tool: HostedMCPTool) -> Any: + @staticmethod + def _prepare_mcp_tool(tool: HostedMCPTool) -> MCPTool: # type: ignore[override] """Get MCP tool from HostedMCPTool.""" mcp = MCPTool(server_label=tool.name.replace(" ", "_"), server_url=str(tool.url)) @@ -460,17 +456,3 @@ def get_mcp_tool(self, tool: HostedMCPTool) -> Any: mcp["require_approval"] = {"never": {"tool_names": list(never_require_approvals)}} return mcp - - def get_conversation_id( - self, response: OpenAIResponse | ParsedResponse[BaseModel], store: bool | None - ) -> str | None: - """Get the conversation ID from the response if store is True.""" - if store is False: - return None - # If conversation ID exists, it means that we operate with conversation - # so we use conversation ID as input and output. - if response.conversation and response.conversation.id: - return response.conversation.id - # If conversation ID doesn't exist, we operate with responses - # so we use response ID as input and output.
- return response.id diff --git a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py index f1b4dafb63..134a3586b0 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py @@ -367,33 +367,33 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_missing_model( await chat_client._get_agent_id_or_create() # type: ignore -async def test_azure_ai_chat_client_create_run_options_basic(mock_agents_client: MagicMock) -> None: - """Test _create_run_options with basic ChatOptions.""" +async def test_azure_ai_chat_client_prepare_options_basic(mock_agents_client: MagicMock) -> None: + """Test _prepare_options with basic ChatOptions.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) messages = [ChatMessage(role=Role.USER, text="Hello")] chat_options = ChatOptions(max_tokens=100, temperature=0.7) - run_options, tool_results = await chat_client._create_run_options(messages, chat_options) # type: ignore + run_options, tool_results = await chat_client._prepare_options(messages, chat_options) # type: ignore assert run_options is not None assert tool_results is None -async def test_azure_ai_chat_client_create_run_options_no_chat_options(mock_agents_client: MagicMock) -> None: - """Test _create_run_options with no ChatOptions.""" +async def test_azure_ai_chat_client_prepare_options_no_chat_options(mock_agents_client: MagicMock) -> None: + """Test _prepare_options with default ChatOptions.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) messages = [ChatMessage(role=Role.USER, text="Hello")] - run_options, tool_results = await chat_client._create_run_options(messages, None) # type: ignore + run_options, tool_results = await chat_client._prepare_options(messages, ChatOptions()) # type: ignore assert run_options is not None assert tool_results is None -async def test_azure_ai_chat_client_create_run_options_with_image_content(mock_agents_client: MagicMock) -> None: - """Test _create_run_options with image content.""" +async def test_azure_ai_chat_client_prepare_options_with_image_content(mock_agents_client: MagicMock) -> None: + """Test _prepare_options with image content.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -403,7 +403,7 @@ async def test_azure_ai_chat_client_create_run_options_with_image_content(mock_a image_content = UriContent(uri="https://example.com/image.jpg", media_type="image/jpeg") messages = [ChatMessage(role=Role.USER, contents=[image_content])] - run_options, _ = await chat_client._create_run_options(messages, None) # type: ignore + run_options, _ = await chat_client._prepare_options(messages, ChatOptions()) # type: ignore assert "additional_messages" in run_options assert len(run_options["additional_messages"]) == 1 @@ -412,11 +412,11 @@ async def test_azure_ai_chat_client_create_run_options_with_image_content(mock_a assert len(message.content) == 1 -def test_azure_ai_chat_client_convert_function_results_to_tool_output_none(mock_agents_client: MagicMock) -> None: - """Test _convert_required_action_to_tool_output with None input.""" +def test_azure_ai_chat_client_prepare_tool_outputs_for_azure_ai_none(mock_agents_client: MagicMock) -> None: + """Test _prepare_tool_outputs_for_azure_ai with None input.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) - run_id, tool_outputs, tool_approvals = 
chat_client._convert_required_action_to_tool_output(None) # type: ignore + run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai(None) # type: ignore assert run_id is None assert tool_outputs is None @@ -484,8 +484,8 @@ def test_azure_ai_chat_client_update_agent_name_and_description_with_none_input( assert chat_client.agent_description is None -async def test_azure_ai_chat_client_create_run_options_with_messages(mock_agents_client: MagicMock) -> None: - """Test _create_run_options with different message types.""" +async def test_azure_ai_chat_client_prepare_options_with_messages(mock_agents_client: MagicMock) -> None: + """Test _prepare_options with different message types.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) # Test with system message (becomes instruction) @@ -494,7 +494,7 @@ async def test_azure_ai_chat_client_create_run_options_with_messages(mock_agents ChatMessage(role=Role.USER, text="Hello"), ] - run_options, _ = await chat_client._create_run_options(messages, None) # type: ignore + run_options, _ = await chat_client._prepare_options(messages, ChatOptions()) # type: ignore assert "instructions" in run_options assert "You are a helpful assistant" in run_options["instructions"] @@ -565,8 +565,8 @@ async def test_azure_ai_chat_client_prepare_thread_cancels_active_run(mock_agent mock_agents_client.runs.cancel.assert_called_once_with("test-thread", "run_123") -def test_azure_ai_chat_client_create_function_call_contents_basic(mock_agents_client: MagicMock) -> None: - """Test _create_function_call_contents with basic function call.""" +def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_basic(mock_agents_client: MagicMock) -> None: + """Test _parse_function_calls_from_azure_ai with basic function call.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) mock_tool_call = MagicMock(spec=RequiredFunctionToolCall) @@ -580,7 +580,7 @@ def test_azure_ai_chat_client_create_function_call_contents_basic(mock_agents_cl mock_event_data = MagicMock(spec=ThreadRun) mock_event_data.required_action = mock_submit_action - result = chat_client._create_function_call_contents(mock_event_data, "response_123") # type: ignore + result = chat_client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore assert len(result) == 1 assert isinstance(result[0], FunctionCallContent) @@ -588,22 +588,24 @@ def test_azure_ai_chat_client_create_function_call_contents_basic(mock_agents_cl assert result[0].call_id == '["response_123", "call_123"]' -def test_azure_ai_chat_client_create_function_call_contents_no_submit_action(mock_agents_client: MagicMock) -> None: - """Test _create_function_call_contents when required_action is not SubmitToolOutputsAction.""" +def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_no_submit_action( + mock_agents_client: MagicMock, +) -> None: + """Test _parse_function_calls_from_azure_ai when required_action is not SubmitToolOutputsAction.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) mock_event_data = MagicMock(spec=ThreadRun) mock_event_data.required_action = MagicMock() - result = chat_client._create_function_call_contents(mock_event_data, "response_123") # type: ignore + result = chat_client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore assert result == [] -def test_azure_ai_chat_client_create_function_call_contents_non_function_tool_call( +def 
test_azure_ai_chat_client_parse_function_calls_from_azure_ai_non_function_tool_call( mock_agents_client: MagicMock, ) -> None: - """Test _create_function_call_contents with non-function tool call.""" + """Test _parse_function_calls_from_azure_ai with non-function tool call.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) mock_tool_call = MagicMock() @@ -614,37 +616,37 @@ def test_azure_ai_chat_client_create_function_call_contents_non_function_tool_ca mock_event_data = MagicMock(spec=ThreadRun) mock_event_data.required_action = mock_submit_action - result = chat_client._create_function_call_contents(mock_event_data, "response_123") # type: ignore + result = chat_client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore assert result == [] -async def test_azure_ai_chat_client_create_run_options_with_none_tool_choice( +async def test_azure_ai_chat_client_prepare_options_with_none_tool_choice( mock_agents_client: MagicMock, ) -> None: - """Test _create_run_options with tool_choice set to 'none'.""" + """Test _prepare_options with tool_choice set to 'none'.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) chat_options = ChatOptions() chat_options.tool_choice = "none" - run_options, _ = await chat_client._create_run_options([], chat_options) # type: ignore + run_options, _ = await chat_client._prepare_options([], chat_options) # type: ignore from azure.ai.agents.models import AgentsToolChoiceOptionMode assert run_options["tool_choice"] == AgentsToolChoiceOptionMode.NONE -async def test_azure_ai_chat_client_create_run_options_with_auto_tool_choice( +async def test_azure_ai_chat_client_prepare_options_with_auto_tool_choice( mock_agents_client: MagicMock, ) -> None: - """Test _create_run_options with tool_choice set to 'auto'.""" + """Test _prepare_options with tool_choice set to 'auto'.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) chat_options = ChatOptions() chat_options.tool_choice = "auto" - run_options, _ = await chat_client._create_run_options([], chat_options) # type: ignore + run_options, _ = await chat_client._prepare_options([], chat_options) # type: ignore from azure.ai.agents.models import AgentsToolChoiceOptionMode @@ -669,10 +671,10 @@ async def test_azure_ai_chat_client_prepare_tool_choice_none_string( assert chat_options.tool_choice == ToolMode.NONE.mode -async def test_azure_ai_chat_client_create_run_options_tool_choice_required_specific_function( +async def test_azure_ai_chat_client_prepare_options_tool_choice_required_specific_function( mock_agents_client: MagicMock, ) -> None: - """Test _create_run_options with ToolMode.REQUIRED specifying a specific function name.""" + """Test _prepare_options with ToolMode.REQUIRED specifying a specific function name.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) required_tool_mode = ToolMode.REQUIRED("specific_function_name") @@ -682,7 +684,7 @@ async def test_azure_ai_chat_client_create_run_options_tool_choice_required_spec chat_options = ChatOptions(tools=[dict_tool], tool_choice=required_tool_mode) messages = [ChatMessage(role=Role.USER, text="Hello")] - run_options, _ = await chat_client._create_run_options(messages, chat_options) # type: ignore + run_options, _ = await chat_client._prepare_options(messages, chat_options) # type: ignore # Verify tool_choice is set to the specific named function assert "tool_choice" in run_options @@ -692,10 +694,10 @@ async def 
test_azure_ai_chat_client_create_run_options_tool_choice_required_spec assert tool_choice.function.name == "specific_function_name" # type: ignore -async def test_azure_ai_chat_client_create_run_options_with_response_format( +async def test_azure_ai_chat_client_prepare_options_with_response_format( mock_agents_client: MagicMock, ) -> None: - """Test _create_run_options with response_format configured.""" + """Test _prepare_options with response_format configured.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) class TestResponseModel(BaseModel): @@ -704,7 +706,7 @@ class TestResponseModel(BaseModel): chat_options = ChatOptions() chat_options.response_format = TestResponseModel - run_options, _ = await chat_client._create_run_options([], chat_options) # type: ignore + run_options, _ = await chat_client._prepare_options([], chat_options) # type: ignore assert "response_format" in run_options response_format = run_options["response_format"] @@ -720,8 +722,8 @@ def test_azure_ai_chat_client_service_url_method(mock_agents_client: MagicMock) assert url == "https://test-endpoint.com/" -async def test_azure_ai_chat_client_prep_tools_ai_function(mock_agents_client: MagicMock) -> None: - """Test _prep_tools with AIFunction tool.""" +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_ai_function(mock_agents_client: MagicMock) -> None: + """Test _prepare_tools_for_azure_ai with AIFunction tool.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -729,28 +731,28 @@ async def test_azure_ai_chat_client_prep_tools_ai_function(mock_agents_client: M mock_ai_function = MagicMock(spec=AIFunction) mock_ai_function.to_json_schema_spec.return_value = {"type": "function", "function": {"name": "test_function"}} - result = await chat_client._prep_tools([mock_ai_function]) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([mock_ai_function]) # type: ignore assert len(result) == 1 assert result[0] == {"type": "function", "function": {"name": "test_function"}} mock_ai_function.to_json_schema_spec.assert_called_once() -async def test_azure_ai_chat_client_prep_tools_code_interpreter(mock_agents_client: MagicMock) -> None: - """Test _prep_tools with HostedCodeInterpreterTool.""" +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_code_interpreter(mock_agents_client: MagicMock) -> None: + """Test _prepare_tools_for_azure_ai with HostedCodeInterpreterTool.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") code_interpreter_tool = HostedCodeInterpreterTool() - result = await chat_client._prep_tools([code_interpreter_tool]) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([code_interpreter_tool]) # type: ignore assert len(result) == 1 assert isinstance(result[0], CodeInterpreterToolDefinition) -async def test_azure_ai_chat_client_prep_tools_mcp_tool(mock_agents_client: MagicMock) -> None: - """Test _prep_tools with HostedMCPTool.""" +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_mcp_tool(mock_agents_client: MagicMock) -> None: + """Test _prepare_tools_for_azure_ai with HostedMCPTool.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -762,7 +764,7 @@ async def test_azure_ai_chat_client_prep_tools_mcp_tool(mock_agents_client: Magi mock_mcp_tool.definitions = [{"type": "mcp", "name": "test_mcp"}] mock_mcp_tool_class.return_value = mock_mcp_tool - result = await 
chat_client._prep_tools([mcp_tool]) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([mcp_tool]) # type: ignore assert len(result) == 1 assert result[0] == {"type": "mcp", "name": "test_mcp"} @@ -774,8 +776,8 @@ async def test_azure_ai_chat_client_prep_tools_mcp_tool(mock_agents_client: Magi assert set(call_args["allowed_tools"]) == {"tool1", "tool2"} -async def test_azure_ai_chat_client_create_run_options_mcp_never_require(mock_agents_client: MagicMock) -> None: - """Test _create_run_options with HostedMCPTool having never_require approval mode.""" +async def test_azure_ai_chat_client_prepare_options_mcp_never_require(mock_agents_client: MagicMock) -> None: + """Test _prepare_options with HostedMCPTool having never_require approval mode.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) mcp_tool = HostedMCPTool(name="Test MCP Tool", url="https://example.com/mcp", approval_mode="never_require") @@ -784,12 +786,12 @@ async def test_azure_ai_chat_client_create_run_options_mcp_never_require(mock_ag chat_options = ChatOptions(tools=[mcp_tool], tool_choice="auto") with patch("agent_framework_azure_ai._chat_client.McpTool") as mock_mcp_tool_class: - # Mock _prep_tools to avoid actual tool preparation + # Mock _prepare_tools_for_azure_ai to avoid actual tool preparation mock_mcp_tool_instance = MagicMock() mock_mcp_tool_instance.definitions = [{"type": "mcp", "name": "test_mcp"}] mock_mcp_tool_class.return_value = mock_mcp_tool_instance - run_options, _ = await chat_client._create_run_options(messages, chat_options) # type: ignore + run_options, _ = await chat_client._prepare_options(messages, chat_options) # type: ignore # Verify tool_resources is created with correct MCP approval structure assert "tool_resources" in run_options, ( @@ -803,8 +805,8 @@ async def test_azure_ai_chat_client_create_run_options_mcp_never_require(mock_ag assert mcp_resource["require_approval"] == "never" -async def test_azure_ai_chat_client_create_run_options_mcp_with_headers(mock_agents_client: MagicMock) -> None: - """Test _create_run_options with HostedMCPTool having headers.""" +async def test_azure_ai_chat_client_prepare_options_mcp_with_headers(mock_agents_client: MagicMock) -> None: + """Test _prepare_options with HostedMCPTool having headers.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client) # Test with headers @@ -817,12 +819,12 @@ async def test_azure_ai_chat_client_create_run_options_mcp_with_headers(mock_age chat_options = ChatOptions(tools=[mcp_tool], tool_choice="auto") with patch("agent_framework_azure_ai._chat_client.McpTool") as mock_mcp_tool_class: - # Mock _prep_tools to avoid actual tool preparation + # Mock _prepare_tools_for_azure_ai to avoid actual tool preparation mock_mcp_tool_instance = MagicMock() mock_mcp_tool_instance.definitions = [{"type": "mcp", "name": "test_mcp"}] mock_mcp_tool_class.return_value = mock_mcp_tool_instance - run_options, _ = await chat_client._create_run_options(messages, chat_options) # type: ignore + run_options, _ = await chat_client._prepare_options(messages, chat_options) # type: ignore # Verify tool_resources is created with headers assert "tool_resources" in run_options @@ -835,8 +837,10 @@ async def test_azure_ai_chat_client_create_run_options_mcp_with_headers(mock_age assert mcp_resource["headers"] == headers -async def test_azure_ai_chat_client_prep_tools_web_search_bing_grounding(mock_agents_client: MagicMock) -> None: - """Test _prep_tools with HostedWebSearchTool using Bing Grounding.""" +async 
def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_bing_grounding( + mock_agents_client: MagicMock, +) -> None: + """Test _prepare_tools_for_azure_ai with HostedWebSearchTool using Bing Grounding.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -856,7 +860,7 @@ async def test_azure_ai_chat_client_prep_tools_web_search_bing_grounding(mock_ag mock_bing_tool.definitions = [{"type": "bing_grounding"}] mock_bing_grounding.return_value = mock_bing_tool - result = await chat_client._prep_tools([web_search_tool]) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore assert len(result) == 1 assert result[0] == {"type": "bing_grounding"} @@ -868,10 +872,10 @@ async def test_azure_ai_chat_client_prep_tools_web_search_bing_grounding(mock_ag assert "connection_id" in call_args -async def test_azure_ai_chat_client_prep_tools_web_search_bing_grounding_with_connection_id( +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_bing_grounding_with_connection_id( mock_agents_client: MagicMock, ) -> None: - """Test _prep_tools with HostedWebSearchTool using Bing Grounding with connection_id (no HTTP call).""" + """Test _prepare_tools_... with HostedWebSearchTool using Bing Grounding with connection_id (no HTTP call).""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -888,15 +892,17 @@ async def test_azure_ai_chat_client_prep_tools_web_search_bing_grounding_with_co mock_bing_tool.definitions = [{"type": "bing_grounding"}] mock_bing_grounding.return_value = mock_bing_tool - result = await chat_client._prep_tools([web_search_tool]) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore assert len(result) == 1 assert result[0] == {"type": "bing_grounding"} mock_bing_grounding.assert_called_once_with(connection_id="direct-connection-id", count=3) -async def test_azure_ai_chat_client_prep_tools_web_search_custom_bing(mock_agents_client: MagicMock) -> None: - """Test _prep_tools with HostedWebSearchTool using Custom Bing Search.""" +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_custom_bing( + mock_agents_client: MagicMock, +) -> None: + """Test _prepare_tools_for_azure_ai with HostedWebSearchTool using Custom Bing Search.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -914,16 +920,16 @@ async def test_azure_ai_chat_client_prep_tools_web_search_custom_bing(mock_agent mock_custom_tool.definitions = [{"type": "bing_custom_search"}] mock_custom_bing.return_value = mock_custom_tool - result = await chat_client._prep_tools([web_search_tool]) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore assert len(result) == 1 assert result[0] == {"type": "bing_custom_search"} -async def test_azure_ai_chat_client_prep_tools_file_search_with_vector_stores( +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_file_search_with_vector_stores( mock_agents_client: MagicMock, ) -> None: - """Test _prep_tools with HostedFileSearchTool using vector stores.""" + """Test _prepare_tools_for_azure_ai with HostedFileSearchTool using vector stores.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -938,7 +944,7 @@ async def test_azure_ai_chat_client_prep_tools_file_search_with_vector_stores( mock_file_search.return_value = 
mock_file_tool run_options = {} - result = await chat_client._prep_tools([file_search_tool], run_options) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([file_search_tool], run_options) # type: ignore assert len(result) == 1 assert result[0] == {"type": "file_search"} @@ -973,7 +979,7 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_approvals( with patch("azure.ai.agents.models.AsyncAgentEventHandler", return_value=mock_handler): stream, final_thread_id = await chat_client._create_agent_stream( # type: ignore - "test-thread", "test-agent", {}, [approval_response] + "test-agent", {"thread_id": "test-thread"}, [approval_response] ) # Verify the approvals path was taken @@ -987,26 +993,26 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_approvals( assert call_args["tool_approvals"][0].approve is True -async def test_azure_ai_chat_client_prep_tools_dict_tool(mock_agents_client: MagicMock) -> None: - """Test _prep_tools with dictionary tool definition.""" +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_dict_tool(mock_agents_client: MagicMock) -> None: + """Test _prepare_tools_for_azure_ai with dictionary tool definition.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") dict_tool = {"type": "custom_tool", "config": {"param": "value"}} - result = await chat_client._prep_tools([dict_tool]) # type: ignore + result = await chat_client._prepare_tools_for_azure_ai([dict_tool]) # type: ignore assert len(result) == 1 assert result[0] == dict_tool -async def test_azure_ai_chat_client_prep_tools_unsupported_tool(mock_agents_client: MagicMock) -> None: - """Test _prep_tools with unsupported tool type.""" +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_unsupported_tool(mock_agents_client: MagicMock) -> None: + """Test _prepare_tools_for_azure_ai with unsupported tool type.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") unsupported_tool = "not_a_tool" with pytest.raises(ServiceInitializationError, match="Unsupported tool type: "): - await chat_client._prep_tools([unsupported_tool]) # type: ignore + await chat_client._prepare_tools_for_azure_ai([unsupported_tool]) # type: ignore async def test_azure_ai_chat_client_get_active_thread_run_with_active_run(mock_agents_client: MagicMock) -> None: @@ -1072,16 +1078,16 @@ async def test_azure_ai_chat_client_service_url(mock_agents_client: MagicMock) - assert result == "https://test-endpoint.com/" -async def test_azure_ai_chat_client_convert_required_action_to_tool_output_function_result( +async def test_azure_ai_chat_client_prepare_tool_outputs_for_azure_ai_function_result( mock_agents_client: MagicMock, ) -> None: - """Test _convert_required_action_to_tool_output with FunctionResultContent.""" + """Test _prepare_tool_outputs_for_azure_ai with FunctionResultContent.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Test with simple result function_result = FunctionResultContent(call_id='["run_123", "call_456"]', result="Simple result") - run_id, tool_outputs, tool_approvals = chat_client._convert_required_action_to_tool_output([function_result]) # type: ignore + run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore assert run_id == "run_123" assert tool_approvals is None @@ -1092,7 +1098,7 @@ async def 
test_azure_ai_chat_client_convert_required_action_to_tool_output_funct async def test_azure_ai_chat_client_convert_required_action_invalid_call_id(mock_agents_client: MagicMock) -> None: - """Test _convert_required_action_to_tool_output with invalid call_id format.""" + """Test _prepare_tool_outputs_for_azure_ai with invalid call_id format.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") @@ -1100,19 +1106,19 @@ async def test_azure_ai_chat_client_convert_required_action_invalid_call_id(mock function_result = FunctionResultContent(call_id="invalid_json", result="result") with pytest.raises(json.JSONDecodeError): - chat_client._convert_required_action_to_tool_output([function_result]) # type: ignore + chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore async def test_azure_ai_chat_client_convert_required_action_invalid_structure( mock_agents_client: MagicMock, ) -> None: - """Test _convert_required_action_to_tool_output with invalid call_id structure.""" + """Test _prepare_tool_outputs_for_azure_ai with invalid call_id structure.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Valid JSON but invalid structure (missing second element) function_result = FunctionResultContent(call_id='["run_123"]', result="result") - run_id, tool_outputs, tool_approvals = chat_client._convert_required_action_to_tool_output([function_result]) # type: ignore + run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore # Should return None values when structure is invalid assert run_id is None @@ -1123,7 +1129,7 @@ async def test_azure_ai_chat_client_convert_required_action_invalid_structure( async def test_azure_ai_chat_client_convert_required_action_serde_model_results( mock_agents_client: MagicMock, ) -> None: - """Test _convert_required_action_to_tool_output with BaseModel results.""" + """Test _prepare_tool_outputs_for_azure_ai with BaseModel results.""" class MockResult(SerializationMixin): def __init__(self, name: str, value: int): @@ -1136,7 +1142,7 @@ def __init__(self, name: str, value: int): mock_result = MockResult(name="test", value=42) function_result = FunctionResultContent(call_id='["run_123", "call_456"]', result=mock_result) - run_id, tool_outputs, tool_approvals = chat_client._convert_required_action_to_tool_output([function_result]) # type: ignore + run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore assert run_id == "run_123" assert tool_approvals is None @@ -1151,7 +1157,7 @@ def __init__(self, name: str, value: int): async def test_azure_ai_chat_client_convert_required_action_multiple_results( mock_agents_client: MagicMock, ) -> None: - """Test _convert_required_action_to_tool_output with multiple results.""" + """Test _prepare_tool_outputs_for_azure_ai with multiple results.""" class MockResult(SerializationMixin): def __init__(self, data: str): @@ -1164,7 +1170,7 @@ def __init__(self, data: str): results_list = [mock_basemodel, {"key": "value"}, "string_result"] function_result = FunctionResultContent(call_id='["run_123", "call_456"]', result=results_list) - run_id, tool_outputs, tool_approvals = chat_client._convert_required_action_to_tool_output([function_result]) # type: ignore + run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore assert run_id == "run_123" assert 
tool_outputs is not None @@ -1184,7 +1190,7 @@ def __init__(self, data: str): async def test_azure_ai_chat_client_convert_required_action_approval_response( mock_agents_client: MagicMock, ) -> None: - """Test _convert_required_action_to_tool_output with FunctionApprovalResponseContent.""" + """Test _prepare_tool_outputs_for_azure_ai with FunctionApprovalResponseContent.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Test with approval response - need to provide required fields @@ -1194,7 +1200,7 @@ async def test_azure_ai_chat_client_convert_required_action_approval_response( approved=True, ) - run_id, tool_outputs, tool_approvals = chat_client._convert_required_action_to_tool_output([approval_response]) # type: ignore + run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([approval_response]) # type: ignore assert run_id == "run_123" assert tool_outputs is None @@ -1204,10 +1210,10 @@ async def test_azure_ai_chat_client_convert_required_action_approval_response( assert tool_approvals[0].approve is True -async def test_azure_ai_chat_client_create_function_call_contents_approval_request( +async def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_approval_request( mock_agents_client: MagicMock, ) -> None: - """Test _create_function_call_contents with approval action.""" + """Test _parse_function_calls_from_azure_ai with approval action.""" chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Mock SubmitToolApprovalAction with RequiredMcpToolCall @@ -1222,7 +1228,7 @@ async def test_azure_ai_chat_client_create_function_call_contents_approval_reque mock_event_data = MagicMock(spec=ThreadRun) mock_event_data.required_action = mock_approval_action - result = chat_client._create_function_call_contents(mock_event_data, "response_123") # type: ignore + result = chat_client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore assert len(result) == 1 assert isinstance(result[0], FunctionApprovalRequestContent) @@ -1312,7 +1318,7 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_outputs( with patch("azure.ai.agents.models.AsyncAgentEventHandler", return_value=mock_handler): stream, final_thread_id = await chat_client._create_agent_stream( # type: ignore - thread_id="test-thread", agent_id="test-agent", run_options={}, required_action_results=[function_result] + agent_id="test-agent", run_options={"thread_id": "test-thread"}, required_action_results=[function_result] ) # Should call submit_tool_outputs_stream since we have matching run ID diff --git a/python/packages/azure-ai/tests/test_azure_ai_client.py b/python/packages/azure-ai/tests/test_azure_ai_client.py index 079b93d8c8..028e8fbdb8 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_client.py @@ -249,10 +249,10 @@ async def test_azure_ai_client_get_agent_reference_missing_model( await client._get_agent_reference_or_create({}, None) # type: ignore -async def test_azure_ai_client_prepare_input_with_system_messages( +async def test_azure_ai_client_prepare_messages_for_azure_ai_with_system_messages( mock_project_client: MagicMock, ) -> None: - """Test _prepare_input converts system/developer messages to instructions.""" + """Test _prepare_messages_for_azure_ai converts system/developer messages to instructions.""" client = create_test_azure_ai_client(mock_project_client) messages = [ @@ -261,7 +261,7 
@@ async def test_azure_ai_client_prepare_input_with_system_messages( ChatMessage(role=Role.ASSISTANT, contents=[TextContent(text="System response")]), ] - result_messages, instructions = client._prepare_input(messages) # type: ignore + result_messages, instructions = client._prepare_messages_for_azure_ai(messages) # type: ignore assert len(result_messages) == 2 assert result_messages[0].role == Role.USER @@ -269,10 +269,10 @@ async def test_azure_ai_client_prepare_input_with_system_messages( assert instructions == "You are a helpful assistant." -async def test_azure_ai_client_prepare_input_no_system_messages( +async def test_azure_ai_client_prepare_messages_for_azure_ai_no_system_messages( mock_project_client: MagicMock, ) -> None: - """Test _prepare_input with no system/developer messages.""" + """Test _prepare_messages_for_azure_ai with no system/developer messages.""" client = create_test_azure_ai_client(mock_project_client) messages = [ @@ -280,7 +280,7 @@ async def test_azure_ai_client_prepare_input_no_system_messages( ChatMessage(role=Role.ASSISTANT, contents=[TextContent(text="Hi there!")]), ] - result_messages, instructions = client._prepare_input(messages) # type: ignore + result_messages, instructions = client._prepare_messages_for_azure_ai(messages) # type: ignore assert len(result_messages) == 2 assert instructions is None @@ -294,14 +294,14 @@ async def test_azure_ai_client_prepare_options_basic(mock_project_client: MagicM chat_options = ChatOptions() with ( - patch.object(client.__class__.__bases__[0], "prepare_options", return_value={"model": "test-model"}), + patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}), patch.object( client, "_get_agent_reference_or_create", return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, ), ): - run_options = await client.prepare_options(messages, chat_options) + run_options = await client._prepare_options(messages, chat_options) assert "extra_body" in run_options assert run_options["extra_body"]["agent"]["name"] == "test-agent" @@ -329,14 +329,14 @@ async def test_azure_ai_client_prepare_options_with_application_endpoint( chat_options = ChatOptions() with ( - patch.object(client.__class__.__bases__[0], "prepare_options", return_value={"model": "test-model"}), + patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}), patch.object( client, "_get_agent_reference_or_create", return_value={"name": "test-agent", "version": "1", "type": "agent_reference"}, ), ): - run_options = await client.prepare_options(messages, chat_options) + run_options = await client._prepare_options(messages, chat_options) if expects_agent: assert "extra_body" in run_options @@ -369,14 +369,14 @@ async def test_azure_ai_client_prepare_options_with_application_project_client( chat_options = ChatOptions() with ( - patch.object(client.__class__.__bases__[0], "prepare_options", return_value={"model": "test-model"}), + patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}), patch.object( client, "_get_agent_reference_or_create", return_value={"name": "test-agent", "version": "1", "type": "agent_reference"}, ), ): - run_options = await client.prepare_options(messages, chat_options) + run_options = await client._prepare_options(messages, chat_options) if expects_agent: assert "extra_body" in run_options @@ -386,13 +386,13 @@ async def test_azure_ai_client_prepare_options_with_application_project_client( async 
def test_azure_ai_client_initialize_client(mock_project_client: MagicMock) -> None: - """Test initialize_client method.""" + """Test _initialize_client method.""" client = create_test_azure_ai_client(mock_project_client) mock_openai_client = MagicMock() mock_project_client.get_openai_client = MagicMock(return_value=mock_openai_client) - await client.initialize_client() + await client._initialize_client() assert client.client is mock_openai_client mock_project_client.get_openai_client.assert_called_once() @@ -727,7 +727,7 @@ async def test_azure_ai_client_prepare_options_excludes_response_format( with ( patch.object( client.__class__.__bases__[0], - "prepare_options", + "_prepare_options", return_value={"model": "test-model", "response_format": ResponseFormatModel}, ), patch.object( @@ -736,7 +736,7 @@ async def test_azure_ai_client_prepare_options_excludes_response_format( return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, ), ): - run_options = await client.prepare_options(messages, chat_options) + run_options = await client._prepare_options(messages, chat_options) # response_format should be excluded from final run options assert "response_format" not in run_options @@ -745,94 +745,8 @@ async def test_azure_ai_client_prepare_options_excludes_response_format( assert run_options["extra_body"]["agent"]["name"] == "test-agent" -async def test_azure_ai_client_prepare_options_with_resp_conversation_id( - mock_project_client: MagicMock, -) -> None: - """Test prepare_options with conversation ID starting with 'resp_'.""" - client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") - - messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] - chat_options = ChatOptions(conversation_id="resp_12345") - - with ( - patch.object( - client.__class__.__bases__[0], - "prepare_options", - return_value={"model": "test-model", "previous_response_id": "old_value", "conversation": "old_conv"}, - ), - patch.object( - client, - "_get_agent_reference_or_create", - return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, - ), - ): - run_options = await client.prepare_options(messages, chat_options) - - # Should set previous_response_id and remove conversation property - assert run_options["previous_response_id"] == "resp_12345" - assert "conversation" not in run_options - - -async def test_azure_ai_client_prepare_options_with_conv_conversation_id( - mock_project_client: MagicMock, -) -> None: - """Test prepare_options with conversation ID starting with 'conv_'.""" - client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") - - messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] - chat_options = ChatOptions(conversation_id="conv_67890") - - with ( - patch.object( - client.__class__.__bases__[0], - "prepare_options", - return_value={"model": "test-model", "previous_response_id": "old_value", "conversation": "old_conv"}, - ), - patch.object( - client, - "_get_agent_reference_or_create", - return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, - ), - ): - run_options = await client.prepare_options(messages, chat_options) - - # Should set conversation and remove previous_response_id property - assert run_options["conversation"] == "conv_67890" - assert "previous_response_id" not in run_options - - -async def test_azure_ai_client_prepare_options_with_client_conversation_id( - mock_project_client: 
MagicMock, -) -> None: - """Test prepare_options using client's default conversation ID when chat options don't have one.""" - client = create_test_azure_ai_client( - mock_project_client, agent_name="test-agent", agent_version="1.0", conversation_id="resp_client_default" - ) - - messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] - chat_options = ChatOptions() # No conversation_id specified - - with ( - patch.object( - client.__class__.__bases__[0], - "prepare_options", - return_value={"model": "test-model", "previous_response_id": "old_value", "conversation": "old_conv"}, - ), - patch.object( - client, - "_get_agent_reference_or_create", - return_value={"name": "test-agent", "version": "1.0", "type": "agent_reference"}, - ), - ): - run_options = await client.prepare_options(messages, chat_options) - - # Should use client's default conversation_id and set previous_response_id - assert run_options["previous_response_id"] == "resp_client_default" - assert "conversation" not in run_options - - def test_get_conversation_id_with_store_true_and_conversation_id() -> None: - """Test get_conversation_id returns conversation ID when store is True and conversation exists.""" + """Test _get_conversation_id returns conversation ID when store is True and conversation exists.""" client = create_test_azure_ai_client(MagicMock()) # Mock OpenAI response with conversation @@ -842,13 +756,13 @@ def test_get_conversation_id_with_store_true_and_conversation_id() -> None: mock_conversation.id = "conv_67890" mock_response.conversation = mock_conversation - result = client.get_conversation_id(mock_response, store=True) + result = client._get_conversation_id(mock_response, store=True) assert result == "conv_67890" def test_get_conversation_id_with_store_true_and_no_conversation() -> None: - """Test get_conversation_id returns response ID when store is True and no conversation exists.""" + """Test _get_conversation_id returns response ID when store is True and no conversation exists.""" client = create_test_azure_ai_client(MagicMock()) # Mock OpenAI response without conversation @@ -856,13 +770,13 @@ def test_get_conversation_id_with_store_true_and_no_conversation() -> None: mock_response.id = "resp_12345" mock_response.conversation = None - result = client.get_conversation_id(mock_response, store=True) + result = client._get_conversation_id(mock_response, store=True) assert result == "resp_12345" def test_get_conversation_id_with_store_true_and_empty_conversation_id() -> None: - """Test get_conversation_id returns response ID when store is True and conversation ID is empty.""" + """Test _get_conversation_id returns response ID when store is True and conversation ID is empty.""" client = create_test_azure_ai_client(MagicMock()) # Mock OpenAI response with conversation but empty ID @@ -872,13 +786,13 @@ def test_get_conversation_id_with_store_true_and_empty_conversation_id() -> None mock_conversation.id = "" mock_response.conversation = mock_conversation - result = client.get_conversation_id(mock_response, store=True) + result = client._get_conversation_id(mock_response, store=True) assert result == "resp_12345" def test_get_conversation_id_with_store_false() -> None: - """Test get_conversation_id returns None when store is False.""" + """Test _get_conversation_id returns None when store is False.""" client = create_test_azure_ai_client(MagicMock()) # Mock OpenAI response with conversation @@ -888,13 +802,13 @@ def test_get_conversation_id_with_store_false() -> None: mock_conversation.id = 
"conv_67890" mock_response.conversation = mock_conversation - result = client.get_conversation_id(mock_response, store=False) + result = client._get_conversation_id(mock_response, store=False) assert result is None def test_get_conversation_id_with_parsed_response_and_store_true() -> None: - """Test get_conversation_id works with ParsedResponse when store is True.""" + """Test _get_conversation_id works with ParsedResponse when store is True.""" client = create_test_azure_ai_client(MagicMock()) # Mock ParsedResponse with conversation @@ -904,13 +818,13 @@ def test_get_conversation_id_with_parsed_response_and_store_true() -> None: mock_conversation.id = "conv_parsed_67890" mock_response.conversation = mock_conversation - result = client.get_conversation_id(mock_response, store=True) + result = client._get_conversation_id(mock_response, store=True) assert result == "conv_parsed_67890" def test_get_conversation_id_with_parsed_response_no_conversation() -> None: - """Test get_conversation_id returns response ID with ParsedResponse when no conversation exists.""" + """Test _get_conversation_id returns response ID with ParsedResponse when no conversation exists.""" client = create_test_azure_ai_client(MagicMock()) # Mock ParsedResponse without conversation @@ -918,7 +832,7 @@ def test_get_conversation_id_with_parsed_response_no_conversation() -> None: mock_response.id = "resp_parsed_12345" mock_response.conversation = None - result = client.get_conversation_id(mock_response, store=True) + result = client._get_conversation_id(mock_response, store=True) assert result == "resp_parsed_12345" diff --git a/python/packages/core/agent_framework/_clients.py b/python/packages/core/agent_framework/_clients.py index 506a1be7cd..bfb2c3f7d4 100644 --- a/python/packages/core/agent_framework/_clients.py +++ b/python/packages/core/agent_framework/_clients.py @@ -501,7 +501,7 @@ async def get_response( stop: str | Sequence[str] | None = None, store: bool | None = None, temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, + tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = "auto", tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] @@ -535,6 +535,7 @@ async def get_response( store: Whether to store the response. temperature: The sampling temperature to use. tool_choice: The tool choice for the request. + Default is `auto`. tools: The tools to use for the request. top_p: The nucleus sampling probability to use. user: The user to associate with the request. @@ -595,7 +596,7 @@ async def get_streaming_response( stop: str | Sequence[str] | None = None, store: bool | None = None, temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, + tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = "auto", tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] @@ -629,6 +630,7 @@ async def get_streaming_response( store: Whether to store the response. temperature: The sampling temperature to use. tool_choice: The tool choice for the request. + Default is `auto`. tools: The tools to use for the request. top_p: The nucleus sampling probability to use. user: The user to associate with the request. 
diff --git a/python/packages/core/agent_framework/_mcp.py b/python/packages/core/agent_framework/_mcp.py index 37e0d2c54b..a25f359a59 100644 --- a/python/packages/core/agent_framework/_mcp.py +++ b/python/packages/core/agent_framework/_mcp.py @@ -63,21 +63,21 @@ ] -def _mcp_prompt_message_to_chat_message( +def _parse_message_from_mcp( mcp_type: types.PromptMessage | types.SamplingMessage, ) -> ChatMessage: - """Convert a MCP container type to a Agent Framework type.""" + """Parse an MCP container type into an Agent Framework type.""" return ChatMessage( role=Role(value=mcp_type.role), - contents=_mcp_type_to_ai_content(mcp_type.content), + contents=_parse_content_from_mcp(mcp_type.content), raw_representation=mcp_type, ) -def _mcp_call_tool_result_to_ai_contents( +def _parse_contents_from_mcp_tool_result( mcp_type: types.CallToolResult, ) -> list[Contents]: - """Convert a MCP container type to a Agent Framework type. + """Parse an MCP CallToolResult into Agent Framework content types. This function extracts the complete _meta field from CallToolResult objects and merges all metadata into the additional_properties field of converted @@ -111,7 +111,7 @@ def _mcp_call_tool_result_to_ai_contents( # Convert each content item and merge metadata result_contents = [] for item in mcp_type.content: - contents = _mcp_type_to_ai_content(item) + contents = _parse_content_from_mcp(item) if merged_meta_props: for content in contents: @@ -124,7 +124,7 @@ def _mcp_call_tool_result_to_ai_contents( return result_contents -def _mcp_type_to_ai_content( +def _parse_content_from_mcp( mcp_type: types.ImageContent | types.TextContent | types.AudioContent @@ -142,7 +142,7 @@ def _mcp_type_to_ai_content( | types.ToolResultContent ], ) -> list[Contents]: - """Convert a MCP type to a Agent Framework type.""" + """Parse an MCP type into an Agent Framework type.""" mcp_types = mcp_type if isinstance(mcp_type, Sequence) else [mcp_type] return_types: list[Contents] = [] for mcp_type in mcp_types: @@ -178,7 +178,7 @@ def _mcp_type_to_ai_content( return_types.append( FunctionResultContent( call_id=mcp_type.toolUseId, - result=_mcp_type_to_ai_content(mcp_type.content) + result=_parse_content_from_mcp(mcp_type.content) if mcp_type.content else mcp_type.structuredContent, exception=Exception() if mcp_type.isError else None, @@ -211,10 +211,10 @@ def _mcp_type_to_ai_content( return return_types -def _ai_content_to_mcp_types( +def _prepare_content_for_mcp( content: Contents, ) -> types.TextContent | types.ImageContent | types.AudioContent | types.EmbeddedResource | types.ResourceLink | None: - """Convert a BaseContent type to a MCP type.""" + """Prepare an Agent Framework content type for MCP.""" match content: case TextContent(): return types.TextContent(type="text", text=content.text) @@ -253,15 +253,15 @@ def _ai_content_to_mcp_types( return None -def _chat_message_to_mcp_types( +def _prepare_message_for_mcp( content: ChatMessage, ) -> list[types.TextContent | types.ImageContent | types.AudioContent | types.EmbeddedResource | types.ResourceLink]: - """Convert a ChatMessage to a list of MCP types.""" + """Prepare a ChatMessage for MCP format.""" messages: list[ types.TextContent | types.ImageContent | types.AudioContent | types.EmbeddedResource | types.ResourceLink ] = [] for item in content.contents: - mcp_content = _ai_content_to_mcp_types(item) + mcp_content = _prepare_content_for_mcp(item) if mcp_content: messages.append(mcp_content) return messages @@ -469,7 +469,7 @@ async def sampling_callback( logger.debug("Sampling 
callback called with params: %s", params) messages: list[ChatMessage] = [] for msg in params.messages: - messages.append(_mcp_prompt_message_to_chat_message(msg)) + messages.append(_parse_message_from_mcp(msg)) try: response = await self.chat_client.get_response( messages, @@ -487,7 +487,7 @@ async def sampling_callback( code=types.INTERNAL_ERROR, message="Failed to get chat message content.", ) - mcp_contents = _chat_message_to_mcp_types(response.messages[0]) + mcp_contents = _prepare_message_for_mcp(response.messages[0]) # grab the first content that is of type TextContent or ImageContent mcp_content = next( (content for content in mcp_contents if isinstance(content, (types.TextContent, types.ImageContent))), @@ -692,7 +692,7 @@ async def call_tool(self, tool_name: str, **kwargs: Any) -> list[Contents]: k: v for k, v in kwargs.items() if k not in {"chat_options", "tools", "tool_choice", "thread"} } try: - return _mcp_call_tool_result_to_ai_contents( + return _parse_contents_from_mcp_tool_result( await self.session.call_tool(tool_name, arguments=filtered_kwargs) ) except McpError as mcp_exc: @@ -724,7 +724,7 @@ async def get_prompt(self, prompt_name: str, **kwargs: Any) -> list[ChatMessage] ) try: prompt_result = await self.session.get_prompt(prompt_name, arguments=kwargs) - return [_mcp_prompt_message_to_chat_message(message) for message in prompt_result.messages] + return [_parse_message_from_mcp(message) for message in prompt_result.messages] except McpError as mcp_exc: raise ToolExecutionException(mcp_exc.error.message, inner_exception=mcp_exc) from mcp_exc except Exception as ex: diff --git a/python/packages/core/agent_framework/_tools.py b/python/packages/core/agent_framework/_tools.py index 2f7801c84b..dd14f0dec8 100644 --- a/python/packages/core/agent_framework/_tools.py +++ b/python/packages/core/agent_framework/_tools.py @@ -1779,11 +1779,6 @@ async def function_invocation_wrapper( response: "ChatResponse | None" = None fcc_messages: "list[ChatMessage]" = [] - # If tools are provided but tool_choice is not set, default to "auto" for function invocation - tools = _extract_tools(kwargs) - if tools and kwargs.get("tool_choice") is None: - kwargs["tool_choice"] = "auto" - for attempt_idx in range(config.max_iterations if config.enabled else 0): fcc_todo = _collect_approval_responses(prepped_messages) if fcc_todo: diff --git a/python/packages/core/agent_framework/azure/_chat_client.py b/python/packages/core/agent_framework/azure/_chat_client.py index a8bfec0427..59f74259a4 100644 --- a/python/packages/core/agent_framework/azure/_chat_client.py +++ b/python/packages/core/agent_framework/azure/_chat_client.py @@ -154,7 +154,7 @@ def __init__( ) @override - def _parse_text_from_choice(self, choice: Choice | ChunkChoice) -> TextContent | None: + def _parse_text_from_openai(self, choice: Choice | ChunkChoice) -> TextContent | None: """Parse the choice into a TextContent object. Overwritten from OpenAIBaseChatClient to deal with Azure On Your Data function. 
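The `_mcp.py` renames above illustrate the direction encoded in the new names: `_parse_*_from_*` converts service types into framework types, and `_prepare_*_for_*` goes the other way. A short round-trip sketch built only from the helpers renamed above (imports mirror the test module later in this diff; reaching into module-private helpers like this is only appropriate in tests):

```python
from mcp import types  # import path assumed for the MCP SDK

from agent_framework import TextContent  # path assumed
from agent_framework._mcp import _parse_content_from_mcp, _prepare_content_for_mcp

# Service -> framework: parse MCP content into framework Contents.
mcp_text = types.TextContent(type="text", text="Hello from MCP")
contents = _parse_content_from_mcp(mcp_text)
assert isinstance(contents[0], TextContent)
assert contents[0].text == "Hello from MCP"

# Framework -> service: prepare framework content for the MCP protocol.
round_tripped = _prepare_content_for_mcp(contents[0])
assert isinstance(round_tripped, types.TextContent)
assert round_tripped.text == "Hello from MCP"
```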
diff --git a/python/packages/core/agent_framework/openai/_assistants_client.py b/python/packages/core/agent_framework/openai/_assistants_client.py index 38ad9eabb3..e790a44940 100644 --- a/python/packages/core/agent_framework/openai/_assistants_client.py +++ b/python/packages/core/agent_framework/openai/_assistants_client.py @@ -164,7 +164,7 @@ async def __aexit__(self, exc_type: type[BaseException] | None, exc_val: BaseExc async def close(self) -> None: """Clean up any assistants we created.""" if self._should_delete_assistant and self.assistant_id is not None: - client = await self.ensure_client() + client = await self._ensure_client() await client.beta.assistants.delete(self.assistant_id) object.__setattr__(self, "assistant_id", None) object.__setattr__(self, "_should_delete_assistant", False) @@ -188,7 +188,7 @@ async def _inner_get_streaming_response( chat_options: ChatOptions, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: - # Extract necessary state from messages and options + # prepare run_options, tool_results = self._prepare_options(messages, chat_options, **kwargs) # Get the thread ID @@ -204,10 +204,10 @@ async def _inner_get_streaming_response( # Determine which assistant to use and create if needed assistant_id = await self._get_assistant_id_or_create() - # Create the streaming response + # execute stream, thread_id = await self._create_assistant_stream(thread_id, assistant_id, run_options, tool_results) - # Process and yield each update from the stream + # process async for update in self._process_stream_events(stream, thread_id): yield update @@ -222,7 +222,7 @@ async def _get_assistant_id_or_create(self) -> str: if not self.model_id: raise ServiceInitializationError("Parameter 'model_id' is required for assistant creation.") - client = await self.ensure_client() + client = await self._ensure_client() created_assistant = await client.beta.assistants.create( model=self.model_id, description=self.assistant_description, @@ -245,11 +245,11 @@ async def _create_assistant_stream( Returns: tuple: (stream, final_thread_id) """ - client = await self.ensure_client() + client = await self._ensure_client() # Get any active run for this thread thread_run = await self._get_active_thread_run(thread_id) - tool_run_id, tool_outputs = self._convert_function_results_to_tool_output(tool_results) + tool_run_id, tool_outputs = self._prepare_tool_outputs_for_assistants(tool_results) if thread_run is not None and tool_run_id is not None and tool_run_id == thread_run.id and tool_outputs: # There's an active run and we have tool results to submit, so submit the results. @@ -270,7 +270,7 @@ async def _create_assistant_stream( async def _get_active_thread_run(self, thread_id: str | None) -> Run | None: """Get any active run for the given thread.""" - client = await self.ensure_client() + client = await self._ensure_client() if thread_id is None: return None @@ -281,7 +281,7 @@ async def _get_active_thread_run(self, thread_id: str | None) -> Run | None: async def _prepare_thread(self, thread_id: str | None, thread_run: Run | None, run_options: dict[str, Any]) -> str: """Prepare the thread for a new run, creating or cleaning up as needed.""" - client = await self.ensure_client() + client = await self._ensure_client() if thread_id is None: # No thread ID was provided, so create a new thread. 
thread = await client.beta.threads.create( # type: ignore[reportDeprecated] @@ -330,7 +330,7 @@ async def _process_stream_events(self, stream: Any, thread_id: str) -> AsyncIter response_id=response_id, ) elif response.event == "thread.run.requires_action" and isinstance(response.data, Run): - contents = self._create_function_call_contents(response.data, response_id) + contents = self._parse_function_calls_from_assistants(response.data, response_id) if contents: yield ChatResponseUpdate( role=Role.ASSISTANT, @@ -371,8 +371,8 @@ async def _process_stream_events(self, stream: Any, thread_id: str) -> AsyncIter role=Role.ASSISTANT, ) - def _create_function_call_contents(self, event_data: Run, response_id: str | None) -> list[Contents]: - """Create function call contents from a tool action event.""" + def _parse_function_calls_from_assistants(self, event_data: Run, response_id: str | None) -> list[Contents]: + """Parse function call contents from an assistants tool action event.""" contents: list[Contents] = [] if event_data.required_action is not None: @@ -490,10 +490,11 @@ def _prepare_options( return run_options, tool_results - def _convert_function_results_to_tool_output( + def _prepare_tool_outputs_for_assistants( self, tool_results: list[FunctionResultContent] | None, ) -> tuple[str | None, list[ToolOutput] | None]: + """Prepare function results for submission to the assistants API.""" run_id: str | None = None tool_outputs: list[ToolOutput] | None = None diff --git a/python/packages/core/agent_framework/openai/_chat_client.py b/python/packages/core/agent_framework/openai/_chat_client.py index 7f0feb0fc7..940858b670 100644 --- a/python/packages/core/agent_framework/openai/_chat_client.py +++ b/python/packages/core/agent_framework/openai/_chat_client.py @@ -14,7 +14,7 @@ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk from openai.types.chat.chat_completion_chunk import Choice as ChunkChoice from openai.types.chat.chat_completion_message_custom_tool_call import ChatCompletionMessageCustomToolCall -from pydantic import BaseModel, ValidationError +from pydantic import ValidationError from .._clients import BaseChatClient from .._logging import get_logger @@ -69,10 +69,12 @@ async def _inner_get_response( chat_options: ChatOptions, **kwargs: Any, ) -> ChatResponse: - client = await self.ensure_client() + client = await self._ensure_client() + # prepare options_dict = self._prepare_options(messages, chat_options) try: - return self._create_chat_response( + # execute and process + return self._parse_response_from_openai( await client.chat.completions.create(stream=False, **options_dict), chat_options ) except BadRequestError as ex: @@ -98,14 +100,16 @@ async def _inner_get_streaming_response( chat_options: ChatOptions, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: - client = await self.ensure_client() + client = await self._ensure_client() + # prepare options_dict = self._prepare_options(messages, chat_options) options_dict["stream_options"] = {"include_usage": True} try: + # execute and process async for chunk in await client.chat.completions.create(stream=True, **options_dict): if len(chunk.choices) == 0 and chunk.usage is None: continue - yield self._create_chat_response_update(chunk) + yield self._parse_response_update_from_openai(chunk) except BadRequestError as ex: if ex.code == "content_filter": raise OpenAIContentFilterException( @@ -124,7 +128,9 @@ async def _inner_get_streaming_response( # region content creation - def _chat_to_tool_spec(self, tools: 
Sequence[ToolProtocol | MutableMapping[str, Any]]) -> list[dict[str, Any]]: + def _prepare_tools_for_openai( + self, tools: Sequence[ToolProtocol | MutableMapping[str, Any]] + ) -> list[dict[str, Any]]: chat_tools: list[dict[str, Any]] = [] for tool in tools: if isinstance(tool, ToolProtocol): @@ -157,51 +163,65 @@ def _process_web_search_tool( return None def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions) -> dict[str, Any]: - # Preprocess web search tool if it exists - options_dict = chat_options.to_dict( + run_options = chat_options.to_dict( exclude={ "type", "instructions", # included as system message - "allow_multiple_tool_calls", # handled separately + "response_format", # handled separately + "additional_properties", # handled separately } ) - if messages and "messages" not in options_dict: - options_dict["messages"] = self._prepare_chat_history_for_request(messages) - if "messages" not in options_dict: + # messages + if messages and "messages" not in run_options: + run_options["messages"] = self._prepare_messages_for_openai(messages) + if "messages" not in run_options: raise ServiceInvalidRequestError("Messages are required for chat completions") + + # Translation between ChatOptions and Chat Completion API + translations = { + "model_id": "model", + "allow_multiple_tool_calls": "parallel_tool_calls", + "max_tokens": "max_output_tokens", + } + for old_key, new_key in translations.items(): + if old_key in run_options and old_key != new_key: + run_options[new_key] = run_options.pop(old_key) + + # model id + if not run_options.get("model"): + if not self.model_id: + raise ValueError("model_id must be a non-empty string") + run_options["model"] = self.model_id + + # tools if chat_options.tools is not None: - web_search_options = self._process_web_search_tool(chat_options.tools) - if web_search_options: - options_dict["web_search_options"] = web_search_options - options_dict["tools"] = self._chat_to_tool_spec(chat_options.tools) - if chat_options.allow_multiple_tool_calls is not None: - options_dict["parallel_tool_calls"] = chat_options.allow_multiple_tool_calls - if not options_dict.get("tools", None): - options_dict.pop("tools", None) - options_dict.pop("parallel_tool_calls", None) - options_dict.pop("tool_choice", None) - - if "model_id" not in options_dict: - options_dict["model"] = self.model_id - else: - options_dict["model"] = options_dict.pop("model_id") - if ( - chat_options.response_format - and isinstance(chat_options.response_format, type) - and issubclass(chat_options.response_format, BaseModel) - ): - options_dict["response_format"] = type_to_response_format_param(chat_options.response_format) - if additional_properties := options_dict.pop("additional_properties", None): - for key, value in additional_properties.items(): - if value is not None: - options_dict[key] = value - if (tool_choice := options_dict.get("tool_choice")) and len(tool_choice.keys()) == 1: - options_dict["tool_choice"] = tool_choice["mode"] - return options_dict - - def _create_chat_response(self, response: ChatCompletion, chat_options: ChatOptions) -> "ChatResponse": - """Create a chat message content object from a choice.""" + # Preprocess web search tool if it exists + if web_search_options := self._process_web_search_tool(chat_options.tools): + run_options["web_search_options"] = web_search_options + run_options["tools"] = self._prepare_tools_for_openai(chat_options.tools) + if not run_options.get("tools", None): + run_options.pop("tools", None) + 
run_options.pop("parallel_tool_calls", None) + run_options.pop("tool_choice", None) + # tool choice when `tool_choice` is a dict with single key `mode`, extract the mode value + if (tool_choice := run_options.get("tool_choice")) and len(tool_choice.keys()) == 1: + run_options["tool_choice"] = tool_choice["mode"] + + # response format + if chat_options.response_format: + run_options["response_format"] = type_to_response_format_param(chat_options.response_format) + + # additional properties + additional_options = { + key: value for key, value in chat_options.additional_properties.items() if value is not None + } + if additional_options: + run_options.update(additional_options) + return run_options + + def _parse_response_from_openai(self, response: ChatCompletion, chat_options: ChatOptions) -> "ChatResponse": + """Parse a response from OpenAI into a ChatResponse.""" response_metadata = self._get_metadata_from_chat_response(response) messages: list[ChatMessage] = [] finish_reason: FinishReason | None = None @@ -210,15 +230,15 @@ def _create_chat_response(self, response: ChatCompletion, chat_options: ChatOpti if choice.finish_reason: finish_reason = FinishReason(value=choice.finish_reason) contents: list[Contents] = [] - if text_content := self._parse_text_from_choice(choice): + if text_content := self._parse_text_from_openai(choice): contents.append(text_content) - if parsed_tool_calls := [tool for tool in self._get_tool_calls_from_chat_choice(choice)]: + if parsed_tool_calls := [tool for tool in self._parse_tool_calls_from_openai(choice)]: contents.extend(parsed_tool_calls) messages.append(ChatMessage(role="assistant", contents=contents)) return ChatResponse( response_id=response.id, created_at=datetime.fromtimestamp(response.created, tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), - usage_details=self._usage_details_from_openai(response.usage) if response.usage else None, + usage_details=self._parse_usage_from_openai(response.usage) if response.usage else None, messages=messages, model_id=response.model, additional_properties=response_metadata, @@ -226,16 +246,16 @@ def _create_chat_response(self, response: ChatCompletion, chat_options: ChatOpti response_format=chat_options.response_format, ) - def _create_chat_response_update( + def _parse_response_update_from_openai( self, chunk: ChatCompletionChunk, ) -> ChatResponseUpdate: - """Create a streaming chat message content object from a choice.""" + """Parse a streaming response update from OpenAI.""" chunk_metadata = self._get_metadata_from_streaming_chat_response(chunk) if chunk.usage: return ChatResponseUpdate( role=Role.ASSISTANT, - contents=[UsageContent(details=self._usage_details_from_openai(chunk.usage), raw_representation=chunk)], + contents=[UsageContent(details=self._parse_usage_from_openai(chunk.usage), raw_representation=chunk)], model_id=chunk.model, additional_properties=chunk_metadata, response_id=chunk.id, @@ -245,11 +265,11 @@ def _create_chat_response_update( finish_reason: FinishReason | None = None for choice in chunk.choices: chunk_metadata.update(self._get_metadata_from_chat_choice(choice)) - contents.extend(self._get_tool_calls_from_chat_choice(choice)) + contents.extend(self._parse_tool_calls_from_openai(choice)) if choice.finish_reason: finish_reason = FinishReason(value=choice.finish_reason) - if text_content := self._parse_text_from_choice(choice): + if text_content := self._parse_text_from_openai(choice): contents.append(text_content) return ChatResponseUpdate( created_at=datetime.fromtimestamp(chunk.created, 
tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), @@ -263,7 +283,7 @@ def _create_chat_response_update( message_id=chunk.id, ) - def _usage_details_from_openai(self, usage: CompletionUsage) -> UsageDetails: + def _parse_usage_from_openai(self, usage: CompletionUsage) -> UsageDetails: details = UsageDetails( input_token_count=usage.prompt_tokens, output_token_count=usage.completion_tokens, @@ -285,7 +305,7 @@ def _usage_details_from_openai(self, usage: CompletionUsage) -> UsageDetails: details["prompt/cached_tokens"] = tokens return details - def _parse_text_from_choice(self, choice: Choice | ChunkChoice) -> TextContent | None: + def _parse_text_from_openai(self, choice: Choice | ChunkChoice) -> TextContent | None: """Parse the choice into a TextContent object.""" message = choice.message if isinstance(choice, Choice) else choice.delta if message.content: @@ -312,8 +332,8 @@ def _get_metadata_from_chat_choice(self, choice: Choice | ChunkChoice) -> dict[s "logprobs": getattr(choice, "logprobs", None), } - def _get_tool_calls_from_chat_choice(self, choice: Choice | ChunkChoice) -> list[Contents]: - """Get tool calls from a chat choice.""" + def _parse_tool_calls_from_openai(self, choice: Choice | ChunkChoice) -> list[Contents]: + """Parse tool calls from an OpenAI response choice.""" resp: list[Contents] = [] content = choice.message if isinstance(choice, Choice) else choice.delta if content and content.tool_calls: @@ -331,13 +351,13 @@ def _get_tool_calls_from_chat_choice(self, choice: Choice | ChunkChoice) -> list # When you enable asynchronous content filtering in Azure OpenAI, you may receive empty deltas return resp - def _prepare_chat_history_for_request( + def _prepare_messages_for_openai( self, chat_messages: Sequence[ChatMessage], role_key: str = "role", content_key: str = "content", ) -> list[dict[str, Any]]: - """Prepare the chat history for a request. + """Prepare the chat history for an OpenAI request. Allowing customization of the key names for role/author, and optionally overriding the role. @@ -355,14 +375,14 @@ def _prepare_chat_history_for_request( Returns: prepared_chat_history (Any): The prepared chat history for a request. 
""" - list_of_list = [self._openai_chat_message_parser(message) for message in chat_messages] + list_of_list = [self._prepare_message_for_openai(message) for message in chat_messages] # Flatten the list of lists into a single list return list(chain.from_iterable(list_of_list)) # region Parsers - def _openai_chat_message_parser(self, message: ChatMessage) -> list[dict[str, Any]]: - """Parse a chat message into the openai format.""" + def _prepare_message_for_openai(self, message: ChatMessage) -> list[dict[str, Any]]: + """Prepare a chat message for OpenAI.""" all_messages: list[dict[str, Any]] = [] for content in message.contents: # Skip approval content - it's internal framework state, not for the LLM @@ -372,13 +392,15 @@ def _openai_chat_message_parser(self, message: ChatMessage) -> list[dict[str, An args: dict[str, Any] = { "role": message.role.value if isinstance(message.role, Role) else message.role, } + if message.author_name and message.role != Role.TOOL: + args["name"] = message.author_name match content: case FunctionCallContent(): if all_messages and "tool_calls" in all_messages[-1]: # If the last message already has tool calls, append to it - all_messages[-1]["tool_calls"].append(self._openai_content_parser(content)) + all_messages[-1]["tool_calls"].append(self._prepare_content_for_openai(content)) else: - args["tool_calls"] = [self._openai_content_parser(content)] # type: ignore + args["tool_calls"] = [self._prepare_content_for_openai(content)] # type: ignore case FunctionResultContent(): args["tool_call_id"] = content.call_id if content.result is not None: @@ -387,13 +409,13 @@ def _openai_chat_message_parser(self, message: ChatMessage) -> list[dict[str, An if "content" not in args: args["content"] = [] # this is a list to allow multi-modal content - args["content"].append(self._openai_content_parser(content)) # type: ignore + args["content"].append(self._prepare_content_for_openai(content)) # type: ignore if "content" in args or "tool_calls" in args: all_messages.append(args) return all_messages - def _openai_content_parser(self, content: Contents) -> dict[str, Any]: - """Parse contents into the openai format.""" + def _prepare_content_for_openai(self, content: Contents) -> dict[str, Any]: + """Prepare content for OpenAI.""" match content: case FunctionCallContent(): args = json.dumps(content.arguments) if isinstance(content.arguments, Mapping) else content.arguments diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index ecdd7be660..746e50150e 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -89,28 +89,16 @@ async def _inner_get_response( chat_options: ChatOptions, **kwargs: Any, ) -> ChatResponse: - client = await self.ensure_client() - run_options = await self.prepare_options(messages, chat_options, **kwargs) - response_format = run_options.pop("response_format", None) - text_config = run_options.pop("text", None) - text_format, text_config = self._prepare_text_config(response_format=response_format, text_config=text_config) - if text_config: - run_options["text"] = text_config + client = await self._ensure_client() + # prepare + run_options = await self._prepare_options(messages, chat_options, **kwargs) try: - if not text_format: - response = await client.responses.create( - stream=False, - **run_options, - ) - chat_options.conversation_id = self.get_conversation_id(response, 
chat_options.store) - return self._create_response_content(response, chat_options=chat_options) - parsed_response: ParsedResponse[BaseModel] = await client.responses.parse( - text_format=text_format, - stream=False, - **run_options, - ) - chat_options.conversation_id = self.get_conversation_id(parsed_response, chat_options.store) - return self._create_response_content(parsed_response, chat_options=chat_options) + # execute and process + if "text_format" in run_options: + response = await client.responses.parse(stream=False, **run_options) + else: + response = await client.responses.create(stream=False, **run_options) + return self._parse_response_from_openai(response, chat_options=chat_options) except BadRequestError as ex: if ex.code == "content_filter": raise OpenAIContentFilterException( @@ -134,35 +122,23 @@ async def _inner_get_streaming_response( chat_options: ChatOptions, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: - client = await self.ensure_client() - run_options = await self.prepare_options(messages, chat_options, **kwargs) + client = await self._ensure_client() + # prepare + run_options = await self._prepare_options(messages, chat_options, **kwargs) function_call_ids: dict[int, tuple[str, str]] = {} # output_index: (call_id, name) - response_format = run_options.pop("response_format", None) - text_config = run_options.pop("text", None) - text_format, text_config = self._prepare_text_config(response_format=response_format, text_config=text_config) - if text_config: - run_options["text"] = text_config try: - if not text_format: - response = await client.responses.create( - stream=True, - **run_options, - ) - async for chunk in response: - update = self._create_streaming_response_content( + # execute and process + if "text_format" not in run_options: + async for chunk in await client.responses.create(stream=True, **run_options): + yield self._parse_chunk_from_openai( chunk, chat_options=chat_options, function_call_ids=function_call_ids ) - yield update return - async with client.responses.stream( - text_format=text_format, - **run_options, - ) as response: + async with client.responses.stream(**run_options) as response: async for chunk in response: - update = self._create_streaming_response_content( + yield self._parse_chunk_from_openai( chunk, chat_options=chat_options, function_call_ids=function_call_ids ) - yield update except BadRequestError as ex: if ex.code == "content_filter": raise OpenAIContentFilterException( @@ -179,33 +155,33 @@ async def _inner_get_streaming_response( inner_exception=ex, ) from ex - def _prepare_text_config( + def _prepare_response_and_text_format( self, *, response_format: Any, text_config: MutableMapping[str, Any] | None, ) -> tuple[type[BaseModel] | None, dict[str, Any] | None]: """Normalize response_format into Responses text configuration and parse target.""" - prepared_text = dict(text_config) if isinstance(text_config, MutableMapping) else None if text_config is not None and not isinstance(text_config, MutableMapping): raise ServiceInvalidRequestError("text must be a mapping when provided.") + text_config = cast(dict[str, Any], text_config) if isinstance(text_config, MutableMapping) else None if response_format is None: - return None, prepared_text + return None, text_config if isinstance(response_format, type) and issubclass(response_format, BaseModel): - if prepared_text and "format" in prepared_text: + if text_config and "format" in text_config: raise ServiceInvalidRequestError("response_format cannot be combined with explicit 
text.format.") - return response_format, prepared_text + return response_format, text_config if isinstance(response_format, Mapping): format_config = self._convert_response_format(cast("Mapping[str, Any]", response_format)) - if prepared_text is None: - prepared_text = {} - elif "format" in prepared_text and prepared_text["format"] != format_config: + if text_config is None: + text_config = {} + elif "format" in text_config and text_config["format"] != format_config: raise ServiceInvalidRequestError("Conflicting response_format definitions detected.") - prepared_text["format"] = format_config - return None, prepared_text + text_config["format"] = format_config + return None, text_config raise ServiceInvalidRequestError("response_format must be a Pydantic model or mapping.") @@ -245,23 +221,33 @@ def _convert_response_format(self, response_format: Mapping[str, Any]) -> dict[s raise ServiceInvalidRequestError("Unsupported response_format provided for Responses client.") - def get_conversation_id( + def _get_conversation_id( self, response: OpenAIResponse | ParsedResponse[BaseModel], store: bool | None ) -> str | None: """Get the conversation ID from the response if store is True.""" - return None if store is False else response.id + if store is False: + return None + # If conversation ID exists, it means that we operate with conversation + # so we use conversation ID as input and output. + if response.conversation and response.conversation.id: + return response.conversation.id + # If conversation ID doesn't exist, we operate with responses + # so we use response ID as input and output. + return response.id # region Prep methods - def _tools_to_response_tools( - self, tools: Sequence[ToolProtocol | MutableMapping[str, Any]] + def _prepare_tools_for_openai( + self, tools: Sequence[ToolProtocol | MutableMapping[str, Any]] | None ) -> list[ToolParam | dict[str, Any]]: response_tools: list[ToolParam | dict[str, Any]] = [] + if not tools: + return response_tools for tool in tools: if isinstance(tool, ToolProtocol): match tool: case HostedMCPTool(): - response_tools.append(self.get_mcp_tool(tool)) + response_tools.append(self._prepare_mcp_tool(tool)) case HostedCodeInterpreterTool(): tool_args: CodeInterpreterContainerCodeInterpreterToolAuto = {"type": "auto"} if tool.inputs: @@ -363,7 +349,8 @@ def _tools_to_response_tools( response_tools.append(tool_dict) return response_tools - def get_mcp_tool(self, tool: HostedMCPTool) -> Any: + @staticmethod + def _prepare_mcp_tool(tool: HostedMCPTool) -> Mcp: """Get MCP tool from HostedMCPTool.""" mcp: Mcp = { "type": "mcp", @@ -386,18 +373,13 @@ def get_mcp_tool(self, tool: HostedMCPTool) -> Any: return mcp - async def prepare_options( + async def _prepare_options( self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any, ) -> dict[str, Any]: """Take ChatOptions and create the specific options for Responses API.""" - conversation_id = kwargs.pop("conversation_id", None) - - if conversation_id: - chat_options.conversation_id = conversation_id - run_options: dict[str, Any] = chat_options.to_dict( exclude={ "type", @@ -407,12 +389,24 @@ async def prepare_options( "seed", # not supported "stop", # not supported "instructions", # already added as system message + "response_format", # handled separately + "conversation_id", # handled separately + "additional_properties", # handled separately } ) + # messages + request_input = self._prepare_messages_for_openai(messages) + if not request_input: + raise 
ServiceInvalidRequestError("Messages are required for chat completions") + run_options["input"] = request_input - if chat_options.response_format: - run_options["response_format"] = chat_options.response_format + # model id + if not run_options.get("model"): + if not self.model_id: + raise ValueError("model_id must be a non-empty string") + run_options["model"] = self.model_id + # translations between ChatOptions and Responses API translations = { "model_id": "model", "allow_multiple_tool_calls": "parallel_tool_calls", @@ -423,34 +417,53 @@ async def prepare_options( if old_key in run_options and old_key != new_key: run_options[new_key] = run_options.pop(old_key) + # Handle different conversation ID formats + if conversation_id := self._get_current_conversation_id(chat_options, **kwargs): + if conversation_id.startswith("resp_"): + # For response IDs, set previous_response_id and remove conversation property + run_options["previous_response_id"] = conversation_id + elif conversation_id.startswith("conv_"): + # For conversation IDs, set conversation and remove previous_response_id property + run_options["conversation"] = conversation_id + else: + # If the format is unrecognized, default to previous_response_id + run_options["previous_response_id"] = conversation_id + # tools - if chat_options.tools is None: - run_options.pop("parallel_tool_calls", None) + if tools := self._prepare_tools_for_openai(chat_options.tools): + run_options["tools"] = tools else: - run_options["tools"] = self._tools_to_response_tools(chat_options.tools) + run_options.pop("parallel_tool_calls", None) + run_options.pop("tool_choice", None) + # tool choice when `tool_choice` is a dict with single key `mode`, extract the mode value + if (tool_choice := run_options.get("tool_choice")) and len(tool_choice.keys()) == 1: + run_options["tool_choice"] = tool_choice["mode"] - # model id - if not run_options.get("model"): - if not self.model_id: - raise ValueError("model_id must be a non-empty string") - run_options["model"] = self.model_id + # additional properties + additional_options = { + key: value for key, value in chat_options.additional_properties.items() if value is not None + } + if additional_options: + run_options.update(additional_options) - # messages - request_input = self._prepare_chat_messages_for_request(messages) - if not request_input: - raise ServiceInvalidRequestError("Messages are required for chat completions") - run_options["input"] = request_input + # response format and text config (after additional_properties so user can pass text via additional_properties) + response_format = chat_options.response_format + text_config = run_options.pop("text", None) + response_format, text_config = self._prepare_response_and_text_format( + response_format=response_format, text_config=text_config + ) + if text_config: + run_options["text"] = text_config + if response_format: + run_options["text_format"] = response_format - # additional provider specific settings - if additional_properties := run_options.pop("additional_properties", None): - for key, value in additional_properties.items(): - if value is not None: - run_options[key] = value - if (tool_choice := run_options.get("tool_choice")) and len(tool_choice.keys()) == 1: - run_options["tool_choice"] = tool_choice["mode"] return run_options - def _prepare_chat_messages_for_request(self, chat_messages: Sequence[ChatMessage]) -> list[dict[str, Any]]: + def _get_current_conversation_id(self, chat_options: ChatOptions, **kwargs: Any) -> str | None: + """Get the current 
conversation ID from chat options or kwargs.""" + return chat_options.conversation_id or kwargs.get("conversation_id") + + def _prepare_messages_for_openai(self, chat_messages: Sequence[ChatMessage]) -> list[dict[str, Any]]: """Prepare the chat messages for a request. Allowing customization of the key names for role/author, and optionally overriding the role. @@ -476,16 +489,16 @@ def _prepare_chat_messages_for_request(self, chat_messages: Sequence[ChatMessage and "fc_id" in content.additional_properties ): call_id_to_id[content.call_id] = content.additional_properties["fc_id"] - list_of_list = [self._openai_chat_message_parser(message, call_id_to_id) for message in chat_messages] + list_of_list = [self._prepare_message_for_openai(message, call_id_to_id) for message in chat_messages] # Flatten the list of lists into a single list return list(chain.from_iterable(list_of_list)) - def _openai_chat_message_parser( + def _prepare_message_for_openai( self, message: ChatMessage, call_id_to_id: dict[str, str], ) -> list[dict[str, Any]]: - """Parse a chat message into the openai format.""" + """Prepare a chat message for the OpenAI Responses API format.""" all_messages: list[dict[str, Any]] = [] args: dict[str, Any] = { "role": message.role.value if isinstance(message.role, Role) else message.role, @@ -497,28 +510,28 @@ def _openai_chat_message_parser( continue case FunctionResultContent(): new_args: dict[str, Any] = {} - new_args.update(self._openai_content_parser(message.role, content, call_id_to_id)) + new_args.update(self._prepare_content_for_openai(message.role, content, call_id_to_id)) all_messages.append(new_args) case FunctionCallContent(): - function_call = self._openai_content_parser(message.role, content, call_id_to_id) + function_call = self._prepare_content_for_openai(message.role, content, call_id_to_id) all_messages.append(function_call) # type: ignore case FunctionApprovalResponseContent() | FunctionApprovalRequestContent(): - all_messages.append(self._openai_content_parser(message.role, content, call_id_to_id)) # type: ignore + all_messages.append(self._prepare_content_for_openai(message.role, content, call_id_to_id)) # type: ignore case _: if "content" not in args: args["content"] = [] - args["content"].append(self._openai_content_parser(message.role, content, call_id_to_id)) # type: ignore + args["content"].append(self._prepare_content_for_openai(message.role, content, call_id_to_id)) # type: ignore if "content" in args or "tool_calls" in args: all_messages.append(args) return all_messages - def _openai_content_parser( + def _prepare_content_for_openai( self, role: Role, content: Contents, call_id_to_id: dict[str, str], ) -> dict[str, Any]: - """Parse contents into the openai format.""" + """Prepare content for the OpenAI Responses API format.""" match content: case TextContent(): return { @@ -625,14 +638,13 @@ def _openai_content_parser( logger.debug("Unsupported content type passed (type: %s)", type(content)) return {} - # region Response creation methods - - def _create_response_content( + # region Parse methods + def _parse_response_from_openai( self, response: OpenAIResponse | ParsedResponse[BaseModel], chat_options: ChatOptions, ) -> "ChatResponse": - """Create a chat message content object from a choice.""" + """Parse an OpenAI Responses API response into a ChatResponse.""" structured_response: BaseModel | None = response.output_parsed if isinstance(response, ParsedResponse) else None # type: ignore[reportUnknownMemberType] metadata: dict[str, Any] = response.metadata or 
{} @@ -826,11 +838,9 @@ def _create_response_content( "raw_representation": response, } - conversation_id = self.get_conversation_id(response, chat_options.store) # type: ignore[reportArgumentType] - - if conversation_id: + if conversation_id := self._get_conversation_id(response, chat_options.store): args["conversation_id"] = conversation_id - if response.usage and (usage_details := self._usage_details_from_openai(response.usage)): + if response.usage and (usage_details := self._parse_usage_from_openai(response.usage)): args["usage_details"] = usage_details if structured_response: args["value"] = structured_response @@ -838,13 +848,13 @@ def _create_response_content( args["response_format"] = chat_options.response_format return ChatResponse(**args) - def _create_streaming_response_content( + def _parse_chunk_from_openai( self, event: OpenAIResponseStreamEvent, chat_options: ChatOptions, function_call_ids: dict[int, tuple[str, str]], ) -> ChatResponseUpdate: - """Create a streaming chat message content object from a choice.""" + """Parse an OpenAI Responses API streaming event into a ChatResponseUpdate.""" metadata: dict[str, Any] = {} contents: list[Contents] = [] conversation_id: str | None = None @@ -931,10 +941,10 @@ def _create_streaming_response_content( contents.append(TextReasoningContent(text=event.text, raw_representation=event)) metadata.update(self._get_metadata_from_response(event)) case "response.completed": - conversation_id = self.get_conversation_id(event.response, chat_options.store) + conversation_id = self._get_conversation_id(event.response, chat_options.store) model = event.response.model if event.response.usage: - usage = self._usage_details_from_openai(event.response.usage) + usage = self._parse_usage_from_openai(event.response.usage) if usage: contents.append(UsageContent(details=usage, raw_representation=event)) case "response.output_item.added": @@ -1102,7 +1112,7 @@ def _get_ann_value(key: str) -> Any: raw_representation=event, ) - def _usage_details_from_openai(self, usage: ResponseUsage) -> UsageDetails | None: + def _parse_usage_from_openai(self, usage: ResponseUsage) -> UsageDetails | None: details = UsageDetails( input_token_count=usage.input_tokens, output_token_count=usage.output_tokens, diff --git a/python/packages/core/agent_framework/openai/_shared.py b/python/packages/core/agent_framework/openai/_shared.py index e0df8844e4..77189168f1 100644 --- a/python/packages/core/agent_framework/openai/_shared.py +++ b/python/packages/core/agent_framework/openai/_shared.py @@ -160,16 +160,16 @@ def __init__(self, *, model_id: str | None = None, client: AsyncOpenAI | None = for key, value in kwargs.items(): setattr(self, key, value) - async def initialize_client(self) -> None: + async def _initialize_client(self) -> None: """Initialize OpenAI client asynchronously. Override in subclasses to initialize the OpenAI client asynchronously. 
""" pass - async def ensure_client(self) -> AsyncOpenAI: + async def _ensure_client(self) -> AsyncOpenAI: """Ensure OpenAI client is initialized.""" - await self.initialize_client() + await self._initialize_client() if self.client is None: raise ServiceInitializationError("OpenAI client is not initialized") diff --git a/python/packages/core/tests/azure/test_azure_chat_client.py b/python/packages/core/tests/azure/test_azure_chat_client.py index 1b7dbb904b..7da838529f 100644 --- a/python/packages/core/tests/azure/test_azure_chat_client.py +++ b/python/packages/core/tests/azure/test_azure_chat_client.py @@ -193,7 +193,7 @@ async def test_cmc( mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], stream=False, - messages=azure_chat_client._prepare_chat_history_for_request(chat_history), # type: ignore + messages=azure_chat_client._prepare_messages_for_openai(chat_history), # type: ignore ) @@ -216,7 +216,7 @@ async def test_cmc_with_logit_bias( mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], - messages=azure_chat_client._prepare_chat_history_for_request(chat_history), # type: ignore + messages=azure_chat_client._prepare_messages_for_openai(chat_history), # type: ignore stream=False, logit_bias=token_bias, ) @@ -241,7 +241,7 @@ async def test_cmc_with_stop( mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], - messages=azure_chat_client._prepare_chat_history_for_request(chat_history), # type: ignore + messages=azure_chat_client._prepare_messages_for_openai(chat_history), # type: ignore stream=False, stop=stop, ) @@ -311,7 +311,7 @@ async def test_azure_on_your_data( mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], - messages=azure_chat_client._prepare_chat_history_for_request(messages_out), # type: ignore + messages=azure_chat_client._prepare_messages_for_openai(messages_out), # type: ignore stream=False, extra_body=expected_data_settings, ) @@ -381,7 +381,7 @@ async def test_azure_on_your_data_string( mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], - messages=azure_chat_client._prepare_chat_history_for_request(messages_out), # type: ignore + messages=azure_chat_client._prepare_messages_for_openai(messages_out), # type: ignore stream=False, extra_body=expected_data_settings, ) @@ -438,7 +438,7 @@ async def test_azure_on_your_data_fail( mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], - messages=azure_chat_client._prepare_chat_history_for_request(messages_out), # type: ignore + messages=azure_chat_client._prepare_messages_for_openai(messages_out), # type: ignore stream=False, extra_body=expected_data_settings, ) @@ -584,7 +584,7 @@ async def test_get_streaming( mock_create.assert_awaited_once_with( model=azure_openai_unit_test_env["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"], stream=True, - messages=azure_chat_client._prepare_chat_history_for_request(chat_history), # type: ignore + messages=azure_chat_client._prepare_messages_for_openai(chat_history), # type: ignore # NOTE: The `stream_options={"include_usage": True}` is explicitly enforced in # `OpenAIChatCompletionBase._inner_get_streaming_response`. # To ensure consistency, we align the arguments here accordingly. 
diff --git a/python/packages/core/tests/core/test_mcp.py b/python/packages/core/tests/core/test_mcp.py index 93643da30f..18c90d64b3 100644 --- a/python/packages/core/tests/core/test_mcp.py +++ b/python/packages/core/tests/core/test_mcp.py @@ -24,14 +24,14 @@ ) from agent_framework._mcp import ( MCPTool, - _ai_content_to_mcp_types, - _chat_message_to_mcp_types, _get_input_model_from_mcp_prompt, _get_input_model_from_mcp_tool, - _mcp_call_tool_result_to_ai_contents, - _mcp_prompt_message_to_chat_message, - _mcp_type_to_ai_content, _normalize_mcp_name, + _parse_content_from_mcp, + _parse_contents_from_mcp_tool_result, + _parse_message_from_mcp, + _prepare_content_for_mcp, + _prepare_message_for_mcp, ) from agent_framework.exceptions import ToolException, ToolExecutionException @@ -60,7 +60,7 @@ def test_normalize_mcp_name(): def test_mcp_prompt_message_to_ai_content(): """Test conversion from MCP prompt message to AI content.""" mcp_message = types.PromptMessage(role="user", content=types.TextContent(type="text", text="Hello, world!")) - ai_content = _mcp_prompt_message_to_chat_message(mcp_message) + ai_content = _parse_message_from_mcp(mcp_message) assert isinstance(ai_content, ChatMessage) assert ai_content.role.value == "user" @@ -70,7 +70,7 @@ def test_mcp_prompt_message_to_ai_content(): assert ai_content.raw_representation == mcp_message -def test_mcp_call_tool_result_to_ai_contents(): +def test_parse_contents_from_mcp_tool_result(): """Test conversion from MCP tool result to AI contents.""" mcp_result = types.CallToolResult( content=[ @@ -79,7 +79,7 @@ def test_mcp_call_tool_result_to_ai_contents(): types.ImageContent(type="image", data=b"abc", mimeType="image/webp"), ] ) - ai_contents = _mcp_call_tool_result_to_ai_contents(mcp_result) + ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) assert len(ai_contents) == 3 assert isinstance(ai_contents[0], TextContent) @@ -100,7 +100,7 @@ def test_mcp_call_tool_result_with_meta_error(): _meta={"isError": True, "errorCode": "TOOL_ERROR", "errorMessage": "Tool execution failed"}, ) - ai_contents = _mcp_call_tool_result_to_ai_contents(mcp_result) + ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) assert len(ai_contents) == 1 assert isinstance(ai_contents[0], TextContent) @@ -131,7 +131,7 @@ def test_mcp_call_tool_result_with_meta_arbitrary_data(): }, ) - ai_contents = _mcp_call_tool_result_to_ai_contents(mcp_result) + ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) assert len(ai_contents) == 1 assert isinstance(ai_contents[0], TextContent) @@ -153,7 +153,7 @@ def test_mcp_call_tool_result_with_meta_merging_existing_properties(): text_content = types.TextContent(type="text", text="Test content") mcp_result = types.CallToolResult(content=[text_content], _meta={"newField": "newValue", "isError": False}) - ai_contents = _mcp_call_tool_result_to_ai_contents(mcp_result) + ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) assert len(ai_contents) == 1 content = ai_contents[0] @@ -169,7 +169,7 @@ def test_mcp_call_tool_result_with_meta_none(): mcp_result = types.CallToolResult(content=[types.TextContent(type="text", text="No meta test")]) # No _meta field set - ai_contents = _mcp_call_tool_result_to_ai_contents(mcp_result) + ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) assert len(ai_contents) == 1 assert isinstance(ai_contents[0], TextContent) @@ -191,7 +191,7 @@ def test_mcp_call_tool_result_regression_successful_workflow(): ] ) - ai_contents = 
_mcp_call_tool_result_to_ai_contents(mcp_result) + ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) # Verify basic conversion still works correctly assert len(ai_contents) == 2 @@ -213,7 +213,7 @@ def test_mcp_call_tool_result_regression_successful_workflow(): def test_mcp_content_types_to_ai_content_text(): """Test conversion of MCP text content to AI content.""" mcp_content = types.TextContent(type="text", text="Sample text") - ai_content = _mcp_type_to_ai_content(mcp_content)[0] + ai_content = _parse_content_from_mcp(mcp_content)[0] assert isinstance(ai_content, TextContent) assert ai_content.text == "Sample text" @@ -224,7 +224,7 @@ def test_mcp_content_types_to_ai_content_image(): """Test conversion of MCP image content to AI content.""" mcp_content = types.ImageContent(type="image", data="abc", mimeType="image/jpeg") mcp_content = types.ImageContent(type="image", data=b"abc", mimeType="image/jpeg") - ai_content = _mcp_type_to_ai_content(mcp_content)[0] + ai_content = _parse_content_from_mcp(mcp_content)[0] assert isinstance(ai_content, DataContent) assert ai_content.uri == "data:image/jpeg;base64,abc" @@ -235,7 +235,7 @@ def test_mcp_content_types_to_ai_content_image(): def test_mcp_content_types_to_ai_content_audio(): """Test conversion of MCP audio content to AI content.""" mcp_content = types.AudioContent(type="audio", data="def", mimeType="audio/wav") - ai_content = _mcp_type_to_ai_content(mcp_content)[0] + ai_content = _parse_content_from_mcp(mcp_content)[0] assert isinstance(ai_content, DataContent) assert ai_content.uri == "data:audio/wav;base64,def" @@ -251,7 +251,7 @@ def test_mcp_content_types_to_ai_content_resource_link(): name="test_resource", mimeType="application/json", ) - ai_content = _mcp_type_to_ai_content(mcp_content)[0] + ai_content = _parse_content_from_mcp(mcp_content)[0] assert isinstance(ai_content, UriContent) assert ai_content.uri == "https://example.com/resource" @@ -267,7 +267,7 @@ def test_mcp_content_types_to_ai_content_embedded_resource_text(): text="Embedded text content", ) mcp_content = types.EmbeddedResource(type="resource", resource=text_resource) - ai_content = _mcp_type_to_ai_content(mcp_content)[0] + ai_content = _parse_content_from_mcp(mcp_content)[0] assert isinstance(ai_content, TextContent) assert ai_content.text == "Embedded text content" @@ -283,7 +283,7 @@ def test_mcp_content_types_to_ai_content_embedded_resource_blob(): blob="data:application/octet-stream;base64,dGVzdCBkYXRh", ) mcp_content = types.EmbeddedResource(type="resource", resource=blob_resource) - ai_content = _mcp_type_to_ai_content(mcp_content)[0] + ai_content = _parse_content_from_mcp(mcp_content)[0] assert isinstance(ai_content, DataContent) assert ai_content.uri == "data:application/octet-stream;base64,dGVzdCBkYXRh" @@ -294,7 +294,7 @@ def test_mcp_content_types_to_ai_content_embedded_resource_blob(): def test_ai_content_to_mcp_content_types_text(): """Test conversion of AI text content to MCP content.""" ai_content = TextContent(text="Sample text") - mcp_content = _ai_content_to_mcp_types(ai_content) + mcp_content = _prepare_content_for_mcp(ai_content) assert isinstance(mcp_content, types.TextContent) assert mcp_content.type == "text" @@ -304,7 +304,7 @@ def test_ai_content_to_mcp_content_types_text(): def test_ai_content_to_mcp_content_types_data_image(): """Test conversion of AI data content to MCP content.""" ai_content = DataContent(uri="data:image/png;base64,xyz", media_type="image/png") - mcp_content = _ai_content_to_mcp_types(ai_content) + 
mcp_content = _prepare_content_for_mcp(ai_content) assert isinstance(mcp_content, types.ImageContent) assert mcp_content.type == "image" @@ -315,7 +315,7 @@ def test_ai_content_to_mcp_content_types_data_image(): def test_ai_content_to_mcp_content_types_data_audio(): """Test conversion of AI data content to MCP content.""" ai_content = DataContent(uri="data:audio/mpeg;base64,xyz", media_type="audio/mpeg") - mcp_content = _ai_content_to_mcp_types(ai_content) + mcp_content = _prepare_content_for_mcp(ai_content) assert isinstance(mcp_content, types.AudioContent) assert mcp_content.type == "audio" @@ -329,7 +329,7 @@ def test_ai_content_to_mcp_content_types_data_binary(): uri="data:application/octet-stream;base64,xyz", media_type="application/octet-stream", ) - mcp_content = _ai_content_to_mcp_types(ai_content) + mcp_content = _prepare_content_for_mcp(ai_content) assert isinstance(mcp_content, types.EmbeddedResource) assert mcp_content.type == "resource" @@ -340,7 +340,7 @@ def test_ai_content_to_mcp_content_types_data_binary(): def test_ai_content_to_mcp_content_types_uri(): """Test conversion of AI URI content to MCP content.""" ai_content = UriContent(uri="https://example.com/resource", media_type="application/json") - mcp_content = _ai_content_to_mcp_types(ai_content) + mcp_content = _prepare_content_for_mcp(ai_content) assert isinstance(mcp_content, types.ResourceLink) assert mcp_content.type == "resource_link" @@ -348,7 +348,7 @@ def test_ai_content_to_mcp_content_types_uri(): assert mcp_content.mimeType == "application/json" -def test_chat_message_to_mcp_types(): +def test_prepare_message_for_mcp(): message = ChatMessage( role="user", contents=[ @@ -356,7 +356,7 @@ def test_chat_message_to_mcp_types(): DataContent(uri="data:image/png;base64,xyz", media_type="image/png"), ], ) - mcp_contents = _chat_message_to_mcp_types(message) + mcp_contents = _prepare_message_for_mcp(message) assert len(mcp_contents) == 2 assert isinstance(mcp_contents[0], types.TextContent) assert isinstance(mcp_contents[1], types.ImageContent) diff --git a/python/packages/core/tests/openai/test_openai_assistants_client.py b/python/packages/core/tests/openai/test_openai_assistants_client.py index b9c32b14b5..861ccc73d1 100644 --- a/python/packages/core/tests/openai/test_openai_assistants_client.py +++ b/python/packages/core/tests/openai/test_openai_assistants_client.py @@ -463,9 +463,9 @@ async def test_openai_assistants_client_process_stream_events_requires_action(mo """Test _process_stream_events with thread.run.requires_action event.""" chat_client = create_test_openai_assistants_client(mock_async_openai) - # Mock the _create_function_call_contents method to return test content + # Mock the _parse_function_calls_from_assistants method to return test content test_function_content = FunctionCallContent(call_id="call-123", name="test_func", arguments={"arg": "value"}) - chat_client._create_function_call_contents = MagicMock(return_value=[test_function_content]) # type: ignore + chat_client._parse_function_calls_from_assistants = MagicMock(return_value=[test_function_content]) # type: ignore # Create a mock Run object mock_run = MagicMock(spec=Run) @@ -498,8 +498,8 @@ async def async_iterator() -> Any: assert update.contents[0] == test_function_content assert update.raw_representation == mock_run - # Verify _create_function_call_contents was called correctly - chat_client._create_function_call_contents.assert_called_once_with(mock_run, None) # type: ignore + # Verify _parse_function_calls_from_assistants was called 
correctly + chat_client._parse_function_calls_from_assistants.assert_called_once_with(mock_run, None) # type: ignore async def test_openai_assistants_client_process_stream_events_run_step_created(mock_async_openai: MagicMock) -> None: @@ -585,8 +585,8 @@ async def async_iterator() -> Any: assert update.raw_representation == mock_run -def test_openai_assistants_client_create_function_call_contents_basic(mock_async_openai: MagicMock) -> None: - """Test _create_function_call_contents with a simple function call.""" +def test_openai_assistants_client_parse_function_calls_from_assistants_basic(mock_async_openai: MagicMock) -> None: + """Test _parse_function_calls_from_assistants with a simple function call.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -605,7 +605,7 @@ def test_openai_assistants_client_create_function_call_contents_basic(mock_async # Call the method response_id = "response_456" - contents = chat_client._create_function_call_contents(mock_run, response_id) # type: ignore + contents = chat_client._parse_function_calls_from_assistants(mock_run, response_id) # type: ignore # Test that one function call content was created assert len(contents) == 1 @@ -825,24 +825,24 @@ def test_openai_assistants_client_prepare_options_with_image_content(mock_async_ assert message["content"][0]["image_url"]["url"] == "https://example.com/image.jpg" -def test_openai_assistants_client_convert_function_results_to_tool_output_empty(mock_async_openai: MagicMock) -> None: - """Test _convert_function_results_to_tool_output with empty list.""" +def test_openai_assistants_client_prepare_tool_outputs_for_assistants_empty(mock_async_openai: MagicMock) -> None: + """Test _prepare_tool_outputs_for_assistants with empty list.""" chat_client = create_test_openai_assistants_client(mock_async_openai) - run_id, tool_outputs = chat_client._convert_function_results_to_tool_output([]) # type: ignore + run_id, tool_outputs = chat_client._prepare_tool_outputs_for_assistants([]) # type: ignore assert run_id is None assert tool_outputs is None -def test_openai_assistants_client_convert_function_results_to_tool_output_valid(mock_async_openai: MagicMock) -> None: - """Test _convert_function_results_to_tool_output with valid function results.""" +def test_openai_assistants_client_prepare_tool_outputs_for_assistants_valid(mock_async_openai: MagicMock) -> None: + """Test _prepare_tool_outputs_for_assistants with valid function results.""" chat_client = create_test_openai_assistants_client(mock_async_openai) call_id = json.dumps(["run-123", "call-456"]) function_result = FunctionResultContent(call_id=call_id, result="Function executed successfully") - run_id, tool_outputs = chat_client._convert_function_results_to_tool_output([function_result]) # type: ignore + run_id, tool_outputs = chat_client._prepare_tool_outputs_for_assistants([function_result]) # type: ignore assert run_id == "run-123" assert tool_outputs is not None @@ -851,10 +851,10 @@ def test_openai_assistants_client_convert_function_results_to_tool_output_valid( assert tool_outputs[0].get("output") == "Function executed successfully" -def test_openai_assistants_client_convert_function_results_to_tool_output_mismatched_run_ids( +def test_openai_assistants_client_prepare_tool_outputs_for_assistants_mismatched_run_ids( mock_async_openai: MagicMock, ) -> None: - """Test _convert_function_results_to_tool_output with mismatched run IDs.""" + """Test _prepare_tool_outputs_for_assistants with mismatched run IDs.""" chat_client = 
create_test_openai_assistants_client(mock_async_openai) # Create function results with different run IDs @@ -863,7 +863,7 @@ def test_openai_assistants_client_convert_function_results_to_tool_output_mismat function_result1 = FunctionResultContent(call_id=call_id1, result="Result 1") function_result2 = FunctionResultContent(call_id=call_id2, result="Result 2") - run_id, tool_outputs = chat_client._convert_function_results_to_tool_output([function_result1, function_result2]) # type: ignore + run_id, tool_outputs = chat_client._prepare_tool_outputs_for_assistants([function_result1, function_result2]) # type: ignore # Should only process the first one since run IDs don't match assert run_id == "run-123" diff --git a/python/packages/core/tests/openai/test_openai_chat_client.py b/python/packages/core/tests/openai/test_openai_chat_client.py index 8af3ed61aa..d2ddc1fb02 100644 --- a/python/packages/core/tests/openai/test_openai_chat_client.py +++ b/python/packages/core/tests/openai/test_openai_chat_client.py @@ -182,12 +182,12 @@ def test_unsupported_tool_handling(openai_unit_test_env: dict[str, str]) -> None unsupported_tool.__class__.__name__ = "UnsupportedAITool" # This should ignore the unsupported ToolProtocol and return empty list - result = client._chat_to_tool_spec([unsupported_tool]) # type: ignore + result = client._prepare_tools_for_openai([unsupported_tool]) # type: ignore assert result == [] # Also test with a non-ToolProtocol that should be converted to dict dict_tool = {"type": "function", "name": "test"} - result = client._chat_to_tool_spec([dict_tool]) # type: ignore + result = client._prepare_tools_for_openai([dict_tool]) # type: ignore assert result == [dict_tool] @@ -637,7 +637,7 @@ def test_chat_response_content_order_text_before_tool_calls(openai_unit_test_env ) client = OpenAIChatClient() - response = client._create_chat_response(mock_response, ChatOptions()) + response = client._parse_response_from_openai(mock_response, ChatOptions()) # Verify we have both text and tool call content assert len(response.messages) == 1 @@ -658,7 +658,7 @@ def test_function_result_falsy_values_handling(openai_unit_test_env: dict[str, s # Test with empty list (falsy but not None) message_with_empty_list = ChatMessage(role="tool", contents=[FunctionResultContent(call_id="call-123", result=[])]) - openai_messages = client._openai_chat_message_parser(message_with_empty_list) + openai_messages = client._prepare_message_for_openai(message_with_empty_list) assert len(openai_messages) == 1 assert openai_messages[0]["content"] == "[]" # Empty list should be JSON serialized @@ -667,14 +667,14 @@ def test_function_result_falsy_values_handling(openai_unit_test_env: dict[str, s role="tool", contents=[FunctionResultContent(call_id="call-456", result="")] ) - openai_messages = client._openai_chat_message_parser(message_with_empty_string) + openai_messages = client._prepare_message_for_openai(message_with_empty_string) assert len(openai_messages) == 1 assert openai_messages[0]["content"] == "" # Empty string should be preserved # Test with False (falsy but not None) message_with_false = ChatMessage(role="tool", contents=[FunctionResultContent(call_id="call-789", result=False)]) - openai_messages = client._openai_chat_message_parser(message_with_false) + openai_messages = client._prepare_message_for_openai(message_with_false) assert len(openai_messages) == 1 assert openai_messages[0]["content"] == "false" # False should be JSON serialized @@ -695,7 +695,7 @@ def 
test_function_result_exception_handling(openai_unit_test_env: dict[str, str] ], ) - openai_messages = client._openai_chat_message_parser(message_with_exception) + openai_messages = client._prepare_message_for_openai(message_with_exception) assert len(openai_messages) == 1 assert openai_messages[0]["content"] == "Error: Function failed." assert openai_messages[0]["tool_call_id"] == "call-123" @@ -708,8 +708,8 @@ def test_prepare_function_call_results_string_passthrough(): assert isinstance(result, str) -def test_openai_content_parser_data_content_image(openai_unit_test_env: dict[str, str]) -> None: - """Test _openai_content_parser converts DataContent with image media type to OpenAI format.""" +def test_prepare_content_for_openai_data_content_image(openai_unit_test_env: dict[str, str]) -> None: + """Test _prepare_content_for_openai converts DataContent with image media type to OpenAI format.""" client = OpenAIChatClient() # Test DataContent with image media type @@ -718,7 +718,7 @@ def test_openai_content_parser_data_content_image(openai_unit_test_env: dict[str media_type="image/png", ) - result = client._openai_content_parser(image_data_content) # type: ignore + result = client._prepare_content_for_openai(image_data_content) # type: ignore # Should convert to OpenAI image_url format assert result["type"] == "image_url" @@ -727,7 +727,7 @@ def test_openai_content_parser_data_content_image(openai_unit_test_env: dict[str # Test DataContent with non-image media type should use default model_dump text_data_content = DataContent(uri="data:text/plain;base64,SGVsbG8gV29ybGQ=", media_type="text/plain") - result = client._openai_content_parser(text_data_content) # type: ignore + result = client._prepare_content_for_openai(text_data_content) # type: ignore # Should use default model_dump format assert result["type"] == "data" @@ -740,7 +740,7 @@ def test_openai_content_parser_data_content_image(openai_unit_test_env: dict[str media_type="audio/wav", ) - result = client._openai_content_parser(audio_data_content) # type: ignore + result = client._prepare_content_for_openai(audio_data_content) # type: ignore # Should convert to OpenAI input_audio format assert result["type"] == "input_audio" @@ -751,7 +751,7 @@ def test_openai_content_parser_data_content_image(openai_unit_test_env: dict[str # Test DataContent with MP3 audio mp3_data_content = DataContent(uri="data:audio/mp3;base64,//uQAAAAWGluZwAAAA8AAAACAAACcQ==", media_type="audio/mp3") - result = client._openai_content_parser(mp3_data_content) # type: ignore + result = client._prepare_content_for_openai(mp3_data_content) # type: ignore # Should convert to OpenAI input_audio format with mp3 assert result["type"] == "input_audio" @@ -760,8 +760,8 @@ def test_openai_content_parser_data_content_image(openai_unit_test_env: dict[str assert result["input_audio"]["format"] == "mp3" -def test_openai_content_parser_document_file_mapping(openai_unit_test_env: dict[str, str]) -> None: - """Test _openai_content_parser converts document files (PDF, DOCX, etc.) to OpenAI file format.""" +def test_prepare_content_for_openai_document_file_mapping(openai_unit_test_env: dict[str, str]) -> None: + """Test _prepare_content_for_openai converts document files (PDF, DOCX, etc.) 
to OpenAI file format.""" client = OpenAIChatClient() # Test PDF without filename - should omit filename in OpenAI payload @@ -770,7 +770,7 @@ def test_openai_content_parser_document_file_mapping(openai_unit_test_env: dict[ media_type="application/pdf", ) - result = client._openai_content_parser(pdf_data_content) # type: ignore + result = client._prepare_content_for_openai(pdf_data_content) # type: ignore # Should convert to OpenAI file format without filename assert result["type"] == "file" @@ -787,7 +787,7 @@ def test_openai_content_parser_document_file_mapping(openai_unit_test_env: dict[ additional_properties={"filename": "report.pdf"}, ) - result = client._openai_content_parser(pdf_with_filename) # type: ignore + result = client._prepare_content_for_openai(pdf_with_filename) # type: ignore # Should use custom filename assert result["type"] == "file" @@ -820,7 +820,7 @@ def test_openai_content_parser_document_file_mapping(openai_unit_test_env: dict[ media_type=case["media_type"], ) - result = client._openai_content_parser(doc_content) # type: ignore + result = client._prepare_content_for_openai(doc_content) # type: ignore # All application/* types should now be mapped to file format assert result["type"] == "file" @@ -834,7 +834,7 @@ def test_openai_content_parser_document_file_mapping(openai_unit_test_env: dict[ additional_properties={"filename": case["filename"]}, ) - result = client._openai_content_parser(doc_with_filename) # type: ignore + result = client._prepare_content_for_openai(doc_with_filename) # type: ignore # Should now use file format with filename assert result["type"] == "file" @@ -848,7 +848,7 @@ def test_openai_content_parser_document_file_mapping(openai_unit_test_env: dict[ additional_properties={}, ) - result = client._openai_content_parser(pdf_empty_props) # type: ignore + result = client._prepare_content_for_openai(pdf_empty_props) # type: ignore assert result["type"] == "file" assert "filename" not in result["file"] @@ -860,7 +860,7 @@ def test_openai_content_parser_document_file_mapping(openai_unit_test_env: dict[ additional_properties={"filename": None}, ) - result = client._openai_content_parser(pdf_none_filename) # type: ignore + result = client._prepare_content_for_openai(pdf_none_filename) # type: ignore assert result["type"] == "file" assert "filename" not in result["file"] # None filename should be omitted diff --git a/python/packages/core/tests/openai/test_openai_chat_client_base.py b/python/packages/core/tests/openai/test_openai_chat_client_base.py index b146bad613..3e48899509 100644 --- a/python/packages/core/tests/openai/test_openai_chat_client_base.py +++ b/python/packages/core/tests/openai/test_openai_chat_client_base.py @@ -76,7 +76,7 @@ async def test_cmc( mock_create.assert_awaited_once_with( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=False, - messages=openai_chat_completion._prepare_chat_history_for_request(chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(chat_history), # type: ignore ) @@ -97,7 +97,7 @@ async def test_cmc_chat_options( mock_create.assert_awaited_once_with( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=False, - messages=openai_chat_completion._prepare_chat_history_for_request(chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(chat_history), # type: ignore ) @@ -120,7 +120,7 @@ async def test_cmc_no_fcc_in_response( mock_create.assert_awaited_once_with( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=False, 
- messages=openai_chat_completion._prepare_chat_history_for_request(orig_chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(orig_chat_history), # type: ignore ) @@ -167,7 +167,7 @@ async def test_scmc_chat_options( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=True, stream_options={"include_usage": True}, - messages=openai_chat_completion._prepare_chat_history_for_request(chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(chat_history), # type: ignore ) @@ -203,7 +203,7 @@ async def test_cmc_additional_properties( mock_create.assert_awaited_once_with( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=False, - messages=openai_chat_completion._prepare_chat_history_for_request(chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(chat_history), # type: ignore reasoning_effort="low", ) @@ -246,7 +246,7 @@ async def test_get_streaming( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=True, stream_options={"include_usage": True}, - messages=openai_chat_completion._prepare_chat_history_for_request(orig_chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(orig_chat_history), # type: ignore ) @@ -285,7 +285,7 @@ async def test_get_streaming_singular( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=True, stream_options={"include_usage": True}, - messages=openai_chat_completion._prepare_chat_history_for_request(orig_chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(orig_chat_history), # type: ignore ) @@ -349,7 +349,7 @@ async def test_get_streaming_no_fcc_in_response( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=True, stream_options={"include_usage": True}, - messages=openai_chat_completion._prepare_chat_history_for_request(orig_chat_history), # type: ignore + messages=openai_chat_completion._prepare_messages_for_openai(orig_chat_history), # type: ignore ) @@ -399,7 +399,7 @@ def test_chat_response_created_at_uses_utc(openai_unit_test_env: dict[str, str]) ) client = OpenAIChatClient() - response = client._create_chat_response(mock_response, ChatOptions()) + response = client._parse_response_from_openai(mock_response, ChatOptions()) # Verify that created_at is correctly formatted as UTC assert response.created_at is not None @@ -431,7 +431,7 @@ def test_chat_response_update_created_at_uses_utc(openai_unit_test_env: dict[str ) client = OpenAIChatClient() - response_update = client._create_chat_response_update(mock_chunk) + response_update = client._parse_response_update_from_openai(mock_chunk) # Verify that created_at is correctly formatted as UTC assert response_update.created_at is not None diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py index 3863f4701a..451dfd9b06 100644 --- a/python/packages/core/tests/openai/test_openai_responses_client.py +++ b/python/packages/core/tests/openai/test_openai_responses_client.py @@ -368,6 +368,7 @@ async def test_response_format_parse_path() -> None: mock_parsed_response.output_parsed = None mock_parsed_response.usage = None mock_parsed_response.finish_reason = None + mock_parsed_response.conversation = None # No conversation object with patch.object(client.client.responses, "parse", return_value=mock_parsed_response): response = await client.get_response( @@ -454,7 +455,7 @@ async def 
test_get_streaming_response_with_all_parameters() -> None: def test_response_content_creation_with_annotations() -> None: - """Test _create_response_content with different annotation types.""" + """Test _parse_response_from_openai with different annotation types.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Create a mock response with annotated text content @@ -485,7 +486,7 @@ def test_response_content_creation_with_annotations() -> None: mock_response.output = [mock_message_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore assert len(response.messages[0].contents) >= 1 assert isinstance(response.messages[0].contents[0], TextContent) @@ -494,7 +495,7 @@ def test_response_content_creation_with_annotations() -> None: def test_response_content_creation_with_refusal() -> None: - """Test _create_response_content with refusal content.""" + """Test _parse_response_from_openai with refusal content.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Create a mock response with refusal content @@ -516,7 +517,7 @@ def test_response_content_creation_with_refusal() -> None: mock_response.output = [mock_message_item] - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore assert len(response.messages[0].contents) == 1 assert isinstance(response.messages[0].contents[0], TextContent) @@ -524,7 +525,7 @@ def test_response_content_creation_with_refusal() -> None: def test_response_content_creation_with_reasoning() -> None: - """Test _create_response_content with reasoning content.""" + """Test _parse_response_from_openai with reasoning content.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Create a mock response with reasoning content @@ -546,7 +547,7 @@ def test_response_content_creation_with_reasoning() -> None: mock_response.output = [mock_reasoning_item] - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore assert len(response.messages[0].contents) == 2 assert isinstance(response.messages[0].contents[0], TextReasoningContent) @@ -554,7 +555,7 @@ def test_response_content_creation_with_reasoning() -> None: def test_response_content_creation_with_code_interpreter() -> None: - """Test _create_response_content with code interpreter outputs.""" + """Test _parse_response_from_openai with code interpreter outputs.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -582,7 +583,7 @@ def test_response_content_creation_with_code_interpreter() -> None: mock_response.output = [mock_code_interpreter_item] - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore assert len(response.messages[0].contents) == 2 assert isinstance(response.messages[0].contents[0], TextContent) @@ -593,7 +594,7 @@ def test_response_content_creation_with_code_interpreter() -> None: def 
test_response_content_creation_with_function_call() -> None: - """Test _create_response_content with function call content.""" + """Test _parse_response_from_openai with function call content.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Create a mock response with function call @@ -614,7 +615,7 @@ def test_response_content_creation_with_function_call() -> None: mock_response.output = [mock_function_call_item] - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore assert len(response.messages[0].contents) == 1 assert isinstance(response.messages[0].contents[0], FunctionCallContent) @@ -624,7 +625,7 @@ def test_response_content_creation_with_function_call() -> None: assert function_call.arguments == '{"location": "Seattle"}' -def test_tools_to_response_tools_with_hosted_mcp() -> None: +def test_prepare_tools_for_openai_with_hosted_mcp() -> None: """Test that HostedMCPTool is converted to the correct response tool dict.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -638,7 +639,7 @@ def test_tools_to_response_tools_with_hosted_mcp() -> None: additional_properties={"custom": "value"}, ) - resp_tools = client._tools_to_response_tools([tool]) + resp_tools = client._prepare_tools_for_openai([tool]) assert isinstance(resp_tools, list) assert len(resp_tools) == 1 mcp = resp_tools[0] @@ -654,7 +655,7 @@ def test_tools_to_response_tools_with_hosted_mcp() -> None: assert "require_approval" in mcp -def test_create_response_content_with_mcp_approval_request() -> None: +def test_parse_response_from_openai_with_mcp_approval_request() -> None: """Test that a non-streaming mcp_approval_request is parsed into FunctionApprovalRequestContent.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -675,7 +676,7 @@ def test_create_response_content_with_mcp_approval_request() -> None: mock_response.output = [mock_item] - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore assert isinstance(response.messages[0].contents[0], FunctionApprovalRequestContent) req = response.messages[0].contents[0] @@ -716,7 +717,7 @@ def test_responses_client_created_at_uses_utc(openai_unit_test_env: dict[str, st mock_response.output = [mock_message_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore # Verify that created_at is correctly formatted as UTC assert response.created_at is not None @@ -730,7 +731,7 @@ def test_responses_client_created_at_uses_utc(openai_unit_test_env: dict[str, st ) -def test_tools_to_response_tools_with_raw_image_generation() -> None: +def test_prepare_tools_for_openai_with_raw_image_generation() -> None: """Test that raw image_generation tool dict is handled correctly with parameter mapping.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -744,7 +745,7 @@ def test_tools_to_response_tools_with_raw_image_generation() -> None: "background": "transparent", } - resp_tools = client._tools_to_response_tools([tool]) + resp_tools = 
client._prepare_tools_for_openai([tool]) assert isinstance(resp_tools, list) assert len(resp_tools) == 1 @@ -759,7 +760,7 @@ def test_tools_to_response_tools_with_raw_image_generation() -> None: assert image_tool["output_compression"] == 75 -def test_tools_to_response_tools_with_raw_image_generation_openai_responses_params() -> None: +def test_prepare_tools_for_openai_with_raw_image_generation_openai_responses_params() -> None: """Test raw image_generation tool with OpenAI-specific parameters.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -773,7 +774,7 @@ def test_tools_to_response_tools_with_raw_image_generation_openai_responses_para "partial_images": 2, # Should be integer 0-3 } - resp_tools = client._tools_to_response_tools([tool]) + resp_tools = client._prepare_tools_for_openai([tool]) assert isinstance(resp_tools, list) assert len(resp_tools) == 1 @@ -791,14 +792,14 @@ def test_tools_to_response_tools_with_raw_image_generation_openai_responses_para assert tool_dict["partial_images"] == 2 -def test_tools_to_response_tools_with_raw_image_generation_minimal() -> None: +def test_prepare_tools_for_openai_with_raw_image_generation_minimal() -> None: """Test raw image_generation tool with minimal configuration.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Test with minimal parameters (just type) tool = {"type": "image_generation"} - resp_tools = client._tools_to_response_tools([tool]) + resp_tools = client._prepare_tools_for_openai([tool]) assert isinstance(resp_tools, list) assert len(resp_tools) == 1 @@ -809,7 +810,7 @@ def test_tools_to_response_tools_with_raw_image_generation_minimal() -> None: assert len(image_tool) == 1 -def test_create_streaming_response_content_with_mcp_approval_request() -> None: +def test_parse_chunk_from_openai_with_mcp_approval_request() -> None: """Test that a streaming mcp_approval_request event is parsed into FunctionApprovalRequestContent.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") chat_options = ChatOptions() @@ -825,7 +826,7 @@ def test_create_streaming_response_content_with_mcp_approval_request() -> None: mock_item.server_label = "My_MCP" mock_event.item = mock_item - update = client._create_streaming_response_content(mock_event, chat_options, function_call_ids) + update = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) assert any(isinstance(c, FunctionApprovalRequestContent) for c in update.contents) fa = next(c for c in update.contents if isinstance(c, FunctionApprovalRequestContent)) assert fa.id == "approval-stream-1" @@ -901,7 +902,7 @@ async def test_end_to_end_mcp_approval_flow(span_exporter) -> None: def test_usage_details_basic() -> None: - """Test _usage_details_from_openai without cached or reasoning tokens.""" + """Test _parse_usage_from_openai without cached or reasoning tokens.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") mock_usage = MagicMock() @@ -911,7 +912,7 @@ def test_usage_details_basic() -> None: mock_usage.input_tokens_details = None mock_usage.output_tokens_details = None - details = client._usage_details_from_openai(mock_usage) # type: ignore + details = client._parse_usage_from_openai(mock_usage) # type: ignore assert details is not None assert details.input_token_count == 100 assert details.output_token_count == 50 @@ -919,7 +920,7 @@ def test_usage_details_basic() -> None: def test_usage_details_with_cached_tokens() -> None: - """Test _usage_details_from_openai with 
cached input tokens.""" + """Test _parse_usage_from_openai with cached input tokens.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") mock_usage = MagicMock() @@ -930,14 +931,14 @@ def test_usage_details_with_cached_tokens() -> None: mock_usage.input_tokens_details.cached_tokens = 25 mock_usage.output_tokens_details = None - details = client._usage_details_from_openai(mock_usage) # type: ignore + details = client._parse_usage_from_openai(mock_usage) # type: ignore assert details is not None assert details.input_token_count == 200 assert details.additional_counts["openai.cached_input_tokens"] == 25 def test_usage_details_with_reasoning_tokens() -> None: - """Test _usage_details_from_openai with reasoning tokens.""" + """Test _parse_usage_from_openai with reasoning tokens.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") mock_usage = MagicMock() @@ -948,7 +949,7 @@ def test_usage_details_with_reasoning_tokens() -> None: mock_usage.output_tokens_details = MagicMock() mock_usage.output_tokens_details.reasoning_tokens = 30 - details = client._usage_details_from_openai(mock_usage) # type: ignore + details = client._parse_usage_from_openai(mock_usage) # type: ignore assert details is not None assert details.output_token_count == 80 assert details.additional_counts["openai.reasoning_tokens"] == 30 @@ -975,7 +976,7 @@ def test_get_metadata_from_response() -> None: def test_streaming_response_basic_structure() -> None: - """Test that _create_streaming_response_content returns proper structure.""" + """Test that _parse_chunk_from_openai returns proper structure.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") chat_options = ChatOptions(store=True) function_call_ids: dict[int, tuple[str, str]] = {} @@ -983,7 +984,7 @@ def test_streaming_response_basic_structure() -> None: # Test with a basic mock event to ensure the method returns proper structure mock_event = MagicMock() - response = client._create_streaming_response_content(mock_event, chat_options, function_call_ids) # type: ignore + response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) # type: ignore # Should get a valid ChatResponseUpdate structure assert isinstance(response, ChatResponseUpdate) @@ -1008,7 +1009,7 @@ def test_streaming_annotation_added_with_file_path() -> None: "index": 42, } - response = client._create_streaming_response_content(mock_event, chat_options, function_call_ids) + response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) assert len(response.contents) == 1 content = response.contents[0] @@ -1035,7 +1036,7 @@ def test_streaming_annotation_added_with_file_citation() -> None: "index": 15, } - response = client._create_streaming_response_content(mock_event, chat_options, function_call_ids) + response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) assert len(response.contents) == 1 content = response.contents[0] @@ -1064,7 +1065,7 @@ def test_streaming_annotation_added_with_container_file_citation() -> None: "end_index": 50, } - response = client._create_streaming_response_content(mock_event, chat_options, function_call_ids) + response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) assert len(response.contents) == 1 content = response.contents[0] @@ -1091,7 +1092,7 @@ def test_streaming_annotation_added_with_unknown_type() -> None: "url": "https://example.com", } - response = 
client._create_streaming_response_content(mock_event, chat_options, function_call_ids) + response = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) # url_citation should not produce HostedFileContent assert len(response.contents) == 0 @@ -1137,8 +1138,8 @@ async def run_streaming(): asyncio.run(run_streaming()) -def test_openai_content_parser_image_content() -> None: - """Test _openai_content_parser with image content variations.""" +def test_prepare_content_for_openai_image_content() -> None: + """Test _prepare_content_for_openai with image content variations.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Test image content with detail parameter and file_id @@ -1147,7 +1148,7 @@ def test_openai_content_parser_image_content() -> None: media_type="image/jpeg", additional_properties={"detail": "high", "file_id": "file_123"}, ) - result = client._openai_content_parser(Role.USER, image_content_with_detail, {}) # type: ignore + result = client._prepare_content_for_openai(Role.USER, image_content_with_detail, {}) # type: ignore assert result["type"] == "input_image" assert result["image_url"] == "https://example.com/image.jpg" assert result["detail"] == "high" @@ -1155,47 +1156,47 @@ def test_openai_content_parser_image_content() -> None: # Test image content without additional properties (defaults) image_content_basic = UriContent(uri="https://example.com/basic.png", media_type="image/png") - result = client._openai_content_parser(Role.USER, image_content_basic, {}) # type: ignore + result = client._prepare_content_for_openai(Role.USER, image_content_basic, {}) # type: ignore assert result["type"] == "input_image" assert result["detail"] == "auto" assert result["file_id"] is None -def test_openai_content_parser_audio_content() -> None: - """Test _openai_content_parser with audio content variations.""" +def test_prepare_content_for_openai_audio_content() -> None: + """Test _prepare_content_for_openai with audio content variations.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Test WAV audio content wav_content = UriContent(uri="data:audio/wav;base64,abc123", media_type="audio/wav") - result = client._openai_content_parser(Role.USER, wav_content, {}) # type: ignore + result = client._prepare_content_for_openai(Role.USER, wav_content, {}) # type: ignore assert result["type"] == "input_audio" assert result["input_audio"]["data"] == "data:audio/wav;base64,abc123" assert result["input_audio"]["format"] == "wav" # Test MP3 audio content mp3_content = UriContent(uri="data:audio/mp3;base64,def456", media_type="audio/mp3") - result = client._openai_content_parser(Role.USER, mp3_content, {}) # type: ignore + result = client._prepare_content_for_openai(Role.USER, mp3_content, {}) # type: ignore assert result["type"] == "input_audio" assert result["input_audio"]["format"] == "mp3" -def test_openai_content_parser_unsupported_content() -> None: - """Test _openai_content_parser with unsupported content types.""" +def test_prepare_content_for_openai_unsupported_content() -> None: + """Test _prepare_content_for_openai with unsupported content types.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Test unsupported audio format unsupported_audio = UriContent(uri="data:audio/ogg;base64,ghi789", media_type="audio/ogg") - result = client._openai_content_parser(Role.USER, unsupported_audio, {}) # type: ignore + result = client._prepare_content_for_openai(Role.USER, unsupported_audio, {}) # 
type: ignore assert result == {} # Test non-media content text_uri_content = UriContent(uri="https://example.com/document.txt", media_type="text/plain") - result = client._openai_content_parser(Role.USER, text_uri_content, {}) # type: ignore + result = client._prepare_content_for_openai(Role.USER, text_uri_content, {}) # type: ignore assert result == {} -def test_create_streaming_response_content_code_interpreter() -> None: - """Test _create_streaming_response_content with code_interpreter_call.""" +def test_parse_chunk_from_openai_code_interpreter() -> None: + """Test _parse_chunk_from_openai with code_interpreter_call.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") chat_options = ChatOptions() function_call_ids: dict[int, tuple[str, str]] = {} @@ -1211,15 +1212,15 @@ def test_create_streaming_response_content_code_interpreter() -> None: mock_item_image.code = None mock_event_image.item = mock_item_image - result = client._create_streaming_response_content(mock_event_image, chat_options, function_call_ids) # type: ignore + result = client._parse_chunk_from_openai(mock_event_image, chat_options, function_call_ids) # type: ignore assert len(result.contents) == 1 assert isinstance(result.contents[0], UriContent) assert result.contents[0].uri == "https://example.com/plot.png" assert result.contents[0].media_type == "image" -def test_create_streaming_response_content_reasoning() -> None: - """Test _create_streaming_response_content with reasoning content.""" +def test_parse_chunk_from_openai_reasoning() -> None: + """Test _parse_chunk_from_openai with reasoning content.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") chat_options = ChatOptions() function_call_ids: dict[int, tuple[str, str]] = {} @@ -1234,7 +1235,7 @@ def test_create_streaming_response_content_reasoning() -> None: mock_item_reasoning.summary = ["Problem analysis summary"] mock_event_reasoning.item = mock_item_reasoning - result = client._create_streaming_response_content(mock_event_reasoning, chat_options, function_call_ids) # type: ignore + result = client._parse_chunk_from_openai(mock_event_reasoning, chat_options, function_call_ids) # type: ignore assert len(result.contents) == 1 assert isinstance(result.contents[0], TextReasoningContent) assert result.contents[0].text == "Analyzing the problem step by step..." 
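The streaming-reasoning tests in the next hunk all exercise one parse path: a raw OpenAI stream event goes in, framework-native content objects come out. As a rough sketch of that dispatch shape — the `FakeEvent` class and the simplified content dataclasses below are stand-ins, not the real SDK or framework types, and only the event-type strings mirror the ones the tests use:

```python
from dataclasses import dataclass, field
from typing import Any


@dataclass
class TextContent:
    text: str


@dataclass
class TextReasoningContent:
    text: str
    additional_properties: dict[str, Any] = field(default_factory=dict)


@dataclass
class FakeEvent:
    """Stand-in for an OpenAI Responses stream event (illustrative only)."""

    type: str
    delta: str = ""


def parse_chunk(event: FakeEvent) -> list[Any]:
    """Toy _parse_chunk_from_<service> dispatcher.

    Maps service event types onto framework content objects; unknown
    events yield an empty contents list rather than raising.
    """
    if event.type == "response.output_text.delta":
        return [TextContent(text=event.delta)]
    if event.type in (
        "response.reasoning_text.delta",
        "response.reasoning_summary_text.delta",
    ):
        return [TextReasoningContent(text=event.delta)]
    return []


# Reasoning deltas become TextReasoningContent; unknown events produce nothing.
assert isinstance(parse_chunk(FakeEvent("response.reasoning_text.delta", "step 1"))[0], TextReasoningContent)
assert parse_chunk(FakeEvent("unknown.event")) == []
```

The real `_parse_chunk_from_openai` additionally threads through `chat_options` and the accumulated `function_call_ids`, as the test call sites around this hunk show.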
@@ -1242,8 +1243,8 @@ def test_create_streaming_response_content_reasoning() -> None: assert result.contents[0].additional_properties["summary"] == "Problem analysis summary" -def test_openai_content_parser_text_reasoning_comprehensive() -> None: - """Test _openai_content_parser with TextReasoningContent all additional properties.""" +def test_prepare_content_for_openai_text_reasoning_comprehensive() -> None: + """Test _prepare_content_for_openai with a TextReasoningContent carrying all additional properties.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Test TextReasoningContent with all additional properties @@ -1255,7 +1256,7 @@ def test_openai_content_parser_text_reasoning_comprehensive() -> None: "encrypted_content": "secure_data_456", }, ) - result = client._openai_content_parser(Role.ASSISTANT, comprehensive_reasoning, {}) # type: ignore + result = client._prepare_content_for_openai(Role.ASSISTANT, comprehensive_reasoning, {}) # type: ignore assert result["type"] == "reasoning" assert result["summary"]["text"] == "Comprehensive reasoning summary" assert result["status"] == "in_progress" @@ -1280,7 +1281,7 @@ def test_streaming_reasoning_text_delta_event() -> None: ) with patch.object(client, "_get_metadata_from_response", return_value={}) as mock_metadata: - response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore + response = client._parse_chunk_from_openai(event, chat_options, function_call_ids) # type: ignore assert len(response.contents) == 1 assert isinstance(response.contents[0], TextReasoningContent) @@ -1305,7 +1306,7 @@ def test_streaming_reasoning_text_done_event() -> None: ) with patch.object(client, "_get_metadata_from_response", return_value={"test": "data"}) as mock_metadata: - response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore + response = client._parse_chunk_from_openai(event, chat_options, function_call_ids) # type: ignore assert len(response.contents) == 1 assert isinstance(response.contents[0], TextReasoningContent) @@ -1331,7 +1332,7 @@ def test_streaming_reasoning_summary_text_delta_event() -> None: ) with patch.object(client, "_get_metadata_from_response", return_value={}) as mock_metadata: - response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore + response = client._parse_chunk_from_openai(event, chat_options, function_call_ids) # type: ignore assert len(response.contents) == 1 assert isinstance(response.contents[0], TextReasoningContent) @@ -1356,7 +1357,7 @@ def test_streaming_reasoning_summary_text_done_event() -> None: ) with patch.object(client, "_get_metadata_from_response", return_value={"custom": "meta"}) as mock_metadata: - response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore + response = client._parse_chunk_from_openai(event, chat_options, function_call_ids) # type: ignore assert len(response.contents) == 1 assert isinstance(response.contents[0], TextReasoningContent) @@ -1392,8 +1393,8 @@ def test_streaming_reasoning_events_preserve_metadata() -> None: ) with patch.object(client, "_get_metadata_from_response", return_value={"test": "metadata"}): - text_response = client._create_streaming_response_content(text_event, chat_options, function_call_ids) # type: ignore - reasoning_response = client._create_streaming_response_content(reasoning_event, chat_options, function_call_ids) # type: ignore + text_response = 
client._parse_chunk_from_openai(text_event, chat_options, function_call_ids) # type: ignore + reasoning_response = client._parse_chunk_from_openai(reasoning_event, chat_options, function_call_ids) # type: ignore # Both should preserve metadata assert text_response.additional_properties == {"test": "metadata"} @@ -1404,7 +1405,7 @@ def test_streaming_reasoning_events_preserve_metadata() -> None: assert isinstance(reasoning_response.contents[0], TextReasoningContent) -def test_create_response_content_image_generation_raw_base64(): +def test_parse_response_from_openai_image_generation_raw_base64(): """Test image generation response parsing with raw base64 string.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -1428,7 +1429,7 @@ def test_create_response_content_image_generation_raw_base64(): mock_response.output = [mock_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore # Verify the response contains DataContent with proper URI and media_type assert len(response.messages[0].contents) == 1 @@ -1438,7 +1439,7 @@ def test_create_response_content_image_generation_raw_base64(): assert content.media_type == "image/png" -def test_create_response_content_image_generation_existing_data_uri(): +def test_parse_response_from_openai_image_generation_existing_data_uri(): """Test image generation response parsing with existing data URI.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -1461,7 +1462,7 @@ def test_create_response_content_image_generation_existing_data_uri(): mock_response.output = [mock_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore # Verify the response contains DataContent with proper media_type parsed from URI assert len(response.messages[0].contents) == 1 @@ -1471,7 +1472,7 @@ def test_create_response_content_image_generation_existing_data_uri(): assert content.media_type == "image/webp" -def test_create_response_content_image_generation_format_detection(): +def test_parse_response_from_openai_image_generation_format_detection(): """Test different image format detection from base64 data.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -1493,7 +1494,7 @@ def test_create_response_content_image_generation_format_detection(): mock_response_jpeg.output = [mock_item_jpeg] with patch.object(client, "_get_metadata_from_response", return_value={}): - response_jpeg = client._create_response_content(mock_response_jpeg, chat_options=ChatOptions()) # type: ignore + response_jpeg = client._parse_response_from_openai(mock_response_jpeg, chat_options=ChatOptions()) # type: ignore content_jpeg = response_jpeg.messages[0].contents[0] assert isinstance(content_jpeg, DataContent) assert content_jpeg.media_type == "image/jpeg" @@ -1517,14 +1518,14 @@ def test_create_response_content_image_generation_format_detection(): mock_response_webp.output = [mock_item_webp] with patch.object(client, "_get_metadata_from_response", return_value={}): - response_webp = client._create_response_content(mock_response_webp, chat_options=ChatOptions()) # type: 
ignore + response_webp = client._parse_response_from_openai(mock_response_webp, chat_options=ChatOptions()) # type: ignore content_webp = response_webp.messages[0].contents[0] assert isinstance(content_webp, DataContent) assert content_webp.media_type == "image/webp" assert "data:image/webp;base64," in content_webp.uri -def test_create_response_content_image_generation_fallback(): +def test_parse_response_from_openai_image_generation_fallback(): """Test image generation with invalid base64 falls back to PNG.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -1547,7 +1548,7 @@ def test_create_response_content_image_generation_fallback(): mock_response.output = [mock_item] with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore # Verify it falls back to PNG format for unrecognized binary data assert len(response.messages[0].contents) == 1 @@ -1563,21 +1564,21 @@ async def test_prepare_options_store_parameter_handling() -> None: test_conversation_id = "test-conversation-123" chat_options = ChatOptions(store=True, conversation_id=test_conversation_id) - options = await client.prepare_options(messages, chat_options) + options = await client._prepare_options(messages, chat_options) # type: ignore assert options["store"] is True assert options["previous_response_id"] == test_conversation_id chat_options = ChatOptions(store=False, conversation_id="") - options = await client.prepare_options(messages, chat_options) + options = await client._prepare_options(messages, chat_options) # type: ignore assert options["store"] is False chat_options = ChatOptions(store=None, conversation_id=None) - options = await client.prepare_options(messages, chat_options) + options = await client._prepare_options(messages, chat_options) # type: ignore assert "store" not in options assert "previous_response_id" not in options chat_options = ChatOptions() - options = await client.prepare_options(messages, chat_options) + options = await client._prepare_options(messages, chat_options) # type: ignore assert "store" not in options assert "previous_response_id" not in options diff --git a/python/packages/devui/agent_framework_devui/_executor.py b/python/packages/devui/agent_framework_devui/_executor.py index be769cba09..1f28c8772c 100644 --- a/python/packages/devui/agent_framework_devui/_executor.py +++ b/python/packages/devui/agent_framework_devui/_executor.py @@ -248,7 +248,7 @@ async def _execute_agent( # Get thread from conversation parameter (OpenAI standard!) 
thread = None - conversation_id = request.get_conversation_id() + conversation_id = request._get_conversation_id() if conversation_id: thread = self.conversation_store.get_thread(conversation_id) if thread: @@ -324,7 +324,7 @@ async def _execute_workflow( entity_id = request.get_entity_id() or "unknown" # Get or create session conversation for checkpoint storage - conversation_id = request.get_conversation_id() + conversation_id = request._get_conversation_id() if not conversation_id: # Create default session if not provided import time diff --git a/python/packages/devui/agent_framework_devui/models/_openai_custom.py b/python/packages/devui/agent_framework_devui/models/_openai_custom.py index f82ef90b72..ac0e74034a 100644 --- a/python/packages/devui/agent_framework_devui/models/_openai_custom.py +++ b/python/packages/devui/agent_framework_devui/models/_openai_custom.py @@ -324,7 +324,7 @@ def get_entity_id(self) -> str | None: return self.metadata.get("entity_id") return None - def get_conversation_id(self) -> str | None: + def _get_conversation_id(self) -> str | None: """Extract conversation_id from conversation parameter. Supports both string and object forms: diff --git a/python/packages/ollama/agent_framework_ollama/_chat_client.py b/python/packages/ollama/agent_framework_ollama/_chat_client.py index 07df5a0d5f..d8856143b8 100644 --- a/python/packages/ollama/agent_framework_ollama/_chat_client.py +++ b/python/packages/ollama/agent_framework_ollama/_chat_client.py @@ -117,9 +117,11 @@ async def _inner_get_response( chat_options: ChatOptions, **kwargs: Any, ) -> ChatResponse: + # prepare options_dict = self._prepare_options(messages, chat_options) try: + # execute response: OllamaChatResponse = await self.client.chat( # type: ignore[misc] stream=False, **options_dict, @@ -128,7 +130,8 @@ async def _inner_get_response( except Exception as ex: raise ServiceResponseException(f"Ollama chat request failed : {ex}", ex) from ex - return self._ollama_response_to_agent_framework_response(response) + # process + return self._parse_response_from_ollama(response) async def _inner_get_streaming_response( self, @@ -137,9 +140,11 @@ async def _inner_get_streaming_response( chat_options: ChatOptions, **kwargs: Any, ) -> AsyncIterable[ChatResponseUpdate]: + # prepare options_dict = self._prepare_options(messages, chat_options) try: + # execute response_object: AsyncIterable[OllamaChatResponse] = await self.client.chat( # type: ignore[misc] stream=True, **options_dict, @@ -148,49 +153,61 @@ async def _inner_get_streaming_response( except Exception as ex: raise ServiceResponseException(f"Ollama streaming chat request failed : {ex}", ex) from ex + # process async for part in response_object: - yield self._ollama_streaming_response_to_agent_framework_response(part) + yield self._parse_streaming_response_from_ollama(part) def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions) -> dict[str, Any]: - # Preprocess web search tool if it exists - options_dict = chat_options.to_dict(exclude={"instructions", "type"}) - - # Promote additional_properties to the top level of options_dict - additional_props = options_dict.pop("additional_properties", {}) - options_dict.update(additional_props) - - # Prepare Messages from Agent Framework format to Ollama format - if messages and "messages" not in options_dict: - options_dict["messages"] = self._prepare_chat_history_for_request(messages) - if "messages" not in options_dict: - raise ServiceInvalidRequestError("Messages are required for 
chat completions") - - # Prepare Tools from Agent Framework format to Json Schema format - if chat_options.tools: - options_dict["tools"] = self._chat_to_tool_spec(chat_options.tools) - - # Currently Ollama only supports auto tool choice + # tool choice - Currently Ollama only supports auto tool choice if chat_options.tool_choice == "required": raise ServiceInvalidRequestError("Ollama does not support required tool choice.") - # Always auto: remove tool_choice since Ollama does not expose configuration to force or disable tools. - if "tool_choice" in options_dict: - del options_dict["tool_choice"] - # Rename model_id to model for Ollama API, if no model is provided use the one from client initialization - if "model_id" in options_dict: - options_dict["model"] = options_dict.pop("model_id") + run_options = chat_options.to_dict( + exclude={ + "type", + "instructions", + "tool_choice", # Ollama does not support tool_choice configuration + "additional_properties", # handled separately + } + ) + + # messages + if messages and "messages" not in run_options: + run_options["messages"] = self._prepare_messages_for_ollama(messages) + if "messages" not in run_options: + raise ServiceInvalidRequestError("Messages are required for chat completions") - if "model_id" not in options_dict: - options_dict["model"] = self.model_id + # translations between ChatOptions and Ollama API + translations = {"model_id": "model"} + for old_key, new_key in translations.items(): + if old_key in run_options and old_key != new_key: + run_options[new_key] = run_options.pop(old_key) + + # model id + if not run_options.get("model"): + if not self.model_id: + raise ValueError("model_id must be a non-empty string") + run_options["model"] = self.model_id + + # tools + if chat_options.tools and (tools := self._prepare_tools_for_ollama(chat_options.tools)): + run_options["tools"] = tools + + # additional properties + additional_options = { + key: value for key, value in chat_options.additional_properties.items() if value is not None + } + if additional_options: + run_options.update(additional_options) - return options_dict + return run_options - def _prepare_chat_history_for_request(self, messages: MutableSequence[ChatMessage]) -> list[OllamaMessage]: - ollama_messages = [self._agent_framework_message_to_ollama_message(msg) for msg in messages] + def _prepare_messages_for_ollama(self, messages: MutableSequence[ChatMessage]) -> list[OllamaMessage]: + ollama_messages = [self._prepare_message_for_ollama(msg) for msg in messages] # Flatten the list of lists into a single list return list(chain.from_iterable(ollama_messages)) - def _agent_framework_message_to_ollama_message(self, message: ChatMessage) -> list[OllamaMessage]: + def _prepare_message_for_ollama(self, message: ChatMessage) -> list[OllamaMessage]: message_converters: dict[str, Callable[[ChatMessage], list[OllamaMessage]]] = { Role.SYSTEM.value: self._format_system_message, Role.USER.value: self._format_user_message, @@ -250,21 +267,19 @@ def _format_tool_message(self, message: ChatMessage) -> list[OllamaMessage]: if isinstance(item, FunctionResultContent) ] - def _ollama_response_to_agent_framework_content(self, response: OllamaChatResponse) -> list[Contents]: + def _parse_contents_from_ollama(self, response: OllamaChatResponse) -> list[Contents]: contents: list[Contents] = [] if response.message.thinking: contents.append(TextReasoningContent(text=response.message.thinking)) if response.message.content: contents.append(TextContent(text=response.message.content)) if 
response.message.tool_calls: - tool_calls = self._parse_ollama_tool_calls(response.message.tool_calls) + tool_calls = self._parse_tool_calls_from_ollama(response.message.tool_calls) contents.extend(tool_calls) return contents - def _ollama_streaming_response_to_agent_framework_response( - self, response: OllamaChatResponse - ) -> ChatResponseUpdate: - contents = self._ollama_response_to_agent_framework_content(response) + def _parse_streaming_response_from_ollama(self, response: OllamaChatResponse) -> ChatResponseUpdate: + contents = self._parse_contents_from_ollama(response) return ChatResponseUpdate( contents=contents, role=Role.ASSISTANT, @@ -272,8 +287,8 @@ def _ollama_streaming_response_to_agent_framework_response( created_at=response.created_at, ) - def _ollama_response_to_agent_framework_response(self, response: OllamaChatResponse) -> ChatResponse: - contents = self._ollama_response_to_agent_framework_content(response) + def _parse_response_from_ollama(self, response: OllamaChatResponse) -> ChatResponse: + contents = self._parse_contents_from_ollama(response) return ChatResponse( messages=[ChatMessage(role=Role.ASSISTANT, contents=contents)], @@ -285,7 +300,7 @@ def _ollama_response_to_agent_framework_response(self, response: OllamaChatRespo ), ) - def _parse_ollama_tool_calls(self, tool_calls: Sequence[OllamaMessage.ToolCall]) -> list[Contents]: + def _parse_tool_calls_from_ollama(self, tool_calls: Sequence[OllamaMessage.ToolCall]) -> list[Contents]: resp: list[Contents] = [] for tool in tool_calls: fcc = FunctionCallContent( @@ -297,7 +312,7 @@ def _parse_ollama_tool_calls(self, tool_calls: Sequence[OllamaMessage.ToolCall]) resp.append(fcc) return resp - def _chat_to_tool_spec(self, tools: list[ToolProtocol | MutableMapping[str, Any]]) -> list[dict[str, Any]]: + def _prepare_tools_for_ollama(self, tools: list[ToolProtocol | MutableMapping[str, Any]]) -> list[dict[str, Any]]: chat_tools: list[dict[str, Any]] = [] for tool in tools: if isinstance(tool, ToolProtocol):
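On the prepare side, a `_prepare_tools_for_<service>` helper like the one above typically flattens tools into the JSON-schema dicts a chat API expects. A minimal sketch under assumed attribute names (`name`, `description`, and `parameters` on the protocol below are illustrative assumptions, not the actual `ToolProtocol` surface):

```python
from collections.abc import MutableMapping
from typing import Any, Protocol


class FunctionToolLike(Protocol):
    """Assumed tool surface for this sketch; the real ToolProtocol may differ."""

    name: str
    description: str
    parameters: dict[str, Any]  # JSON schema for the function arguments


def prepare_tools(tools: list[FunctionToolLike | MutableMapping[str, Any]]) -> list[dict[str, Any]]:
    """Toy _prepare_tools_for_<service>: flatten tools into API-ready dicts."""
    specs: list[dict[str, Any]] = []
    for tool in tools:
        if isinstance(tool, MutableMapping):
            specs.append(dict(tool))  # raw tool dicts pass through untouched
            continue
        specs.append({
            "type": "function",
            "function": {
                "name": tool.name,
                "description": tool.description,
                "parameters": tool.parameters,
            },
        })
    return specs
```

Raw mapping tools pass through untouched while protocol-based tools are converted, mirroring the `isinstance(tool, ToolProtocol)` split at the top of the method in the hunk above.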