@@ -28,13 +28,35 @@
)
from azure.ai.agentserver.responses.hosting import ResponsesAgentServerHost
from azure.ai.agentserver.responses.models import (
ApplyPatchToolCallItemParam,
ApplyPatchToolCallOutputItemParam,
ComputerCallOutputItemParam,
ComputerScreenshotContent,
CreateResponse,
FunctionCallOutputItemParam,
FunctionShellAction,
FunctionShellCallItemParam,
FunctionShellCallOutputContent,
FunctionShellCallOutputExitOutcome,
FunctionShellCallOutputItemParam,
Item,
ItemCodeInterpreterToolCall,
ItemComputerToolCall,
ItemCustomToolCall,
ItemCustomToolCallOutput,
ItemFileSearchToolCall,
ItemFunctionToolCall,
ItemImageGenToolCall,
ItemLocalShellToolCall,
ItemLocalShellToolCallOutput,
ItemMcpApprovalRequest,
ItemMcpToolCall,
ItemMessage,
ItemOutputMessage,
ItemReasoningItem,
ItemWebSearchToolCall,
LocalEnvironmentResource,
MCPApprovalResponse,
MessageContent,
MessageContentInputFileContent,
MessageContentInputImageContent,
@@ -174,9 +196,11 @@ async def _handle_regular_agent(
context: ResponseContext,
) -> AsyncIterable[ResponseStreamEvent | dict[str, Any]]:
"""Handle the creation of a response for a regular (non-workflow) agent."""
input_text = await context.get_input_text()
input_items = await context.get_input_items()
input_messages = _items_to_messages(input_items)

history = await context.get_history()
messages: list[str | Content | Message] = [*_to_messages(history), input_text]
messages: list[str | Content | Message] = [*_output_items_to_messages(history), *input_messages]

chat_options, are_options_set = _to_chat_options(request)

@@ -243,7 +267,9 @@ async def _handle_workflow_agent(
The sandbox may be deactivated after some period of inactivity, and only data managed
by the hosting infrastructure or files will be preserved upon deactivation.
"""
input_text = await context.get_input_text()
input_items = await context.get_input_items()
input_messages = _items_to_messages(input_items)

is_streaming_request = self._is_streaming_request(request)

_, are_options_set = _to_chat_options(request)
@@ -296,7 +322,7 @@ async def _handle_workflow_agent(

if not is_streaming_request:
# Run the agent in non-streaming mode
response = await self._agent.run(input_text, stream=False, checkpoint_storage=checkpoint_storage)
response = await self._agent.run(input_messages, stream=False, checkpoint_storage=checkpoint_storage)

for message in response.messages:
for content in message.contents:
@@ -308,7 +334,7 @@ async def _handle_workflow_agent(
return

# Run the agent in streaming mode
response_stream = self._agent.run(input_text, stream=True, checkpoint_storage=checkpoint_storage)
response_stream = self._agent.run(input_messages, stream=True, checkpoint_storage=checkpoint_storage)

# Track the current active output item builder for streaming;
# lazily created on matching content, closed when a different type arrives.
@@ -532,7 +558,260 @@ def _to_chat_options(request: CreateResponse) -> tuple[ChatOptions, bool]:
# region Input Message Conversion


def _to_messages(history: Sequence[OutputItem]) -> list[Message]:
def _items_to_messages(input_items: Sequence[Item]) -> list[Message]:
"""Converts a sequence of input items to a list of Messages, one per item.

Args:
input_items: The input items to convert.

Returns:
A list of Messages, one per supported input item.
"""
messages: list[Message] = []
for item in input_items:
messages.append(_item_to_message(item))
return messages


def _item_to_message(item: Item) -> Message:
"""Converts an Item to a Message.

Args:
item: The Item to convert.

Returns:
The converted Message.

Raises:
ValueError: If the Item type is not supported.
"""
if item.type == "message":
msg = cast(ItemMessage, item)
if isinstance(msg.content, str):
return Message(role=msg.role, contents=[Content.from_text(msg.content)])
return Message(role=msg.role, contents=[_convert_message_content(part) for part in msg.content])

if item.type == "output_message":
output_msg = cast(ItemOutputMessage, item)
return Message(
role=output_msg.role, contents=[_convert_output_message_content(part) for part in output_msg.content]
)

if item.type == "function_call":
fc = cast(ItemFunctionToolCall, item)
return Message(
role="assistant",
contents=[Content.from_function_call(fc.call_id, fc.name, arguments=fc.arguments)],
)

if item.type == "function_call_output":
fco = cast(FunctionCallOutputItemParam, item)
output = fco.output if isinstance(fco.output, str) else str(fco.output)
return Message(
role="tool",
contents=[Content.from_function_result(fco.call_id, result=output)],
)

if item.type == "reasoning":
reasoning = cast(ItemReasoningItem, item)
reason_contents: list[Content] = []
if reasoning.summary:
for summary in reasoning.summary:
reason_contents.append(Content.from_text(summary.text))
return Message(role="assistant", contents=reason_contents)

if item.type == "mcp_call":
mcp = cast(ItemMcpToolCall, item)
return Message(
role="assistant",
contents=[
Content.from_mcp_server_tool_call(
mcp.id,
mcp.name,
server_name=mcp.server_label,
arguments=mcp.arguments,
)
],
)

if item.type == "mcp_approval_request":
mcp_req = cast(ItemMcpApprovalRequest, item)
mcp_call_content = Content.from_mcp_server_tool_call(
mcp_req.id,
mcp_req.name,
server_name=mcp_req.server_label,
arguments=mcp_req.arguments,
)
return Message(
role="assistant",
contents=[Content.from_function_approval_request(mcp_req.id, mcp_call_content)],
)

if item.type == "mcp_approval_response":
mcp_resp = cast(MCPApprovalResponse, item)
placeholder_content = Content.from_function_call(mcp_resp.approval_request_id, "mcp_approval")
return Message(
role="user",
contents=[
Content.from_function_approval_response(
mcp_resp.approve, mcp_resp.approval_request_id, placeholder_content
)
],
)

if item.type == "code_interpreter_call":
ci = cast(ItemCodeInterpreterToolCall, item)
return Message(
role="assistant",
contents=[Content.from_code_interpreter_tool_call(call_id=ci.id)],
)

if item.type == "image_generation_call":
ig = cast(ItemImageGenToolCall, item)
return Message(
role="assistant",
contents=[Content.from_image_generation_tool_call(image_id=ig.id)],
)

if item.type == "shell_call":
sc = cast(FunctionShellCallItemParam, item)
return Message(
role="assistant",
contents=[
Content.from_shell_tool_call(
call_id=sc.call_id,
commands=sc.action.commands,
status=str(sc.status),
)
],
)

if item.type == "shell_call_output":
sco = cast(FunctionShellCallOutputItemParam, item)
outputs = [
Content.from_shell_command_output(
stdout=out.stdout or "",
stderr=out.stderr or "",
exit_code=getattr(out.outcome, "exit_code", None) if hasattr(out, "outcome") else None,
)
for out in (sco.output or [])
]
return Message(
role="tool",
contents=[
Content.from_shell_tool_result(
call_id=sco.call_id,
outputs=outputs,
max_output_length=sco.max_output_length,
)
],
)

if item.type == "local_shell_call":
lsc = cast(ItemLocalShellToolCall, item)
commands = lsc.action.command if hasattr(lsc.action, "command") and lsc.action.command else []
return Message(
role="assistant",
contents=[
Content.from_shell_tool_call(
call_id=lsc.call_id,
commands=commands,
status=str(lsc.status),
)
],
)

if item.type == "local_shell_call_output":
lsco = cast(ItemLocalShellToolCallOutput, item)
return Message(
role="tool",
contents=[
Content.from_shell_tool_result(
call_id=lsco.id,
outputs=[Content.from_shell_command_output(stdout=lsco.output)],
)
],
)

if item.type == "file_search_call":
fs = cast(ItemFileSearchToolCall, item)
return Message(
role="assistant",
contents=[
Content.from_function_call(
fs.id,
"file_search",
arguments=json.dumps({"queries": fs.queries}),
)
],
)

if item.type == "web_search_call":
ws = cast(ItemWebSearchToolCall, item)
return Message(
role="assistant",
contents=[Content.from_function_call(ws.id, "web_search")],
)

if item.type == "computer_call":
cc = cast(ItemComputerToolCall, item)
return Message(
role="assistant",
contents=[
Content.from_function_call(
cc.call_id,
"computer_use",
arguments=str(cc.action),
)
],
)

if item.type == "computer_call_output":
cco = cast(ComputerCallOutputItemParam, item)
return Message(
role="tool",
contents=[Content.from_function_result(cco.call_id, result=str(cco.output))],
)

if item.type == "custom_tool_call":
ct = cast(ItemCustomToolCall, item)
return Message(
role="assistant",
contents=[Content.from_function_call(ct.call_id, ct.name, arguments=ct.input)],
)

if item.type == "custom_tool_call_output":
cto = cast(ItemCustomToolCallOutput, item)
output = cto.output if isinstance(cto.output, str) else str(cto.output)
return Message(
role="tool",
contents=[Content.from_function_result(cto.call_id, result=output)],
)

if item.type == "apply_patch_call":
ap = cast(ApplyPatchToolCallItemParam, item)
return Message(
role="assistant",
contents=[
Content.from_function_call(
ap.call_id,
"apply_patch",
arguments=str(ap.operation),
)
],
)

if item.type == "apply_patch_call_output":
apo = cast(ApplyPatchToolCallOutputItemParam, item)
return Message(
role="tool",
contents=[Content.from_function_result(apo.call_id, result=apo.output or "")],
)

raise ValueError(f"Unsupported Item type: {item.type}")


def _output_items_to_messages(history: Sequence[OutputItem]) -> list[Message]:
"""Converts a sequence of OutputItem objects to a list of Message objects.

Args:
@@ -543,11 +822,11 @@ def _to_messages(history: Sequence[OutputItem]) -> list[Message]:
"""
messages: list[Message] = []
for item in history:
messages.append(_to_message(item))
messages.append(_output_item_to_message(item))
return messages


def _to_message(item: OutputItem) -> Message:
def _output_item_to_message(item: OutputItem) -> Message:
"""Converts an OutputItem to a Message.

Args:
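Illustrative sketch (not part of the diff): what the new _items_to_messages helper produces for a paired function call and its output. The Fake* dataclasses and the import of _items_to_messages are assumptions for illustration; the real item classes live in azure.ai.agentserver.responses.models, and the edited module's import path is not shown in this diff. Because _item_to_message dispatches on item.type and only reads attributes (the cast() calls are runtime no-ops), duck-typed stand-ins with the same field names are enough to show the mapping.

from dataclasses import dataclass

@dataclass
class FakeFunctionCallItem:
    # Mirrors the fields _item_to_message reads for "function_call" items.
    type: str = "function_call"
    call_id: str = "call_1"
    name: str = "get_weather"
    arguments: str = '{"city": "Seattle"}'

@dataclass
class FakeFunctionCallOutputItem:
    # Mirrors the fields _item_to_message reads for "function_call_output" items.
    type: str = "function_call_output"
    call_id: str = "call_1"
    output: str = '{"temperature_f": 62}'

# Assuming _items_to_messages is imported from the module edited above:
messages = _items_to_messages([FakeFunctionCallItem(), FakeFunctionCallOutputItem()])
# messages[0]: assistant Message carrying a function-call Content (call_1 / get_weather)
# messages[1]: tool Message carrying the matching function-result Content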
8 changes: 4 additions & 4 deletions python/packages/foundry_hosting/pyproject.toml
@@ -4,7 +4,7 @@ description = "Foundry Hosting integration for Microsoft Agent Framework."
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}]
readme = "README.md"
requires-python = ">=3.10"
version = "1.0.0a260423"
version = "1.0.0a260424"
license-files = ["LICENSE"]
urls.homepage = "https://aka.ms/agent-framework"
urls.source = "https://github.com/microsoft/agent-framework/tree/main/python"
@@ -24,9 +24,9 @@ classifiers = [
]
dependencies = [
"agent-framework-core>=1.1.1,<2",
"azure-ai-agentserver-core==2.0.0b2",
"azure-ai-agentserver-responses==1.0.0b4",
"azure-ai-agentserver-invocations==1.0.0b2",
"azure-ai-agentserver-core==2.0.0b3",
"azure-ai-agentserver-responses==1.0.0b5",
"azure-ai-agentserver-invocations==1.0.0b3",
]

[tool.uv]