17 changes: 10 additions & 7 deletions python/packages/azure-ai/agent_framework_azure_ai/_client.py
@@ -22,12 +22,7 @@
from agent_framework.openai import OpenAIResponsesOptions
from agent_framework.openai._responses_client import OpenAIBaseResponsesClient
from azure.ai.projects.aio import AIProjectClient
from azure.ai.projects.models import (
MCPTool,
PromptAgentDefinition,
PromptAgentDefinitionText,
RaiConfig,
)
from azure.ai.projects.models import MCPTool, PromptAgentDefinition, PromptAgentDefinitionText, RaiConfig, Reasoning
from azure.core.credentials_async import AsyncTokenCredential
from azure.core.exceptions import ResourceNotFoundError
from pydantic import ValidationError
@@ -51,12 +46,15 @@
logger = get_logger("agent_framework.azure")


class AzureAIProjectAgentOptions(OpenAIResponsesOptions):
class AzureAIProjectAgentOptions(OpenAIResponsesOptions, total=False):
"""Azure AI Project Agent options."""

rai_config: RaiConfig
"""Configuration for Responsible AI (RAI) content filtering and safety features."""

reasoning: Reasoning # type: ignore[misc]
"""Configuration for enabling reasoning capabilities (requires azure.ai.projects.models.Reasoning)."""


TAzureAIClientOptions = TypeVar(
"TAzureAIClientOptions",
@@ -343,6 +341,10 @@ async def _get_agent_reference_or_create(
args["temperature"] = run_options["temperature"]
if "top_p" in run_options:
args["top_p"] = run_options["top_p"]
if "reasoning" in run_options:
args["reasoning"] = run_options["reasoning"]
if "rai_config" in run_options:
args["rai_config"] = run_options["rai_config"]

# response_format is accessed from chat_options or additional_properties
# since the base class excludes it from run_options
Expand Down Expand Up @@ -408,6 +410,7 @@ async def _prepare_options(
"top_p",
"text",
"text_format",
"reasoning",
]

for property in exclude:
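
Not part of the diff: a minimal sketch of how a caller is expected to supply the new option keys added to AzureAIProjectAgentOptions above. The Reasoning keyword arguments mirror the sample added later in this PR; treat the exact constructor signature and import path as assumptions about the installed azure-ai-projects version.

# Sketch only: AzureAIProjectAgentOptions is a TypedDict (total=False),
# so a plain dict literal carrying the new keys is enough.
from azure.ai.projects.models import Reasoning

default_options = {
    # New key added in this PR; effort/summary values taken from the sample below.
    "reasoning": Reasoning(effort="medium", summary="concise"),
}
# Per the changes above, "reasoning" is copied onto the agent-definition args in
# _get_agent_reference_or_create and excluded from the per-call options in
# _prepare_options.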
python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py
@@ -195,6 +195,7 @@ async def create_agent(
opts = dict(default_options) if default_options else {}
response_format = opts.get("response_format")
rai_config = opts.get("rai_config")
reasoning = opts.get("reasoning")

args: dict[str, Any] = {"model": resolved_model}

@@ -206,6 +207,8 @@
)
if rai_config:
args["rai_config"] = rai_config
if reasoning:
args["reasoning"] = reasoning

# Normalize tools and separate MCP tools from other tools
normalized_tools = normalize_tools(tools)
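
Not part of the diff: the args assembled above appear to feed a PromptAgentDefinition, based on the test added below (which asserts definition.reasoning is the object passed through default_options). A hedged sketch of the resulting definition, not the provider's exact construction code:

# Sketch only; keyword names are assumed from the attributes asserted in the tests.
from azure.ai.projects.models import PromptAgentDefinition, Reasoning

definition = PromptAgentDefinition(
    model="gpt-5.2",  # hypothetical deployment name
    reasoning=Reasoning(effort="medium", summary="concise"),  # new pass-through added by this PR
)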
44 changes: 44 additions & 0 deletions python/packages/azure-ai/tests/test_provider.py
@@ -251,6 +251,50 @@ async def test_provider_create_agent_with_rai_config(
assert definition.rai_config is mock_rai_config


async def test_provider_create_agent_with_reasoning(
mock_project_client: MagicMock,
azure_ai_unit_test_env: dict[str, str],
) -> None:
"""Test AzureAIProjectAgentProvider.create_agent passes reasoning from default_options."""
with patch("agent_framework_azure_ai._project_provider.AzureAISettings") as mock_settings:
mock_settings.return_value.project_endpoint = azure_ai_unit_test_env["AZURE_AI_PROJECT_ENDPOINT"]
mock_settings.return_value.model_deployment_name = azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"]

provider = AzureAIProjectAgentProvider(project_client=mock_project_client)

# Mock agent creation response
mock_agent_version = MagicMock(spec=AgentVersionDetails)
mock_agent_version.id = "agent-id"
mock_agent_version.name = "test-agent"
mock_agent_version.version = "1.0"
mock_agent_version.description = None
mock_agent_version.definition = MagicMock(spec=PromptAgentDefinition)
mock_agent_version.definition.model = "gpt-5.2"
mock_agent_version.definition.instructions = None
mock_agent_version.definition.temperature = None
mock_agent_version.definition.top_p = None
mock_agent_version.definition.tools = []

mock_project_client.agents.create_version = AsyncMock(return_value=mock_agent_version)

# Create a mock Reasoning-like object
mock_reasoning = MagicMock()
mock_reasoning.effort = "medium"
mock_reasoning.summary = "concise"

# Call create_agent with reasoning in default_options
await provider.create_agent(
name="test-agent",
model="gpt-5.2",
default_options={"reasoning": mock_reasoning},
)

# Verify reasoning was passed to PromptAgentDefinition
call_args = mock_project_client.agents.create_version.call_args
definition = call_args[1]["definition"]
assert definition.reasoning is mock_reasoning


async def test_provider_get_agent_with_name(mock_project_client: MagicMock) -> None:
"""Test AzureAIProjectAgentProvider.get_agent with name parameter."""
provider = AzureAIProjectAgentProvider(project_client=mock_project_client)
1 change: 0 additions & 1 deletion python/packages/core/agent_framework/_types.py
@@ -33,7 +33,6 @@
"FinishReason",
"Role",
"TextSpanRegion",
"TextSpanRegion",
"ToolMode",
"UsageDetails",
"add_usage_details",
1 change: 1 addition & 0 deletions python/samples/getting_started/agents/azure_ai/README.md
@@ -36,6 +36,7 @@ This folder contains examples demonstrating different ways to create and use agents
| [`azure_ai_with_memory_search.py`](azure_ai_with_memory_search.py) | Shows how to use memory search functionality with Azure AI agents for conversation persistence. Demonstrates creating memory stores and enabling agents to search through conversation history. |
| [`azure_ai_with_microsoft_fabric.py`](azure_ai_with_microsoft_fabric.py) | Shows how to use Microsoft Fabric with Azure AI agents to query Fabric data sources and provide responses based on data analysis. Requires a Microsoft Fabric connection configured in your Azure AI project. |
| [`azure_ai_with_openapi.py`](azure_ai_with_openapi.py) | Shows how to integrate OpenAPI specifications with Azure AI agents using dictionary-based tool configuration. Demonstrates using external REST APIs for dynamic data lookup. |
| [`azure_ai_with_reasoning.py`](azure_ai_with_reasoning.py) | Shows how to enable reasoning on a reasoning-capable model using the `Reasoning` option, and how to read reasoning content separately from the final answer in both non-streaming and streaming runs. |
| [`azure_ai_with_web_search.py`](azure_ai_with_web_search.py) | Shows how to use the `HostedWebSearchTool` with Azure AI agents to perform web searches and retrieve up-to-date information from the internet. |

## Environment Variables
94 changes: 94 additions & 0 deletions python/samples/getting_started/agents/azure_ai/azure_ai_with_reasoning.py
@@ -0,0 +1,94 @@
# Copyright (c) Microsoft. All rights reserved.

import asyncio

from agent_framework.azure import AzureAIProjectAgentProvider
from azure.ai.projects.models import Reasoning
from azure.identity.aio import AzureCliCredential

"""
Azure AI Agent with Reasoning Example

Demonstrates how to enable reasoning capabilities using the Reasoning option.
Shows both non-streaming and streaming approaches, including how to access
reasoning content (type="text_reasoning") separately from answer content.

Requires a reasoning-capable model (e.g., gpt-5.2) deployed in your Azure AI Project and configured
as `AZURE_AI_MODEL_DEPLOYMENT_NAME` in your environment.
"""


async def non_streaming_example() -> None:
"""Example of non-streaming response (get the complete result at once)."""
print("=== Non-streaming Response Example ===")

# For authentication, run the `az login` command in a terminal or replace AzureCliCredential with your
# preferred authentication option.
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="ReasoningWeatherAgent",
instructions="You are a helpful weather agent who likes to understand the underlying physics.",
default_options={"reasoning": Reasoning(effort="medium", summary="concise")},
)

query = "How does the Bernoulli effect work?"
print(f"User: {query}")
result = await agent.run(query)

for msg in result.messages:
for content in msg.contents:
if content.type == "text_reasoning":
print(f"[Reasoning]: {content.text}")
elif content.type == "text":
print(f"[Answer]: {content.text}")
print()


async def streaming_example() -> None:
"""Example of streaming response (get results as they are generated)."""
print("=== Streaming Response Example ===")

# For authentication, run the `az login` command in a terminal or replace AzureCliCredential with your
# preferred authentication option.
async with (
AzureCliCredential() as credential,
AzureAIProjectAgentProvider(credential=credential) as provider,
):
agent = await provider.create_agent(
name="ReasoningWeatherAgent",
instructions="You are a helpful weather agent who likes to understand the underlying physics.",
default_options={"reasoning": Reasoning(effort="medium", summary="concise")},
)

query = "Help explain how air updrafts work?"
print(f"User: {query}")

shown_reasoning_label = False
shown_text_label = False
async for chunk in agent.run_stream(query):
for content in chunk.contents:
if content.type == "text_reasoning":
if not shown_reasoning_label:
print("[Reasoning]: ", end="", flush=True)
shown_reasoning_label = True
print(content.text, end="", flush=True)
elif content.type == "text":
if not shown_text_label:
print("\n\n[Answer]: ", end="", flush=True)
shown_text_label = True
print(content.text, end="", flush=True)
print("\n")


async def main() -> None:
print("=== Azure AI Agent with Reasoning Example ===")

await non_streaming_example()
await streaming_example()


if __name__ == "__main__":
asyncio.run(main())