diff --git a/.github/.linkspector.yml b/.github/.linkspector.yml index b12cfab559..1305a9d34c 100644 --- a/.github/.linkspector.yml +++ b/.github/.linkspector.yml @@ -1,5 +1,7 @@ -dirs: +dirs: - . +excludedFiles: + - ./python/CHANGELOG.md ignorePatterns: - pattern: "/github/" - pattern: "./actions" @@ -18,7 +20,7 @@ ignorePatterns: # excludedDirs: # Folders which include links to localhost, since it's not ignored with regular expressions baseUrl: https://github.com/microsoft/agent-framework/ -aliveStatusCodes: +aliveStatusCodes: - 200 - 206 - 429 diff --git a/.github/workflows/dotnet-build-and-test.yml b/.github/workflows/dotnet-build-and-test.yml index 7060d252ae..69dc92a9cb 100644 --- a/.github/workflows/dotnet-build-and-test.yml +++ b/.github/workflows/dotnet-build-and-test.yml @@ -160,6 +160,7 @@ jobs: AzureAI__DeploymentName: ${{ vars.AZUREAI__DEPLOYMENTNAME }} AzureAI__BingConnectionId: ${{ vars.AZUREAI__BINGCONECTIONID }} FOUNDRY_PROJECT_ENDPOINT: ${{ vars.FOUNDRY_PROJECT_ENDPOINT }} + FOUNDRY_MEDIA_DEPLOYMENT_NAME: ${{ vars.FOUNDRY_MEDIA_DEPLOYMENT_NAME }} FOUNDRY_MODEL_DEPLOYMENT_NAME: ${{ vars.FOUNDRY_MODEL_DEPLOYMENT_NAME }} FOUNDRY_CONNECTION_GROUNDING_TOOL: ${{ vars.FOUNDRY_CONNECTION_GROUNDING_TOOL }} diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml index 10ce37e472..7a6badaba4 100644 --- a/.github/workflows/python-tests.yml +++ b/.github/workflows/python-tests.yml @@ -17,7 +17,8 @@ jobs: fail-fast: true matrix: python-version: ["3.10", "3.11", "3.12", "3.13"] - os: [ubuntu-latest, windows-latest, macos-latest] + # todo: add macos-latest when problems are resolved + os: [ubuntu-latest, windows-latest] env: UV_PYTHON: ${{ matrix.python-version }} permissions: diff --git a/README.md b/README.md index dfdf896922..51e450ef1b 100644 --- a/README.md +++ b/README.md @@ -119,22 +119,37 @@ if __name__ == "__main__": ### Basic Agent - .NET +Create a simple Agent, using OpenAI Responses, that writes a haiku about the Microsoft 
Agent Framework + +```c# +// dotnet add package Microsoft.Agents.AI.OpenAI --prerelease +using System; +using OpenAI; + +// Replace the with your OpenAI API key. +var agent = new OpenAIClient("") + .GetOpenAIResponseClient("gpt-4o-mini") + .CreateAIAgent(name: "HaikuBot", instructions: "You are an upbeat assistant that writes beautifully."); + +Console.WriteLine(await agent.RunAsync("Write a haiku about Microsoft Agent Framework.")); +``` + +Create a simple Agent, using Azure OpenAI Responses with token based auth, that writes a haiku about the Microsoft Agent Framework + ```c# // dotnet add package Microsoft.Agents.AI.OpenAI --prerelease -// dotnet add package Azure.AI.OpenAI // dotnet add package Azure.Identity // Use `az login` to authenticate with Azure CLI using System; -using Azure.AI.OpenAI; using Azure.Identity; -using Microsoft.Agents.AI; using OpenAI; +using System.ClientModel.Primitives; -var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT")!; -var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME")!; - -var agent = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) - .GetOpenAIResponseClient(deploymentName) +// Replace and gpt-4o-mini with your Azure OpenAI resource name and deployment name. 
+var agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri("https://.openai.azure.com/openai/v1") }) + .GetOpenAIResponseClient("gpt-4o-mini") .CreateAIAgent(name: "HaikuBot", instructions: "You are an upbeat assistant that writes beautifully."); Console.WriteLine(await agent.RunAsync("Write a haiku about Microsoft Agent Framework.")); diff --git a/dotnet/Directory.Packages.props b/dotnet/Directory.Packages.props index e17c0ec081..d1ed0f9a68 100644 --- a/dotnet/Directory.Packages.props +++ b/dotnet/Directory.Packages.props @@ -10,58 +10,47 @@ 9.5.1 - - + + + + - - + - - - - - - - - - - - - - - - - - + + + + + + + + + + + + - - - - - - - - + + - @@ -69,29 +58,30 @@ + - - + - - - - - + + + + + - + - - + + + @@ -99,12 +89,14 @@ + + - + - - + + diff --git a/dotnet/README.md b/dotnet/README.md index 1d29dbbc2a..496c29d7bb 100644 --- a/dotnet/README.md +++ b/dotnet/README.md @@ -11,15 +11,16 @@ ### Basic Agent - .NET ```c# -using System; -using Azure.AI.OpenAI; using Azure.Identity; -using Microsoft.Agents.AI; +using OpenAI; +using System.ClientModel.Primitives; var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT")!; var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME")!; -var agent = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) +var agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetOpenAIResponseClient(deploymentName) .CreateAIAgent(name: "HaikuBot", instructions: "You are an upbeat assistant that writes beautifully."); diff --git a/dotnet/agent-framework-dotnet.slnx b/dotnet/agent-framework-dotnet.slnx index 81a2da4a49..400b35d298 100644 --- a/dotnet/agent-framework-dotnet.slnx +++ b/dotnet/agent-framework-dotnet.slnx @@ -113,6 +113,7 @@ + diff --git 
a/dotnet/nuget/nuget-package.props b/dotnet/nuget/nuget-package.props index 0cc8803cbd..09201fc2e4 100644 --- a/dotnet/nuget/nuget-package.props +++ b/dotnet/nuget/nuget-package.props @@ -2,8 +2,9 @@ 1.0.0 - $(VersionPrefix)-$(VersionSuffix).251007.1 - $(VersionPrefix)-preview.251007.1 + $(VersionPrefix)-$(VersionSuffix).251009.1 + $(VersionPrefix)-preview.251009.1 + 1.0.0-preview.251009.1 Debug;Release;Publish true diff --git a/dotnet/samples/GettingStarted/AgentOpenTelemetry/AgentOpenTelemetry.csproj b/dotnet/samples/GettingStarted/AgentOpenTelemetry/AgentOpenTelemetry.csproj index f9b7b3da2a..d305d3d1ab 100644 --- a/dotnet/samples/GettingStarted/AgentOpenTelemetry/AgentOpenTelemetry.csproj +++ b/dotnet/samples/GettingStarted/AgentOpenTelemetry/AgentOpenTelemetry.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/AgentOpenTelemetry/Program.cs b/dotnet/samples/GettingStarted/AgentOpenTelemetry/Program.cs index 5edfcf53d9..5be0334f82 100644 --- a/dotnet/samples/GettingStarted/AgentOpenTelemetry/Program.cs +++ b/dotnet/samples/GettingStarted/AgentOpenTelemetry/Program.cs @@ -1,15 +1,16 @@ // Copyright (c) Microsoft. All rights reserved. 
+using System.ClientModel.Primitives; using System.ComponentModel; using System.Diagnostics; using System.Diagnostics.Metrics; -using Azure.AI.OpenAI; using Azure.Identity; using Azure.Monitor.OpenTelemetry.Exporter; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Logging; +using OpenAI; using OpenTelemetry; using OpenTelemetry.Logs; using OpenTelemetry.Metrics; @@ -110,7 +111,9 @@ static async Task GetWeatherAsync([Description("The location to get the return $"The weather in {location} is cloudy with a high of 15°C."; } -using var instrumentedChatClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) +using var instrumentedChatClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetChatClient(deploymentName) .AsIChatClient() // Converts a native OpenAI SDK ChatClient into a Microsoft.Extensions.AI.IChatClient .AsBuilder() diff --git a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureFoundryModel/Program.cs b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureFoundryModel/Program.cs index a79b8058b2..264a9e45e8 100644 --- a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureFoundryModel/Program.cs +++ b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureFoundryModel/Program.cs @@ -14,9 +14,6 @@ var apiKey = Environment.GetEnvironmentVariable("AZURE_FOUNDRY_OPENAI_APIKEY"); var model = Environment.GetEnvironmentVariable("AZURE_FOUNDRY_MODEL_DEPLOYMENT") ?? "Phi-4-mini-instruct"; -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; - // Since we are using the OpenAI Client SDK, we need to override the default endpoint to point to Azure Foundry. 
var clientOptions = new OpenAIClientOptions() { Endpoint = new Uri(endpoint) }; @@ -26,8 +23,8 @@ : new OpenAIClient(new ApiKeyCredential(apiKey), clientOptions); AIAgent agent = client - .GetChatClient(model) - .CreateAIAgent(JokerInstructions, JokerName); + .GetChatClient(model) + .CreateAIAgent(instructions: "You are good at telling jokes.", name: "Joker"); // Invoke the agent and output the text result. Console.WriteLine(await agent.RunAsync("Tell me a joke about a pirate.")); diff --git a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIChatCompletion/Agent_With_AzureOpenAIChatCompletion.csproj b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIChatCompletion/Agent_With_AzureOpenAIChatCompletion.csproj index 0eacdab258..cd545ddb48 100644 --- a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIChatCompletion/Agent_With_AzureOpenAIChatCompletion.csproj +++ b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIChatCompletion/Agent_With_AzureOpenAIChatCompletion.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIChatCompletion/Program.cs b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIChatCompletion/Program.cs index 29a8077aeb..1b266ef0c8 100644 --- a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIChatCompletion/Program.cs +++ b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIChatCompletion/Program.cs @@ -2,7 +2,7 @@ // This sample shows how to create and use a simple AI agent with Azure OpenAI Chat Completion as the backend. -using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using OpenAI; @@ -10,14 +10,11 @@ var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? 
throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; - -AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) +AIAgent agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetChatClient(deploymentName) - .CreateAIAgent(JokerInstructions, JokerName); + .CreateAIAgent(instructions: "You are good at telling jokes.", name: "Joker"); // Invoke the agent and output the text result. Console.WriteLine(await agent.RunAsync("Tell me a joke about a pirate.")); diff --git a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIResponses/Agent_With_AzureOpenAIResponses.csproj b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIResponses/Agent_With_AzureOpenAIResponses.csproj index 0eacdab258..cd545ddb48 100644 --- a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIResponses/Agent_With_AzureOpenAIResponses.csproj +++ b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIResponses/Agent_With_AzureOpenAIResponses.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIResponses/Program.cs b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIResponses/Program.cs index 6b6f7582d5..296f2e4dde 100644 --- a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIResponses/Program.cs +++ b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureOpenAIResponses/Program.cs @@ -2,7 +2,7 @@ // This sample shows how to create and use a simple AI agent with Azure OpenAI Responses as the backend. 
-using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using OpenAI; @@ -10,14 +10,11 @@ var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; - -AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) +AIAgent agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetOpenAIResponseClient(deploymentName) - .CreateAIAgent(JokerInstructions, JokerName); + .CreateAIAgent(instructions: "You are good at telling jokes.", name: "Joker"); // Invoke the agent and output the text result. Console.WriteLine(await agent.RunAsync("Tell me a joke about a pirate.")); diff --git a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_ONNX/Program.cs b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_ONNX/Program.cs index b8db89d2ef..d6c306bfd1 100644 --- a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_ONNX/Program.cs +++ b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_ONNX/Program.cs @@ -10,12 +10,9 @@ // E.g. C:\repos\Phi-4-mini-instruct-onnx\cpu_and_mobile\cpu-int4-rtn-block-32-acc-level-4 var modelPath = Environment.GetEnvironmentVariable("ONNX_MODEL_PATH") ?? throw new InvalidOperationException("ONNX_MODEL_PATH is not set."); -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; - // Get a chat client for ONNX and use it to construct an AIAgent. 
using OnnxRuntimeGenAIChatClient chatClient = new(modelPath); -AIAgent agent = chatClient.CreateAIAgent(JokerInstructions, JokerName); +AIAgent agent = chatClient.CreateAIAgent(instructions: "You are good at telling jokes.", name: "Joker"); // Invoke the agent and output the text result. Console.WriteLine(await agent.RunAsync("Tell me a joke about a pirate.")); diff --git a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_Ollama/Program.cs b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_Ollama/Program.cs index 8ba07cd634..8cacfef3ef 100644 --- a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_Ollama/Program.cs +++ b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_Ollama/Program.cs @@ -9,12 +9,9 @@ var endpoint = Environment.GetEnvironmentVariable("OLLAMA_ENDPOINT") ?? throw new InvalidOperationException("OLLAMA_ENDPOINT is not set."); var modelName = Environment.GetEnvironmentVariable("OLLAMA_MODEL_NAME") ?? throw new InvalidOperationException("OLLAMA_MODEL_NAME is not set."); -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; - // Get a chat client for Ollama and use it to construct an AIAgent. AIAgent agent = new OllamaApiClient(new Uri(endpoint), modelName) - .CreateAIAgent(JokerInstructions, JokerName); + .CreateAIAgent(instructions: "You are good at telling jokes.", name: "Joker"); // Invoke the agent and output the text result. 
Console.WriteLine(await agent.RunAsync("Tell me a joke about a pirate.")); diff --git a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_OpenAIChatCompletion/Program.cs b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_OpenAIChatCompletion/Program.cs index 631a7f29ae..9b03c989e1 100644 --- a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_OpenAIChatCompletion/Program.cs +++ b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_OpenAIChatCompletion/Program.cs @@ -8,13 +8,10 @@ var apiKey = Environment.GetEnvironmentVariable("OPENAI_APIKEY") ?? throw new InvalidOperationException("OPENAI_APIKEY is not set."); var model = Environment.GetEnvironmentVariable("OPENAI_MODEL") ?? "gpt-4o-mini"; -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; - AIAgent agent = new OpenAIClient( apiKey) .GetChatClient(model) - .CreateAIAgent(JokerInstructions, JokerName); + .CreateAIAgent(instructions: "You are good at telling jokes.", name: "Joker"); // Invoke the agent and output the text result. Console.WriteLine(await agent.RunAsync("Tell me a joke about a pirate.")); diff --git a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_OpenAIResponses/Program.cs b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_OpenAIResponses/Program.cs index bd1cab2b6f..1abefa0fca 100644 --- a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_OpenAIResponses/Program.cs +++ b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_OpenAIResponses/Program.cs @@ -8,13 +8,10 @@ var apiKey = Environment.GetEnvironmentVariable("OPENAI_APIKEY") ?? throw new InvalidOperationException("OPENAI_APIKEY is not set."); var model = Environment.GetEnvironmentVariable("OPENAI_MODEL") ?? 
"gpt-4o-mini"; -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; - AIAgent agent = new OpenAIClient( apiKey) .GetOpenAIResponseClient(model) - .CreateAIAgent(JokerInstructions, JokerName); + .CreateAIAgent(instructions: "You are good at telling jokes.", name: "Joker"); // Invoke the agent and output the text result. Console.WriteLine(await agent.RunAsync("Tell me a joke about a pirate.")); diff --git a/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step01_Running/Program.cs b/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step01_Running/Program.cs index faa32d80a0..ccd42a2007 100644 --- a/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step01_Running/Program.cs +++ b/dotnet/samples/GettingStarted/AgentWithOpenAI/Agent_OpenAI_Step01_Running/Program.cs @@ -10,12 +10,9 @@ var apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new InvalidOperationException("OPENAI_API_KEY is not set."); var model = Environment.GetEnvironmentVariable("OPENAI_MODEL") ?? 
"gpt-4o-mini"; -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; - AIAgent agent = new OpenAIClient(apiKey) .GetChatClient(model) - .CreateAIAgent(JokerInstructions, JokerName); + .CreateAIAgent(instructions: "You are good at telling jokes.", name: "Joker"); UserChatMessage chatMessage = new("Tell me a joke about a pirate."); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step01_Running/Agent_Step01_Running.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step01_Running/Agent_Step01_Running.csproj index 8298cfe6e8..c8b90f35e0 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step01_Running/Agent_Step01_Running.csproj +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step01_Running/Agent_Step01_Running.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step01_Running/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step01_Running/Program.cs index d5142381c9..89f1750a61 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step01_Running/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step01_Running/Program.cs @@ -2,7 +2,7 @@ // This sample shows how to create and use a simple AI agent with Azure OpenAI as the backend. -using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using OpenAI; @@ -10,14 +10,11 @@ var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? 
"gpt-4o-mini"; -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; - -AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) - .GetChatClient(deploymentName) - .CreateAIAgent(JokerInstructions, JokerName); +AIAgent agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName) + .CreateAIAgent(instructions: "You are good at telling jokes.", name: "Joker"); // Invoke the agent and output the text result. Console.WriteLine(await agent.RunAsync("Tell me a joke about a pirate.")); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step02_MultiturnConversation/Agent_Step02_MultiturnConversation.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step02_MultiturnConversation/Agent_Step02_MultiturnConversation.csproj index 8298cfe6e8..c8b90f35e0 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step02_MultiturnConversation/Agent_Step02_MultiturnConversation.csproj +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step02_MultiturnConversation/Agent_Step02_MultiturnConversation.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step02_MultiturnConversation/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step02_MultiturnConversation/Program.cs index 17cab99bd4..8ad5b12222 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step02_MultiturnConversation/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step02_MultiturnConversation/Program.cs @@ -2,7 +2,7 @@ // This sample shows how to create and use a simple AI agent with a multi-turn conversation. -using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using OpenAI; @@ -10,14 +10,11 @@ var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? 
throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; - -AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) - .GetChatClient(deploymentName) - .CreateAIAgent(JokerInstructions, JokerName); +AIAgent agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName) + .CreateAIAgent(instructions: "You are good at telling jokes.", name: "Joker"); // Invoke the agent with a multi-turn conversation, where the context is preserved in the thread object. AgentThread thread = agent.GetNewThread(); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step03_UsingFunctionTools/Agent_Step03_UsingFunctionTools.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step03_UsingFunctionTools/Agent_Step03_UsingFunctionTools.csproj index 8298cfe6e8..c8b90f35e0 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step03_UsingFunctionTools/Agent_Step03_UsingFunctionTools.csproj +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step03_UsingFunctionTools/Agent_Step03_UsingFunctionTools.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step03_UsingFunctionTools/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step03_UsingFunctionTools/Program.cs index b2287fda4b..d0e7022826 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step03_UsingFunctionTools/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step03_UsingFunctionTools/Program.cs @@ -3,8 +3,8 @@ // This sample demonstrates how to use a ChatClientAgent with function tools. // It shows both non-streaming and streaming agent interactions using menu-related tools. 
+using System.ClientModel.Primitives; using System.ComponentModel; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; @@ -18,11 +18,11 @@ static string GetWeather([Description("The location to get the weather for.")] s => $"The weather in {location} is cloudy with a high of 15°C."; // Create the chat client and agent, and provide the function tool to the agent. -AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) - .GetChatClient(deploymentName) - .CreateAIAgent(instructions: "You are a helpful assistant", tools: [AIFunctionFactory.Create(GetWeather)]); +AIAgent agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName) + .CreateAIAgent(instructions: "You are a helpful assistant", tools: [AIFunctionFactory.Create(GetWeather)]); // Non-streaming agent interaction with function tools. 
Console.WriteLine(await agent.RunAsync("What is the weather like in Amsterdam?")); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step04_UsingFunctionToolsWithApprovals/Agent_Step04_UsingFunctionToolsWithApprovals.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step04_UsingFunctionToolsWithApprovals/Agent_Step04_UsingFunctionToolsWithApprovals.csproj index 8298cfe6e8..c8b90f35e0 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step04_UsingFunctionToolsWithApprovals/Agent_Step04_UsingFunctionToolsWithApprovals.csproj +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step04_UsingFunctionToolsWithApprovals/Agent_Step04_UsingFunctionToolsWithApprovals.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step04_UsingFunctionToolsWithApprovals/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step04_UsingFunctionToolsWithApprovals/Program.cs index 31a545dbd6..35078a8ccb 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step04_UsingFunctionToolsWithApprovals/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step04_UsingFunctionToolsWithApprovals/Program.cs @@ -5,8 +5,8 @@ // If the agent is hosted in a service, with a remote user, combine this sample with the Persisted Conversations sample to persist the chat history // while the agent is waiting for user input. +using System.ClientModel.Primitives; using System.ComponentModel; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; @@ -22,11 +22,11 @@ static string GetWeather([Description("The location to get the weather for.")] s // Create the chat client and agent. // Note that we are wrapping the function tool with ApprovalRequiredAIFunction to require user approval before invoking it. 
-AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) - .GetChatClient(deploymentName) - .CreateAIAgent(instructions: "You are a helpful assistant", tools: [new ApprovalRequiredAIFunction(AIFunctionFactory.Create(GetWeather))]); +AIAgent agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName) + .CreateAIAgent(instructions: "You are a helpful assistant", tools: [new ApprovalRequiredAIFunction(AIFunctionFactory.Create(GetWeather))]); // Call the agent and check if there are any user input requests to handle. AgentThread thread = agent.GetNewThread(); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step05_StructuredOutput/Agent_Step05_StructuredOutput.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step05_StructuredOutput/Agent_Step05_StructuredOutput.csproj index 8298cfe6e8..c8b90f35e0 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step05_StructuredOutput/Agent_Step05_StructuredOutput.csproj +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step05_StructuredOutput/Agent_Step05_StructuredOutput.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step05_StructuredOutput/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step05_StructuredOutput/Program.cs index b18d8e2d84..358a6ae164 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step05_StructuredOutput/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step05_StructuredOutput/Program.cs @@ -2,10 +2,10 @@ // This sample shows how to configure ChatClientAgent to produce structured output. 
+using System.ClientModel.Primitives; using System.ComponentModel; using System.Text.Json; using System.Text.Json.Serialization; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using OpenAI; @@ -16,9 +16,9 @@ var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; // Create chat client to be used by chat client agents. -ChatClient chatClient = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) +ChatClient chatClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetChatClient(deploymentName); // Create the ChatClientAgent with the specified name and instructions. diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step06_PersistedConversations/Agent_Step06_PersistedConversations.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step06_PersistedConversations/Agent_Step06_PersistedConversations.csproj index 8298cfe6e8..c8b90f35e0 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step06_PersistedConversations/Agent_Step06_PersistedConversations.csproj +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step06_PersistedConversations/Agent_Step06_PersistedConversations.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step06_PersistedConversations/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step06_PersistedConversations/Program.cs index 87093525e7..27586031bd 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step06_PersistedConversations/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step06_PersistedConversations/Program.cs @@ -2,8 +2,8 @@ // This sample shows how to create and use a simple AI agent with a conversation that can be persisted to disk. 
+using System.ClientModel.Primitives; using System.Text.Json; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using OpenAI; @@ -11,15 +11,12 @@ var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; - // Create the agent -AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) - .GetChatClient(deploymentName) - .CreateAIAgent(JokerInstructions, JokerName); +AIAgent agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName) + .CreateAIAgent(instructions: "You are good at telling jokes.", name: "Joker"); // Start a new thread for the agent conversation. 
AgentThread thread = agent.GetNewThread(); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Agent_Step07_3rdPartyThreadStorage.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Agent_Step07_3rdPartyThreadStorage.csproj index 1caf270c49..c8e484f16b 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Agent_Step07_3rdPartyThreadStorage.csproj +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Agent_Step07_3rdPartyThreadStorage.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Program.cs index 4daf74f569..4914be9b00 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Program.cs @@ -4,8 +4,8 @@ // This sample shows how to create and use a simple AI agent with a conversation that can be persisted to disk. +using System.ClientModel.Primitives; using System.Text.Json; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; @@ -17,30 +17,27 @@ var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; - // Create a vector store to store the chat messages in. // Replace this with a vector store implementation of your choice if you want to persist the chat history to disk. 
VectorStore vectorStore = new InMemoryVectorStore(); // Create the agent -AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) - .GetChatClient(deploymentName) - .CreateAIAgent(new ChatClientAgentOptions - { - Name = JokerName, - Instructions = JokerInstructions, - ChatMessageStoreFactory = ctx => - { - // Create a new chat message store for this agent that stores the messages in a vector store. - // Each thread must get its own copy of the VectorChatMessageStore, since the store - // also contains the id that the thread is stored under. - return new VectorChatMessageStore(vectorStore, ctx.SerializedState, ctx.JsonSerializerOptions); - } - }); +AIAgent agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName) + .CreateAIAgent(new ChatClientAgentOptions + { + Instructions = "You are good at telling jokes.", + Name = "Joker", + ChatMessageStoreFactory = ctx => + { + // Create a new chat message store for this agent that stores the messages in a vector store. + // Each thread must get its own copy of the VectorChatMessageStore, since the store + // also contains the id that the thread is stored under. + return new VectorChatMessageStore(vectorStore, ctx.SerializedState, ctx.JsonSerializerOptions); + } + }); // Start a new thread for the agent conversation. 
AgentThread thread = agent.GetNewThread(); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step08_Observability/Agent_Step08_Observability.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step08_Observability/Agent_Step08_Observability.csproj index 1f8c39c55f..9c97ab4dfd 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step08_Observability/Agent_Step08_Observability.csproj +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step08_Observability/Agent_Step08_Observability.csproj @@ -9,8 +9,8 @@ - + diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step08_Observability/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step08_Observability/Program.cs index 2826eb06b0..c05629b838 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step08_Observability/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step08_Observability/Program.cs @@ -2,8 +2,9 @@ // This sample shows how to create and use a simple AI agent with Azure OpenAI as the backend that logs telemetry using OpenTelemetry. -using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; +using Azure.Monitor.OpenTelemetry.Exporter; using Microsoft.Agents.AI; using OpenAI; using OpenTelemetry; @@ -11,22 +12,26 @@ var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; - -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; +var applicationInsightsConnectionString = Environment.GetEnvironmentVariable("APPLICATIONINSIGHTS_CONNECTION_STRING"); // Create TracerProvider with console exporter // This will output the telemetry data to the console. 
string sourceName = Guid.NewGuid().ToString("N"); -using var tracerProvider = Sdk.CreateTracerProviderBuilder() +var tracerProviderBuilder = Sdk.CreateTracerProviderBuilder() .AddSource(sourceName) - .AddConsoleExporter() - .Build(); + .AddConsoleExporter(); +if (!string.IsNullOrWhiteSpace(applicationInsightsConnectionString)) +{ + tracerProviderBuilder.AddAzureMonitorTraceExporter(options => options.ConnectionString = applicationInsightsConnectionString); +} +using var tracerProvider = tracerProviderBuilder.Build(); // Create the agent, and enable OpenTelemetry instrumentation. -AIAgent agent = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) +AIAgent agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetChatClient(deploymentName) - .CreateAIAgent(JokerInstructions, JokerName) + .CreateAIAgent(instructions: "You are good at telling jokes.", name: "Joker") .AsBuilder() .UseOpenTelemetry(sourceName: sourceName) .Build(); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step09_DependencyInjection/Agent_Step09_DependencyInjection.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step09_DependencyInjection/Agent_Step09_DependencyInjection.csproj index b0890e1817..ba12ba3e92 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step09_DependencyInjection/Agent_Step09_DependencyInjection.csproj +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step09_DependencyInjection/Agent_Step09_DependencyInjection.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step09_DependencyInjection/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step09_DependencyInjection/Program.cs index 6f83fc26b9..34222955c0 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step09_DependencyInjection/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step09_DependencyInjection/Program.cs @@ 
-4,12 +4,13 @@ // This sample shows how to use dependency injection to register an AIAgent and use it from a hosted service with a user input chat loop. -using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Hosting; +using OpenAI; var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; @@ -18,14 +19,13 @@ HostApplicationBuilder builder = Host.CreateApplicationBuilder(args); // Add agent options to the service collection. -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; -builder.Services.AddSingleton(new ChatClientAgentOptions(JokerInstructions, JokerName)); +builder.Services.AddSingleton( + new ChatClientAgentOptions(instructions: "You are good at telling jokes.", name: "Joker")); // Add a chat client to the service collection. 
-builder.Services.AddKeyedChatClient("AzureOpenAI", (sp) => new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) +builder.Services.AddKeyedChatClient("AzureOpenAI", (sp) => new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetChatClient(deploymentName) .AsIChatClient()); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step10_AsMcpTool/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step10_AsMcpTool/Program.cs index 267f10eed3..16bc3cd51e 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step10_AsMcpTool/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step10_AsMcpTool/Program.cs @@ -12,18 +12,14 @@ var endpoint = Environment.GetEnvironmentVariable("AZURE_FOUNDRY_PROJECT_ENDPOINT") ?? throw new InvalidOperationException("AZURE_FOUNDRY_PROJECT_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_FOUNDRY_PROJECT_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; -const string JokerName = "Joker"; -const string JokerDescription = "An agent that tells jokes."; -const string JokerInstructions = "You are good at telling jokes, and you always start each joke with 'Aye aye, captain!'."; - var persistentAgentsClient = new PersistentAgentsClient(endpoint, new AzureCliCredential()); // Create a server side persistent agent var agentMetadata = await persistentAgentsClient.Administration.CreateAgentAsync( model: deploymentName, - name: JokerName, - description: JokerDescription, - instructions: JokerInstructions); + instructions: "You are good at telling jokes, and you always start each joke with 'Aye aye, captain!'.", + name: "Joker", + description: "An agent that tells jokes."); // Retrieve the server side persistent agent as an AIAgent. 
AIAgent agent = await persistentAgentsClient.GetAIAgentAsync(agentMetadata.Value.Id); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step11_UsingImages/Agent_Step11_UsingImages.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step11_UsingImages/Agent_Step11_UsingImages.csproj index 7e9e70c763..22546f5f1d 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step11_UsingImages/Agent_Step11_UsingImages.csproj +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step11_UsingImages/Agent_Step11_UsingImages.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step11_UsingImages/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step11_UsingImages/Program.cs index 8e8c5701ad..7ede32fed0 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step11_UsingImages/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step11_UsingImages/Program.cs @@ -2,7 +2,7 @@ // This sample shows how to use Image Multi-Modality with an AI agent. -using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Extensions.AI; using OpenAI; @@ -10,7 +10,9 @@ var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = System.Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? 
"gpt-4o"; -var agent = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) +var agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetChatClient(deploymentName) .CreateAIAgent( name: "VisionAgent", diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step12_AsFunctionTool/Agent_Step12_AsFunctionTool.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step12_AsFunctionTool/Agent_Step12_AsFunctionTool.csproj index 21c8d9e49e..2b0c1f946f 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step12_AsFunctionTool/Agent_Step12_AsFunctionTool.csproj +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step12_AsFunctionTool/Agent_Step12_AsFunctionTool.csproj @@ -10,7 +10,6 @@ - diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step12_AsFunctionTool/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step12_AsFunctionTool/Program.cs index e26848f06a..f7e76fd945 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step12_AsFunctionTool/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step12_AsFunctionTool/Program.cs @@ -2,8 +2,8 @@ // This sample shows how to create and use a Azure OpenAI AI agent as a function tool. +using System.ClientModel.Primitives; using System.ComponentModel; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; @@ -16,10 +16,11 @@ static string GetWeather([Description("The location to get the weather for.")] string location) => $"The weather in {location} is cloudy with a high of 15°C."; +var clientOptions = new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }; +var apiCredential = new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"); + // Create the chat client and agent, and provide the function tool to the agent. 
-AIAgent weatherAgent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) +AIAgent weatherAgent = new OpenAIClient(apiCredential, clientOptions) .GetChatClient(deploymentName) .CreateAIAgent( instructions: "You answer questions about the weather.", @@ -28,11 +29,9 @@ static string GetWeather([Description("The location to get the weather for.")] s tools: [AIFunctionFactory.Create(GetWeather)]); // Create the main agent, and provide the weather agent as a function tool. -AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) - .GetChatClient(deploymentName) - .CreateAIAgent(instructions: "You are a helpful assistant who responds in French.", tools: [weatherAgent.AsAIFunction()]); +AIAgent agent = new OpenAIClient(apiCredential, clientOptions) + .GetChatClient(deploymentName) + .CreateAIAgent(instructions: "You are a helpful assistant who responds in French.", tools: [weatherAgent.AsAIFunction()]); // Invoke the agent and output the text result. 
Console.WriteLine(await agent.RunAsync("What is the weather like in Amsterdam?")); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step13_Memory/Agent_Step13_Memory.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step13_Memory/Agent_Step13_Memory.csproj index 8298cfe6e8..c8b90f35e0 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step13_Memory/Agent_Step13_Memory.csproj +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step13_Memory/Agent_Step13_Memory.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step13_Memory/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step13_Memory/Program.cs index ad59deb97f..cafe6ae000 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step13_Memory/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step13_Memory/Program.cs @@ -6,9 +6,9 @@ // The component adds a prompt to ask for this information if it is not already known // and provides it to the model before each invocation if known. +using System.ClientModel.Primitives; using System.Text; using System.Text.Json; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; @@ -19,9 +19,9 @@ var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? 
"gpt-4o-mini"; -ChatClient chatClient = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) +ChatClient chatClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetChatClient(deploymentName); // Create the agent and provide a factory to add our custom memory component to diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step14_Middleware/Agent_Step14_Middleware.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step14_Middleware/Agent_Step14_Middleware.csproj index 09beb78195..54a3234c02 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step14_Middleware/Agent_Step14_Middleware.csproj +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step14_Middleware/Agent_Step14_Middleware.csproj @@ -11,7 +11,6 @@ - diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step14_Middleware/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step14_Middleware/Program.cs index ea59c84ba4..6d2e286fe4 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step14_Middleware/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step14_Middleware/Program.cs @@ -5,20 +5,23 @@ // function invocation (logging and result overrides), and human-in-the-loop // approval workflows for sensitive function calls. +using System.ClientModel.Primitives; using System.ComponentModel; using System.Text.RegularExpressions; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Agents.AI.ChatClient; using Microsoft.Extensions.AI; +using OpenAI; // Get Azure AI Foundry configuration from environment variables var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = System.Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? 
"gpt-4o"; // Get a client to create/retrieve server side agents with -var azureOpenAIClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) +var azureOpenAIClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetChatClient(deploymentName); [Description("Get the weather for a given location.")] diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step15_Plugins/Agent_Step15_Plugins.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step15_Plugins/Agent_Step15_Plugins.csproj index c1cf0bf930..9e7320fcda 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step15_Plugins/Agent_Step15_Plugins.csproj +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step15_Plugins/Agent_Step15_Plugins.csproj @@ -13,7 +13,6 @@ - diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step15_Plugins/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step15_Plugins/Program.cs index a9f21339f6..e8db4f13ac 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step15_Plugins/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step15_Plugins/Program.cs @@ -9,7 +9,7 @@ // as AI functions. The AsAITools method of the plugin class shows how to specify // which methods should be exposed to the AI agent. 
-using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; @@ -27,16 +27,13 @@ IServiceProvider serviceProvider = services.BuildServiceProvider(); -const string AgentName = "Assistant"; -const string AgentInstructions = "You are a helpful assistant that helps people find information."; - -AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) - .GetChatClient(deploymentName) - .CreateAIAgent( - instructions: AgentInstructions, - name: AgentName, +AIAgent agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName) + .CreateAIAgent( + instructions: "You are a helpful assistant that helps people find information.", + name: "Assistant", tools: [.. serviceProvider.GetRequiredService().AsAITools()], services: serviceProvider); // Pass the service provider to the agent so it will be available to plugin functions to resolve dependencies. diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step16_ChatReduction/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step16_ChatReduction/Program.cs index 365bdca371..590b5308d5 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step16_ChatReduction/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step16_ChatReduction/Program.cs @@ -14,20 +14,17 @@ var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? 
"gpt-4o-mini"; -const string JokerName = "Joker"; -const string JokerInstructions = "You are good at telling jokes."; - // Construct the agent, and provide a factory to create an in-memory chat message store with a reducer that keeps only the last 2 non-system messages. AIAgent agent = new AzureOpenAIClient( new Uri(endpoint), new AzureCliCredential()) - .GetChatClient(deploymentName) - .CreateAIAgent(new ChatClientAgentOptions - { - Name = JokerName, - Instructions = JokerInstructions, - ChatMessageStoreFactory = ctx => new InMemoryChatMessageStore(new MessageCountingChatReducer(2), ctx.SerializedState, ctx.JsonSerializerOptions) - }); + .GetChatClient(deploymentName) + .CreateAIAgent(new ChatClientAgentOptions + { + Instructions = "You are good at telling jokes.", + Name = "Joker", + ChatMessageStoreFactory = ctx => new InMemoryChatMessageStore(new MessageCountingChatReducer(2), ctx.SerializedState, ctx.JsonSerializerOptions) + }); AgentThread thread = agent.GetNewThread(); diff --git a/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server/Agent_MCP_Server.csproj b/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server/Agent_MCP_Server.csproj index ececa3ea5a..2ce7979d0f 100644 --- a/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server/Agent_MCP_Server.csproj +++ b/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server/Agent_MCP_Server.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server/Program.cs b/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server/Program.cs index 1a0d236961..be40fef3f5 100644 --- a/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server/Program.cs +++ b/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server/Program.cs @@ -2,7 +2,7 @@ // This sample shows how to create and use a simple AI agent with tools from an MCP Server. 
-using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; @@ -23,9 +23,9 @@ // Retrieve the list of tools available on the GitHub server var mcpTools = await mcpClient.ListToolsAsync().ConfigureAwait(false); -AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) +AIAgent agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetChatClient(deploymentName) .CreateAIAgent(instructions: "You answer questions related to GitHub repositories only.", tools: [.. mcpTools.Cast()]); diff --git a/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server_Auth/Agent_MCP_Server_Auth.csproj b/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server_Auth/Agent_MCP_Server_Auth.csproj index e068b6745a..2864668236 100644 --- a/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server_Auth/Agent_MCP_Server_Auth.csproj +++ b/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server_Auth/Agent_MCP_Server_Auth.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server_Auth/Program.cs b/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server_Auth/Program.cs index aae520eec9..1cf983812f 100644 --- a/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server_Auth/Program.cs +++ b/dotnet/samples/GettingStarted/ModelContextProtocol/Agent_MCP_Server_Auth/Program.cs @@ -2,11 +2,11 @@ // This sample shows how to create and use a simple AI agent with tools from an MCP Server that requires authentication. 
+using System.ClientModel.Primitives; using System.Diagnostics; using System.Net; using System.Text; using System.Web; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.Logging; @@ -46,9 +46,9 @@ // Retrieve the list of tools available on the GitHub server var mcpTools = await mcpClient.ListToolsAsync().ConfigureAwait(false); -AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) +AIAgent agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetChatClient(deploymentName) .CreateAIAgent(instructions: "You answer questions related to the weather.", tools: [.. mcpTools]); diff --git a/dotnet/samples/GettingStarted/ModelContextProtocol/FoundryAgent_Hosted_MCP/Program.cs b/dotnet/samples/GettingStarted/ModelContextProtocol/FoundryAgent_Hosted_MCP/Program.cs index eb1741a763..32017af194 100644 --- a/dotnet/samples/GettingStarted/ModelContextProtocol/FoundryAgent_Hosted_MCP/Program.cs +++ b/dotnet/samples/GettingStarted/ModelContextProtocol/FoundryAgent_Hosted_MCP/Program.cs @@ -9,9 +9,6 @@ var endpoint = Environment.GetEnvironmentVariable("AZURE_FOUNDRY_PROJECT_ENDPOINT") ?? throw new InvalidOperationException("AZURE_FOUNDRY_PROJECT_ENDPOINT is not set."); var model = Environment.GetEnvironmentVariable("AZURE_FOUNDRY_PROJECT_MODEL_ID") ?? "gpt-4.1-mini"; -const string AgentName = "MicrosoftLearnAgent"; -const string AgentInstructions = "You answer questions by searching the Microsoft Learn content only."; - // Get a client to create/retrieve server side agents with. var persistentAgentsClient = new PersistentAgentsClient(endpoint, new AzureCliCredential()); @@ -24,8 +21,8 @@ // Create a server side persistent agent with the Azure.AI.Agents.Persistent SDK. 
var agentMetadata = await persistentAgentsClient.Administration.CreateAgentAsync( model: model, - name: AgentName, - instructions: AgentInstructions, + name: "MicrosoftLearnAgent", + instructions: "You answer questions by searching the Microsoft Learn content only.", tools: [mcpTool]); // Retrieve an already created server side persistent agent as an AIAgent. diff --git a/dotnet/samples/GettingStarted/Workflows/Agents/CustomAgentExecutors/CustomAgentExecutors.csproj b/dotnet/samples/GettingStarted/Workflows/Agents/CustomAgentExecutors/CustomAgentExecutors.csproj index 354163794e..84ac3db0d5 100644 --- a/dotnet/samples/GettingStarted/Workflows/Agents/CustomAgentExecutors/CustomAgentExecutors.csproj +++ b/dotnet/samples/GettingStarted/Workflows/Agents/CustomAgentExecutors/CustomAgentExecutors.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Workflows/Agents/CustomAgentExecutors/Program.cs b/dotnet/samples/GettingStarted/Workflows/Agents/CustomAgentExecutors/Program.cs index 600182974c..30d93e7672 100644 --- a/dotnet/samples/GettingStarted/Workflows/Agents/CustomAgentExecutors/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/Agents/CustomAgentExecutors/Program.cs @@ -1,13 +1,14 @@ // Copyright (c) Microsoft. All rights reserved. +using System.ClientModel.Primitives; using System.Text.Json; using System.Text.Json.Serialization; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Agents.AI.Workflows; using Microsoft.Agents.AI.Workflows.Reflection; using Microsoft.Extensions.AI; +using OpenAI; namespace WorkflowCustomAgentExecutorsSample; @@ -35,7 +36,10 @@ private static async Task Main() // Set up the Azure OpenAI client var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? 
"gpt-4o-mini"; - var chatClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetChatClient(deploymentName).AsIChatClient(); + var chatClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName).AsIChatClient(); // Create the executors var sloganWriter = new SloganWriterExecutor("SloganWriter", chatClient); @@ -134,17 +138,17 @@ public SloganWriterExecutor(string id, IChatClient chatClient) : base(id) this._thread = this._agent.GetNewThread(); } - public async ValueTask HandleAsync(string message, IWorkflowContext context) + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) { - var result = await this._agent.RunAsync(message, this._thread); + var result = await this._agent.RunAsync(message, this._thread, cancellationToken: cancellationToken); var sloganResult = JsonSerializer.Deserialize(result.Text) ?? throw new InvalidOperationException("Failed to deserialize slogan result."); - await context.AddEventAsync(new SloganGeneratedEvent(sloganResult)); + await context.AddEventAsync(new SloganGeneratedEvent(sloganResult), cancellationToken); return sloganResult; } - public async ValueTask HandleAsync(FeedbackResult message, IWorkflowContext context) + public async ValueTask HandleAsync(FeedbackResult message, IWorkflowContext context, CancellationToken cancellationToken = default) { var feedbackMessage = $""" Here is the feedback on your previous slogan: @@ -155,10 +159,10 @@ public async ValueTask HandleAsync(FeedbackResult message, IWorkfl Please use this feedback to improve your slogan. 
"""; - var result = await this._agent.RunAsync(feedbackMessage, this._thread); + var result = await this._agent.RunAsync(feedbackMessage, this._thread, cancellationToken: cancellationToken); var sloganResult = JsonSerializer.Deserialize(result.Text) ?? throw new InvalidOperationException("Failed to deserialize slogan result."); - await context.AddEventAsync(new SloganGeneratedEvent(sloganResult)); + await context.AddEventAsync(new SloganGeneratedEvent(sloganResult), cancellationToken); return sloganResult; } } @@ -205,7 +209,7 @@ public FeedbackExecutor(string id, IChatClient chatClient) : base(id) this._thread = this._agent.GetNewThread(); } - public async ValueTask HandleAsync(SloganResult message, IWorkflowContext context) + public async ValueTask HandleAsync(SloganResult message, IWorkflowContext context, CancellationToken cancellationToken = default) { var sloganMessage = $""" Here is a slogan for the task '{message.Task}': @@ -213,24 +217,24 @@ public async ValueTask HandleAsync(SloganResult message, IWorkflowContext contex Please provide feedback on this slogan, including comments, a rating from 1 to 10, and suggested actions for improvement. """; - var response = await this._agent.RunAsync(sloganMessage, this._thread); + var response = await this._agent.RunAsync(sloganMessage, this._thread, cancellationToken: cancellationToken); var feedback = JsonSerializer.Deserialize(response.Text) ?? 
throw new InvalidOperationException("Failed to deserialize feedback."); - await context.AddEventAsync(new FeedbackEvent(feedback)); + await context.AddEventAsync(new FeedbackEvent(feedback), cancellationToken); if (feedback.Rating >= this.MinimumRating) { - await context.YieldOutputAsync($"The following slogan was accepted:\n\n{message.Slogan}"); + await context.YieldOutputAsync($"The following slogan was accepted:\n\n{message.Slogan}", cancellationToken); return; } if (this._attempts >= this.MaxAttempts) { - await context.YieldOutputAsync($"The slogan was rejected after {this.MaxAttempts} attempts. Final slogan:\n\n{message.Slogan}"); + await context.YieldOutputAsync($"The slogan was rejected after {this.MaxAttempts} attempts. Final slogan:\n\n{message.Slogan}", cancellationToken); return; } - await context.SendMessageAsync(feedback); + await context.SendMessageAsync(feedback, cancellationToken: cancellationToken); this._attempts++; } } diff --git a/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/Program.cs b/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/Program.cs index 49305d9908..67589cc7fe 100644 --- a/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/Program.cs @@ -1,10 +1,11 @@ // Copyright (c) Microsoft. All rights reserved. -using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Agents.AI.Workflows; using Microsoft.Extensions.AI; +using OpenAI; namespace WorkflowAsAnAgentsSample; @@ -32,7 +33,10 @@ private static async Task Main() // Set up the Azure OpenAI client var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? 
"gpt-4o-mini"; - var chatClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetChatClient(deploymentName).AsIChatClient(); + var chatClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName).AsIChatClient(); // Create the workflow and turn it into an agent var workflow = await WorkflowHelper.GetWorkflowAsync(chatClient).ConfigureAwait(false); diff --git a/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/WorkflowAsAnAgent.csproj b/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/WorkflowAsAnAgent.csproj index 354163794e..84ac3db0d5 100644 --- a/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/WorkflowAsAnAgent.csproj +++ b/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/WorkflowAsAnAgent.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/WorkflowHelper.cs b/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/WorkflowHelper.cs index 1da5e6932e..3e2261918b 100644 --- a/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/WorkflowHelper.cs +++ b/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/WorkflowHelper.cs @@ -51,13 +51,15 @@ private sealed class ConcurrentStartExecutor() : /// /// The user message to process /// Workflow context for accessing workflow services and adding events - public async ValueTask HandleAsync(List message, IWorkflowContext context) + /// The to monitor for cancellation requests. + /// The default is . + public async ValueTask HandleAsync(List message, IWorkflowContext context, CancellationToken cancellationToken = default) { // Broadcast the message to all connected agents. Receiving agents will queue // the message but will not start processing until they receive a turn token. 
- await context.SendMessageAsync(message); + await context.SendMessageAsync(message, cancellationToken: cancellationToken); // Broadcast the turn token to kick off the agents. - await context.SendMessageAsync(new TurnToken(emitEvents: true)); + await context.SendMessageAsync(new TurnToken(emitEvents: true), cancellationToken: cancellationToken); } } @@ -75,14 +77,16 @@ private sealed class ConcurrentAggregationExecutor() : /// /// The message from the agent /// Workflow context for accessing workflow services and adding events - public async ValueTask HandleAsync(ChatMessage message, IWorkflowContext context) + /// The to monitor for cancellation requests. + /// The default is . + public async ValueTask HandleAsync(ChatMessage message, IWorkflowContext context, CancellationToken cancellationToken = default) { this._messages.Add(message); if (this._messages.Count == 2) { var formattedMessages = string.Join(Environment.NewLine, this._messages.Select(m => $"{m.Text}")); - await context.YieldOutputAsync(formattedMessages); + await context.YieldOutputAsync(formattedMessages, cancellationToken); } } } diff --git a/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointAndRehydrate/WorkflowHelper.cs b/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointAndRehydrate/WorkflowHelper.cs index f911fa8a05..cb568fca35 100644 --- a/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointAndRehydrate/WorkflowHelper.cs +++ b/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointAndRehydrate/WorkflowHelper.cs @@ -69,20 +69,20 @@ public GuessNumberExecutor(int lowerBound, int upperBound) : this() private int NextGuess => (this.LowerBound + this.UpperBound) / 2; - public async ValueTask HandleAsync(NumberSignal message, IWorkflowContext context) + public async ValueTask HandleAsync(NumberSignal message, IWorkflowContext context, CancellationToken cancellationToken = default) { switch (message) { case NumberSignal.Init: - await 
context.SendMessageAsync(this.NextGuess).ConfigureAwait(false); + await context.SendMessageAsync(this.NextGuess, cancellationToken: cancellationToken).ConfigureAwait(false); break; case NumberSignal.Above: this.UpperBound = this.NextGuess - 1; - await context.SendMessageAsync(this.NextGuess).ConfigureAwait(false); + await context.SendMessageAsync(this.NextGuess, cancellationToken: cancellationToken).ConfigureAwait(false); break; case NumberSignal.Below: this.LowerBound = this.NextGuess + 1; - await context.SendMessageAsync(this.NextGuess).ConfigureAwait(false); + await context.SendMessageAsync(this.NextGuess, cancellationToken: cancellationToken).ConfigureAwait(false); break; } } @@ -92,14 +92,14 @@ public async ValueTask HandleAsync(NumberSignal message, IWorkflowContext contex /// This must be overridden to save any state that is needed to resume the executor. /// protected override ValueTask OnCheckpointingAsync(IWorkflowContext context, CancellationToken cancellationToken = default) => - context.QueueStateUpdateAsync(StateKey, (this.LowerBound, this.UpperBound)); + context.QueueStateUpdateAsync(StateKey, (this.LowerBound, this.UpperBound), cancellationToken: cancellationToken); /// /// Restore the state of the executor from a checkpoint. /// This must be overridden to restore any state that was saved during checkpointing. 
/// protected override async ValueTask OnCheckpointRestoredAsync(IWorkflowContext context, CancellationToken cancellationToken = default) => - (this.LowerBound, this.UpperBound) = await context.ReadStateAsync<(int, int)>(StateKey).ConfigureAwait(false); + (this.LowerBound, this.UpperBound) = await context.ReadStateAsync<(int, int)>(StateKey, cancellationToken: cancellationToken).ConfigureAwait(false); } /// @@ -120,20 +120,20 @@ public JudgeExecutor(int targetNumber) : this() this._targetNumber = targetNumber; } - public async ValueTask HandleAsync(int message, IWorkflowContext context) + public async ValueTask HandleAsync(int message, IWorkflowContext context, CancellationToken cancellationToken = default) { this._tries++; if (message == this._targetNumber) { - await context.YieldOutputAsync($"{this._targetNumber} found in {this._tries} tries!").ConfigureAwait(false); + await context.YieldOutputAsync($"{this._targetNumber} found in {this._tries} tries!", cancellationToken: cancellationToken).ConfigureAwait(false); } else if (message < this._targetNumber) { - await context.SendMessageAsync(NumberSignal.Below).ConfigureAwait(false); + await context.SendMessageAsync(NumberSignal.Below, cancellationToken: cancellationToken).ConfigureAwait(false); } else { - await context.SendMessageAsync(NumberSignal.Above).ConfigureAwait(false); + await context.SendMessageAsync(NumberSignal.Above, cancellationToken: cancellationToken).ConfigureAwait(false); } } @@ -142,12 +142,12 @@ public async ValueTask HandleAsync(int message, IWorkflowContext context) /// This must be overridden to save any state that is needed to resume the executor. /// protected override ValueTask OnCheckpointingAsync(IWorkflowContext context, CancellationToken cancellationToken = default) => - context.QueueStateUpdateAsync(StateKey, this._tries); + context.QueueStateUpdateAsync(StateKey, this._tries, cancellationToken: cancellationToken); /// /// Restore the state of the executor from a checkpoint. 
/// This must be overridden to restore any state that was saved during checkpointing. /// protected override async ValueTask OnCheckpointRestoredAsync(IWorkflowContext context, CancellationToken cancellationToken = default) => - this._tries = await context.ReadStateAsync(StateKey).ConfigureAwait(false); + this._tries = await context.ReadStateAsync(StateKey, cancellationToken: cancellationToken).ConfigureAwait(false); } diff --git a/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointAndResume/WorkflowHelper.cs b/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointAndResume/WorkflowHelper.cs index 8bfabfbf4d..ba6268c924 100644 --- a/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointAndResume/WorkflowHelper.cs +++ b/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointAndResume/WorkflowHelper.cs @@ -69,20 +69,20 @@ public GuessNumberExecutor(int lowerBound, int upperBound) : this() private int NextGuess => (this.LowerBound + this.UpperBound) / 2; - public async ValueTask HandleAsync(NumberSignal message, IWorkflowContext context) + public async ValueTask HandleAsync(NumberSignal message, IWorkflowContext context, CancellationToken cancellationToken = default) { switch (message) { case NumberSignal.Init: - await context.SendMessageAsync(this.NextGuess).ConfigureAwait(false); + await context.SendMessageAsync(this.NextGuess, cancellationToken: cancellationToken).ConfigureAwait(false); break; case NumberSignal.Above: this.UpperBound = this.NextGuess - 1; - await context.SendMessageAsync(this.NextGuess).ConfigureAwait(false); + await context.SendMessageAsync(this.NextGuess, cancellationToken: cancellationToken).ConfigureAwait(false); break; case NumberSignal.Below: this.LowerBound = this.NextGuess + 1; - await context.SendMessageAsync(this.NextGuess).ConfigureAwait(false); + await context.SendMessageAsync(this.NextGuess, cancellationToken: cancellationToken).ConfigureAwait(false); break; } } @@ -92,14 +92,14 @@ public async ValueTask 
HandleAsync(NumberSignal message, IWorkflowContext contex /// This must be overridden to save any state that is needed to resume the executor. /// protected override ValueTask OnCheckpointingAsync(IWorkflowContext context, CancellationToken cancellationToken = default) => - context.QueueStateUpdateAsync(StateKey, (this.LowerBound, this.UpperBound)); + context.QueueStateUpdateAsync(StateKey, (this.LowerBound, this.UpperBound), cancellationToken: cancellationToken); /// /// Restore the state of the executor from a checkpoint. /// This must be overridden to restore any state that was saved during checkpointing. /// protected override async ValueTask OnCheckpointRestoredAsync(IWorkflowContext context, CancellationToken cancellationToken = default) => - (this.LowerBound, this.UpperBound) = await context.ReadStateAsync<(int, int)>(StateKey).ConfigureAwait(false); + (this.LowerBound, this.UpperBound) = await context.ReadStateAsync<(int, int)>(StateKey, cancellationToken: cancellationToken).ConfigureAwait(false); } /// @@ -120,20 +120,20 @@ public JudgeExecutor(int targetNumber) : this() this._targetNumber = targetNumber; } - public async ValueTask HandleAsync(int message, IWorkflowContext context) + public async ValueTask HandleAsync(int message, IWorkflowContext context, CancellationToken cancellationToken = default) { this._tries++; if (message == this._targetNumber) { - await context.YieldOutputAsync($"{this._targetNumber} found in {this._tries} tries!").ConfigureAwait(false); + await context.YieldOutputAsync($"{this._targetNumber} found in {this._tries} tries!", cancellationToken).ConfigureAwait(false); } else if (message < this._targetNumber) { - await context.SendMessageAsync(NumberSignal.Below).ConfigureAwait(false); + await context.SendMessageAsync(NumberSignal.Below, cancellationToken: cancellationToken).ConfigureAwait(false); } else { - await context.SendMessageAsync(NumberSignal.Above).ConfigureAwait(false); + await context.SendMessageAsync(NumberSignal.Above, 
cancellationToken: cancellationToken).ConfigureAwait(false); } } @@ -142,12 +142,12 @@ public async ValueTask HandleAsync(int message, IWorkflowContext context) /// This must be overridden to save any state that is needed to resume the executor. /// protected override ValueTask OnCheckpointingAsync(IWorkflowContext context, CancellationToken cancellationToken = default) => - context.QueueStateUpdateAsync(StateKey, this._tries); + context.QueueStateUpdateAsync(StateKey, this._tries, cancellationToken: cancellationToken); /// /// Restore the state of the executor from a checkpoint. /// This must be overridden to restore any state that was saved during checkpointing. /// protected override async ValueTask OnCheckpointRestoredAsync(IWorkflowContext context, CancellationToken cancellationToken = default) => - this._tries = await context.ReadStateAsync(StateKey).ConfigureAwait(false); + this._tries = await context.ReadStateAsync(StateKey, cancellationToken: cancellationToken).ConfigureAwait(false); } diff --git a/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointWithHumanInTheLoop/WorkflowHelper.cs b/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointWithHumanInTheLoop/WorkflowHelper.cs index 42f77ae387..9fabfee873 100644 --- a/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointWithHumanInTheLoop/WorkflowHelper.cs +++ b/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointWithHumanInTheLoop/WorkflowHelper.cs @@ -69,21 +69,21 @@ public JudgeExecutor(int targetNumber) : this() this._targetNumber = targetNumber; } - public async ValueTask HandleAsync(int message, IWorkflowContext context) + public async ValueTask HandleAsync(int message, IWorkflowContext context, CancellationToken cancellationToken = default) { this._tries++; if (message == this._targetNumber) { - await context.YieldOutputAsync($"{this._targetNumber} found in {this._tries} tries!") + await context.YieldOutputAsync($"{this._targetNumber} found in {this._tries} tries!", 
cancellationToken) .ConfigureAwait(false); } else if (message < this._targetNumber) { - await context.SendMessageAsync(new SignalWithNumber(NumberSignal.Below, message)).ConfigureAwait(false); + await context.SendMessageAsync(new SignalWithNumber(NumberSignal.Below, message), cancellationToken: cancellationToken).ConfigureAwait(false); } else { - await context.SendMessageAsync(new SignalWithNumber(NumberSignal.Above, message)).ConfigureAwait(false); + await context.SendMessageAsync(new SignalWithNumber(NumberSignal.Above, message), cancellationToken: cancellationToken).ConfigureAwait(false); } } @@ -92,12 +92,12 @@ await context.YieldOutputAsync($"{this._targetNumber} found in {this._tries} tri /// This must be overridden to save any state that is needed to resume the executor. /// protected override ValueTask OnCheckpointingAsync(IWorkflowContext context, CancellationToken cancellationToken = default) => - context.QueueStateUpdateAsync(StateKey, this._tries); + context.QueueStateUpdateAsync(StateKey, this._tries, cancellationToken: cancellationToken); /// /// Restore the state of the executor from a checkpoint. /// This must be overridden to restore any state that was saved during checkpointing. 
/// protected override async ValueTask OnCheckpointRestoredAsync(IWorkflowContext context, CancellationToken cancellationToken = default) => - this._tries = await context.ReadStateAsync(StateKey).ConfigureAwait(false); + this._tries = await context.ReadStateAsync(StateKey, cancellationToken: cancellationToken).ConfigureAwait(false); } diff --git a/dotnet/samples/GettingStarted/Workflows/Concurrent/Concurrent/Concurrent.csproj b/dotnet/samples/GettingStarted/Workflows/Concurrent/Concurrent/Concurrent.csproj index e11fd9fa9a..6ac9ca1d06 100644 --- a/dotnet/samples/GettingStarted/Workflows/Concurrent/Concurrent/Concurrent.csproj +++ b/dotnet/samples/GettingStarted/Workflows/Concurrent/Concurrent/Concurrent.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Workflows/Concurrent/Concurrent/Program.cs b/dotnet/samples/GettingStarted/Workflows/Concurrent/Concurrent/Program.cs index d43474ece0..dee70aa234 100644 --- a/dotnet/samples/GettingStarted/Workflows/Concurrent/Concurrent/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/Concurrent/Concurrent/Program.cs @@ -1,11 +1,12 @@ // Copyright (c) Microsoft. All rights reserved. -using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Agents.AI.Workflows; using Microsoft.Agents.AI.Workflows.Reflection; using Microsoft.Extensions.AI; +using OpenAI; namespace WorkflowConcurrentSample; @@ -35,7 +36,10 @@ private static async Task Main() // Set up the Azure OpenAI client var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? 
"gpt-4o-mini"; - var chatClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetChatClient(deploymentName).AsIChatClient(); + var chatClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName).AsIChatClient(); // Create the executors ChatClientAgent physicist = new( @@ -82,14 +86,16 @@ internal sealed class ConcurrentStartExecutor() : /// /// The user message to process /// Workflow context for accessing workflow services and adding events + /// The to monitor for cancellation requests. + /// The default is . /// A task representing the asynchronous operation - public async ValueTask HandleAsync(string message, IWorkflowContext context) + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) { // Broadcast the message to all connected agents. Receiving agents will queue // the message but will not start processing until they receive a turn token. - await context.SendMessageAsync(new ChatMessage(ChatRole.User, message)); + await context.SendMessageAsync(new ChatMessage(ChatRole.User, message), cancellationToken: cancellationToken); // Broadcast the turn token to kick off the agents. - await context.SendMessageAsync(new TurnToken(emitEvents: true)); + await context.SendMessageAsync(new TurnToken(emitEvents: true), cancellationToken: cancellationToken); } } @@ -107,15 +113,17 @@ internal sealed class ConcurrentAggregationExecutor() : /// /// The message from the agent /// Workflow context for accessing workflow services and adding events + /// The to monitor for cancellation requests. + /// The default is . 
/// A task representing the asynchronous operation - public async ValueTask HandleAsync(ChatMessage message, IWorkflowContext context) + public async ValueTask HandleAsync(ChatMessage message, IWorkflowContext context, CancellationToken cancellationToken = default) { this._messages.Add(message); if (this._messages.Count == 2) { var formattedMessages = string.Join(Environment.NewLine, this._messages.Select(m => $"{m.AuthorName}: {m.Text}")); - await context.YieldOutputAsync(formattedMessages); + await context.YieldOutputAsync(formattedMessages, cancellationToken); } } } diff --git a/dotnet/samples/GettingStarted/Workflows/Concurrent/MapReduce/MapReduce.csproj b/dotnet/samples/GettingStarted/Workflows/Concurrent/MapReduce/MapReduce.csproj index 7282e3fde4..02c89044bc 100644 --- a/dotnet/samples/GettingStarted/Workflows/Concurrent/MapReduce/MapReduce.csproj +++ b/dotnet/samples/GettingStarted/Workflows/Concurrent/MapReduce/MapReduce.csproj @@ -12,7 +12,6 @@ - diff --git a/dotnet/samples/GettingStarted/Workflows/Concurrent/MapReduce/Program.cs b/dotnet/samples/GettingStarted/Workflows/Concurrent/MapReduce/Program.cs index a0f059a00e..759f602563 100644 --- a/dotnet/samples/GettingStarted/Workflows/Concurrent/MapReduce/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/Concurrent/MapReduce/Program.cs @@ -5,6 +5,7 @@ using System.IO; using System.Linq; using System.Text.Json; +using System.Threading; using System.Threading.Tasks; using Microsoft.Agents.AI.Workflows; using Microsoft.Agents.AI.Workflows.Reflection; @@ -138,7 +139,7 @@ internal sealed class Split(string[] mapperIds, string id) : /// /// Tokenize input and assign contiguous index ranges to each mapper via shared state. 
/// - public async ValueTask HandleAsync(string message, IWorkflowContext context) + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) { // Ensure temp directory exists Directory.CreateDirectory(MapReduceConstants.TempDir); @@ -147,7 +148,7 @@ public async ValueTask HandleAsync(string message, IWorkflowContext context) var wordList = Preprocess(message); // Store the tokenized words once so that all mappers can read by index - await context.QueueStateUpdateAsync(MapReduceConstants.DataToProcessKey, wordList, scopeName: MapReduceConstants.StateScope); + await context.QueueStateUpdateAsync(MapReduceConstants.DataToProcessKey, wordList, scopeName: MapReduceConstants.StateScope, cancellationToken); // Divide indices into contiguous slices for each mapper var mapperCount = this._mapperIds.Length; @@ -160,10 +161,10 @@ async Task ProcessChunkAsync(int i) var endIndex = i < mapperCount - 1 ? startIndex + chunkSize : wordList.Length; // Save the indices under the mapper's Id - await context.QueueStateUpdateAsync(this._mapperIds[i], (startIndex, endIndex), scopeName: MapReduceConstants.StateScope); + await context.QueueStateUpdateAsync(this._mapperIds[i], (startIndex, endIndex), scopeName: MapReduceConstants.StateScope, cancellationToken); // Notify the mapper that data is ready - await context.SendMessageAsync(new SplitComplete(), targetId: this._mapperIds[i]); + await context.SendMessageAsync(new SplitComplete(), targetId: this._mapperIds[i], cancellationToken); } // Process all the chunks @@ -192,10 +193,10 @@ internal sealed class Mapper(string id) : ReflectingExecutor(id), IMessa /// /// Read the assigned slice, emit (word, 1) pairs, and persist to disk. 
/// - public async ValueTask HandleAsync(SplitComplete message, IWorkflowContext context) + public async ValueTask HandleAsync(SplitComplete message, IWorkflowContext context, CancellationToken cancellationToken = default) { - var dataToProcess = await context.ReadStateAsync(MapReduceConstants.DataToProcessKey, scopeName: MapReduceConstants.StateScope); - var chunk = await context.ReadStateAsync<(int start, int end)>(this.Id, scopeName: MapReduceConstants.StateScope); + var dataToProcess = await context.ReadStateAsync(MapReduceConstants.DataToProcessKey, scopeName: MapReduceConstants.StateScope, cancellationToken); + var chunk = await context.ReadStateAsync<(int start, int end)>(this.Id, scopeName: MapReduceConstants.StateScope, cancellationToken); var results = dataToProcess![chunk.start..chunk.end] .Select(word => (word, 1)) @@ -204,9 +205,9 @@ public async ValueTask HandleAsync(SplitComplete message, IWorkflowContext conte // Write this mapper's results as simple text lines for easy debugging var filePath = Path.Combine(MapReduceConstants.TempDir, $"map_results_{this.Id}.txt"); var lines = results.Select(r => $"{r.word}: {r.Item2}"); - await File.WriteAllLinesAsync(filePath, lines); + await File.WriteAllLinesAsync(filePath, lines, cancellationToken); - await context.SendMessageAsync(new MapComplete(filePath)); + await context.SendMessageAsync(new MapComplete(filePath), cancellationToken: cancellationToken); } } @@ -224,7 +225,7 @@ internal sealed class Shuffler(string[] reducerIds, string[] mapperIds, string i /// /// Aggregate mapper outputs and write one partition file per reducer. 
/// - public async ValueTask HandleAsync(MapComplete message, IWorkflowContext context) + public async ValueTask HandleAsync(MapComplete message, IWorkflowContext context, CancellationToken cancellationToken = default) { this._mapResults.Add(message); @@ -241,9 +242,9 @@ async Task ProcessChunkAsync(List<(string key, List values)> chunk, int ind // Write one grouped partition for reducer index and notify that reducer var filePath = Path.Combine(MapReduceConstants.TempDir, $"shuffle_results_{index}.txt"); var lines = chunk.Select(kvp => $"{kvp.key}: {JsonSerializer.Serialize(kvp.values)}"); - await File.WriteAllLinesAsync(filePath, lines); + await File.WriteAllLinesAsync(filePath, lines, cancellationToken); - await context.SendMessageAsync(new ShuffleComplete(filePath, this._reducerIds[index])); + await context.SendMessageAsync(new ShuffleComplete(filePath, this._reducerIds[index]), cancellationToken: cancellationToken); } var tasks = chunks.Select((chunk, i) => ProcessChunkAsync(chunk, i)); @@ -318,7 +319,7 @@ internal sealed class Reducer(string id) : ReflectingExecutor(id), IMes /// /// Read one shuffle partition and reduce it to totals. /// - public async ValueTask HandleAsync(ShuffleComplete message, IWorkflowContext context) + public async ValueTask HandleAsync(ShuffleComplete message, IWorkflowContext context, CancellationToken cancellationToken = default) { if (message.ReducerId != this.Id) { @@ -327,7 +328,7 @@ public async ValueTask HandleAsync(ShuffleComplete message, IWorkflowContext con } // Read grouped values from the shuffle output - var lines = await File.ReadAllLinesAsync(message.FilePath); + var lines = await File.ReadAllLinesAsync(message.FilePath, cancellationToken); // Sum values per key. Values are serialized JSON arrays like [1, 1, ...] 
var reducedResults = new Dictionary(); @@ -345,9 +346,9 @@ public async ValueTask HandleAsync(ShuffleComplete message, IWorkflowContext con // Persist our partition totals var filePath = Path.Combine(MapReduceConstants.TempDir, $"reduced_results_{this.Id}.txt"); var outputLines = reducedResults.Select(kvp => $"{kvp.Key}: {kvp.Value}"); - await File.WriteAllLinesAsync(filePath, outputLines); + await File.WriteAllLinesAsync(filePath, outputLines, cancellationToken); - await context.SendMessageAsync(new ReduceComplete(filePath)); + await context.SendMessageAsync(new ReduceComplete(filePath), cancellationToken: cancellationToken); } } @@ -361,10 +362,10 @@ internal sealed class CompletionExecutor(string id) : /// /// Collect reducer output file paths and yield final output. /// - public async ValueTask HandleAsync(List message, IWorkflowContext context) + public async ValueTask HandleAsync(List message, IWorkflowContext context, CancellationToken cancellationToken = default) { var filePaths = message.ConvertAll(r => r.FilePath); - await context.YieldOutputAsync(filePaths); + await context.YieldOutputAsync(filePaths, cancellationToken); } } diff --git a/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/01_EdgeCondition/01_EdgeCondition.csproj b/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/01_EdgeCondition/01_EdgeCondition.csproj index 76f9509ee1..6a2bf353d7 100644 --- a/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/01_EdgeCondition/01_EdgeCondition.csproj +++ b/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/01_EdgeCondition/01_EdgeCondition.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/01_EdgeCondition/Program.cs b/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/01_EdgeCondition/Program.cs index 65ac2aa852..499fdcaf31 100644 --- a/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/01_EdgeCondition/Program.cs +++ 
b/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/01_EdgeCondition/Program.cs @@ -1,13 +1,14 @@ // Copyright (c) Microsoft. All rights reserved. +using System.ClientModel.Primitives; using System.Text.Json; using System.Text.Json.Serialization; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Agents.AI.Workflows; using Microsoft.Agents.AI.Workflows.Reflection; using Microsoft.Extensions.AI; +using OpenAI; namespace WorkflowEdgeConditionSample; @@ -38,7 +39,10 @@ private static async Task Main() // Set up the Azure OpenAI client var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; - var chatClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetChatClient(deploymentName).AsIChatClient(); + var chatClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName).AsIChatClient(); // Create agents AIAgent spamDetectionAgent = GetSpamDetectionAgent(chatClient); @@ -160,7 +164,7 @@ public SpamDetectionExecutor(AIAgent spamDetectionAgent) : base("SpamDetectionEx this._spamDetectionAgent = spamDetectionAgent; } - public async ValueTask HandleAsync(ChatMessage message, IWorkflowContext context) + public async ValueTask HandleAsync(ChatMessage message, IWorkflowContext context, CancellationToken cancellationToken = default) { // Generate a random email ID and store the email content to the shared state var newEmail = new Email @@ -168,10 +172,10 @@ public async ValueTask HandleAsync(ChatMessage message, IWorkfl EmailId = Guid.NewGuid().ToString("N"), EmailContent = message.Text }; - await context.QueueStateUpdateAsync(newEmail.EmailId, newEmail, 
scopeName: EmailStateConstants.EmailStateScope); + await context.QueueStateUpdateAsync(newEmail.EmailId, newEmail, scopeName: EmailStateConstants.EmailStateScope, cancellationToken); // Invoke the agent - var response = await this._spamDetectionAgent.RunAsync(message); + var response = await this._spamDetectionAgent.RunAsync(message, cancellationToken: cancellationToken); var detectionResult = JsonSerializer.Deserialize(response.Text); detectionResult!.EmailId = newEmail.EmailId; @@ -205,7 +209,7 @@ public EmailAssistantExecutor(AIAgent emailAssistantAgent) : base("EmailAssistan this._emailAssistantAgent = emailAssistantAgent; } - public async ValueTask HandleAsync(DetectionResult message, IWorkflowContext context) + public async ValueTask HandleAsync(DetectionResult message, IWorkflowContext context, CancellationToken cancellationToken = default) { if (message.IsSpam) { @@ -213,11 +217,11 @@ public async ValueTask HandleAsync(DetectionResult message, IWork } // Retrieve the email content from the shared state - var email = await context.ReadStateAsync(message.EmailId, scopeName: EmailStateConstants.EmailStateScope) + var email = await context.ReadStateAsync(message.EmailId, scopeName: EmailStateConstants.EmailStateScope, cancellationToken) ?? throw new InvalidOperationException("Email not found."); // Invoke the agent - var response = await this._emailAssistantAgent.RunAsync(email.EmailContent); + var response = await this._emailAssistantAgent.RunAsync(email.EmailContent, cancellationToken: cancellationToken); var emailResponse = JsonSerializer.Deserialize(response.Text); return emailResponse!; @@ -232,8 +236,8 @@ internal sealed class SendEmailExecutor() : ReflectingExecutor /// Simulate the sending of an email. 
/// - public async ValueTask HandleAsync(EmailResponse message, IWorkflowContext context) => - await context.YieldOutputAsync($"Email sent: {message.Response}"); + public async ValueTask HandleAsync(EmailResponse message, IWorkflowContext context, CancellationToken cancellationToken = default) => + await context.YieldOutputAsync($"Email sent: {message.Response}", cancellationToken); } /// @@ -244,11 +248,11 @@ internal sealed class HandleSpamExecutor() : ReflectingExecutor /// Simulate the handling of a spam message. /// - public async ValueTask HandleAsync(DetectionResult message, IWorkflowContext context) + public async ValueTask HandleAsync(DetectionResult message, IWorkflowContext context, CancellationToken cancellationToken = default) { if (message.IsSpam) { - await context.YieldOutputAsync($"Email marked as spam: {message.Reason}"); + await context.YieldOutputAsync($"Email marked as spam: {message.Reason}", cancellationToken); } else { diff --git a/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/02_SwitchCase/02_SwitchCase.csproj b/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/02_SwitchCase/02_SwitchCase.csproj index 76f9509ee1..6a2bf353d7 100644 --- a/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/02_SwitchCase/02_SwitchCase.csproj +++ b/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/02_SwitchCase/02_SwitchCase.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/02_SwitchCase/Program.cs b/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/02_SwitchCase/Program.cs index 4f985f47a2..9083cf5a14 100644 --- a/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/02_SwitchCase/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/02_SwitchCase/Program.cs @@ -1,13 +1,14 @@ // Copyright (c) Microsoft. All rights reserved. 
+using System.ClientModel.Primitives; using System.Text.Json; using System.Text.Json.Serialization; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Agents.AI.Workflows; using Microsoft.Agents.AI.Workflows.Reflection; using Microsoft.Extensions.AI; +using OpenAI; namespace WorkflowSwitchCaseSample; @@ -39,7 +40,10 @@ private static async Task Main() // Set up the Azure OpenAI client var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; - var chatClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetChatClient(deploymentName).AsIChatClient(); + var chatClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName).AsIChatClient(); // Create agents AIAgent spamDetectionAgent = GetSpamDetectionAgent(chatClient); @@ -185,7 +189,7 @@ public SpamDetectionExecutor(AIAgent spamDetectionAgent) : base("SpamDetectionEx this._spamDetectionAgent = spamDetectionAgent; } - public async ValueTask HandleAsync(ChatMessage message, IWorkflowContext context) + public async ValueTask HandleAsync(ChatMessage message, IWorkflowContext context, CancellationToken cancellationToken = default) { // Generate a random email ID and store the email content var newEmail = new Email @@ -193,10 +197,10 @@ public async ValueTask HandleAsync(ChatMessage message, IWorkfl EmailId = Guid.NewGuid().ToString("N"), EmailContent = message.Text }; - await context.QueueStateUpdateAsync(newEmail.EmailId, newEmail, scopeName: EmailStateConstants.EmailStateScope); + await context.QueueStateUpdateAsync(newEmail.EmailId, newEmail, scopeName: EmailStateConstants.EmailStateScope, 
cancellationToken); // Invoke the agent - var response = await this._spamDetectionAgent.RunAsync(message); + var response = await this._spamDetectionAgent.RunAsync(message, cancellationToken: cancellationToken); var detectionResult = JsonSerializer.Deserialize(response.Text); detectionResult!.EmailId = newEmail.EmailId; @@ -230,7 +234,7 @@ public EmailAssistantExecutor(AIAgent emailAssistantAgent) : base("EmailAssistan this._emailAssistantAgent = emailAssistantAgent; } - public async ValueTask HandleAsync(DetectionResult message, IWorkflowContext context) + public async ValueTask HandleAsync(DetectionResult message, IWorkflowContext context, CancellationToken cancellationToken = default) { if (message.spamDecision == SpamDecision.Spam) { @@ -238,10 +242,10 @@ public async ValueTask HandleAsync(DetectionResult message, IWork } // Retrieve the email content from the context - var email = await context.ReadStateAsync(message.EmailId, scopeName: EmailStateConstants.EmailStateScope); + var email = await context.ReadStateAsync(message.EmailId, scopeName: EmailStateConstants.EmailStateScope, cancellationToken); // Invoke the agent - var response = await this._emailAssistantAgent.RunAsync(email!.EmailContent); + var response = await this._emailAssistantAgent.RunAsync(email!.EmailContent, cancellationToken: cancellationToken); var emailResponse = JsonSerializer.Deserialize(response.Text); return emailResponse!; @@ -256,8 +260,8 @@ internal sealed class SendEmailExecutor() : ReflectingExecutor /// Simulate the sending of an email. 
/// - public async ValueTask HandleAsync(EmailResponse message, IWorkflowContext context) => - await context.YieldOutputAsync($"Email sent: {message.Response}").ConfigureAwait(false); + public async ValueTask HandleAsync(EmailResponse message, IWorkflowContext context, CancellationToken cancellationToken = default) => + await context.YieldOutputAsync($"Email sent: {message.Response}", cancellationToken).ConfigureAwait(false); } /// @@ -268,11 +272,11 @@ internal sealed class HandleSpamExecutor() : ReflectingExecutor /// Simulate the handling of a spam message. /// - public async ValueTask HandleAsync(DetectionResult message, IWorkflowContext context) + public async ValueTask HandleAsync(DetectionResult message, IWorkflowContext context, CancellationToken cancellationToken = default) { if (message.spamDecision == SpamDecision.Spam) { - await context.YieldOutputAsync($"Email marked as spam: {message.Reason}").ConfigureAwait(false); + await context.YieldOutputAsync($"Email marked as spam: {message.Reason}", cancellationToken).ConfigureAwait(false); } else { @@ -289,12 +293,12 @@ internal sealed class HandleUncertainExecutor() : ReflectingExecutor /// Simulate the handling of an uncertain spam decision. /// - public async ValueTask HandleAsync(DetectionResult message, IWorkflowContext context) + public async ValueTask HandleAsync(DetectionResult message, IWorkflowContext context, CancellationToken cancellationToken = default) { if (message.spamDecision == SpamDecision.Uncertain) { - var email = await context.ReadStateAsync(message.EmailId, scopeName: EmailStateConstants.EmailStateScope); - await context.YieldOutputAsync($"Email marked as uncertain: {message.Reason}. Email content: {email?.EmailContent}"); + var email = await context.ReadStateAsync(message.EmailId, scopeName: EmailStateConstants.EmailStateScope, cancellationToken); + await context.YieldOutputAsync($"Email marked as uncertain: {message.Reason}. 
Email content: {email?.EmailContent}", cancellationToken); } else { diff --git a/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/03_MultiSelection/03_MultiSelection.csproj b/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/03_MultiSelection/03_MultiSelection.csproj index 76f9509ee1..6a2bf353d7 100644 --- a/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/03_MultiSelection/03_MultiSelection.csproj +++ b/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/03_MultiSelection/03_MultiSelection.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/03_MultiSelection/Program.cs b/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/03_MultiSelection/Program.cs index 987dd7ffd6..f11fbbe2fa 100644 --- a/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/03_MultiSelection/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/ConditionalEdges/03_MultiSelection/Program.cs @@ -1,13 +1,14 @@ // Copyright (c) Microsoft. All rights reserved. +using System.ClientModel.Primitives; using System.Text.Json; using System.Text.Json.Serialization; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Agents.AI.Workflows; using Microsoft.Agents.AI.Workflows.Reflection; using Microsoft.Extensions.AI; +using OpenAI; namespace WorkflowMultiSelectionSample; @@ -41,7 +42,10 @@ private static async Task Main() // Set up the Azure OpenAI client var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? 
"gpt-4o-mini"; - var chatClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetChatClient(deploymentName).AsIChatClient(); + var chatClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName).AsIChatClient(); // Create agents AIAgent emailAnalysisAgent = GetEmailAnalysisAgent(chatClient); @@ -241,7 +245,7 @@ public EmailAnalysisExecutor(AIAgent emailAnalysisAgent) : base("EmailAnalysisEx this._emailAnalysisAgent = emailAnalysisAgent; } - public async ValueTask HandleAsync(ChatMessage message, IWorkflowContext context) + public async ValueTask HandleAsync(ChatMessage message, IWorkflowContext context, CancellationToken cancellationToken = default) { // Generate a random email ID and store the email content var newEmail = new Email @@ -249,10 +253,10 @@ public async ValueTask HandleAsync(ChatMessage message, IWorkflo EmailId = Guid.NewGuid().ToString("N"), EmailContent = message.Text }; - await context.QueueStateUpdateAsync(newEmail.EmailId, newEmail, scopeName: EmailStateConstants.EmailStateScope); + await context.QueueStateUpdateAsync(newEmail.EmailId, newEmail, scopeName: EmailStateConstants.EmailStateScope, cancellationToken); // Invoke the agent - var response = await this._emailAnalysisAgent.RunAsync(message); + var response = await this._emailAnalysisAgent.RunAsync(message, cancellationToken: cancellationToken); var AnalysisResult = JsonSerializer.Deserialize(response.Text); AnalysisResult!.EmailId = newEmail.EmailId; @@ -287,7 +291,7 @@ public EmailAssistantExecutor(AIAgent emailAssistantAgent) : base("EmailAssistan this._emailAssistantAgent = emailAssistantAgent; } - public async ValueTask HandleAsync(AnalysisResult message, IWorkflowContext context) + public async ValueTask HandleAsync(AnalysisResult message, IWorkflowContext context, CancellationToken cancellationToken = 
default) { if (message.spamDecision == SpamDecision.Spam) { @@ -295,10 +299,10 @@ public async ValueTask HandleAsync(AnalysisResult message, IWorkf } // Retrieve the email content from the context - var email = await context.ReadStateAsync(message.EmailId, scopeName: EmailStateConstants.EmailStateScope); + var email = await context.ReadStateAsync(message.EmailId, scopeName: EmailStateConstants.EmailStateScope, cancellationToken); // Invoke the agent - var response = await this._emailAssistantAgent.RunAsync(email!.EmailContent); + var response = await this._emailAssistantAgent.RunAsync(email!.EmailContent, cancellationToken: cancellationToken); var emailResponse = JsonSerializer.Deserialize(response.Text); return emailResponse!; @@ -313,8 +317,8 @@ internal sealed class SendEmailExecutor() : ReflectingExecutor /// Simulate the sending of an email. /// - public async ValueTask HandleAsync(EmailResponse message, IWorkflowContext context) => - await context.YieldOutputAsync($"Email sent: {message.Response}"); + public async ValueTask HandleAsync(EmailResponse message, IWorkflowContext context, CancellationToken cancellationToken = default) => + await context.YieldOutputAsync($"Email sent: {message.Response}", cancellationToken); } /// @@ -325,11 +329,11 @@ internal sealed class HandleSpamExecutor() : ReflectingExecutor /// Simulate the handling of a spam message. /// - public async ValueTask HandleAsync(AnalysisResult message, IWorkflowContext context) + public async ValueTask HandleAsync(AnalysisResult message, IWorkflowContext context, CancellationToken cancellationToken = default) { if (message.spamDecision == SpamDecision.Spam) { - await context.YieldOutputAsync($"Email marked as spam: {message.Reason}"); + await context.YieldOutputAsync($"Email marked as spam: {message.Reason}", cancellationToken); } else { @@ -346,12 +350,12 @@ internal sealed class HandleUncertainExecutor() : ReflectingExecutor /// Simulate the handling of an uncertain spam decision. 
/// - public async ValueTask HandleAsync(AnalysisResult message, IWorkflowContext context) + public async ValueTask HandleAsync(AnalysisResult message, IWorkflowContext context, CancellationToken cancellationToken = default) { if (message.spamDecision == SpamDecision.Uncertain) { - var email = await context.ReadStateAsync(message.EmailId, scopeName: EmailStateConstants.EmailStateScope); - await context.YieldOutputAsync($"Email marked as uncertain: {message.Reason}. Email content: {email?.EmailContent}"); + var email = await context.ReadStateAsync(message.EmailId, scopeName: EmailStateConstants.EmailStateScope, cancellationToken); + await context.YieldOutputAsync($"Email marked as uncertain: {message.Reason}. Email content: {email?.EmailContent}", cancellationToken); } else { @@ -385,13 +389,13 @@ public EmailSummaryExecutor(AIAgent emailSummaryAgent) : base("EmailSummaryExecu this._emailSummaryAgent = emailSummaryAgent; } - public async ValueTask HandleAsync(AnalysisResult message, IWorkflowContext context) + public async ValueTask HandleAsync(AnalysisResult message, IWorkflowContext context, CancellationToken cancellationToken = default) { // Read the email content from the shared states - var email = await context.ReadStateAsync(message.EmailId, scopeName: EmailStateConstants.EmailStateScope); + var email = await context.ReadStateAsync(message.EmailId, scopeName: EmailStateConstants.EmailStateScope, cancellationToken); // Invoke the agent - var response = await this._emailSummaryAgent.RunAsync(email!.EmailContent); + var response = await this._emailSummaryAgent.RunAsync(email!.EmailContent, cancellationToken: cancellationToken); var emailSummary = JsonSerializer.Deserialize(response.Text); message.EmailSummary = emailSummary!.Summary; @@ -410,17 +414,17 @@ internal sealed class DatabaseEvent(string message) : WorkflowEvent(message) { } /// internal sealed class DatabaseAccessExecutor() : ReflectingExecutor("DatabaseAccessExecutor"), IMessageHandler { - public 
async ValueTask HandleAsync(AnalysisResult message, IWorkflowContext context) + public async ValueTask HandleAsync(AnalysisResult message, IWorkflowContext context, CancellationToken cancellationToken = default) { // 1. Save the email content - await context.ReadStateAsync(message.EmailId, scopeName: EmailStateConstants.EmailStateScope); - await Task.Delay(100); // Simulate database access delay + await context.ReadStateAsync(message.EmailId, scopeName: EmailStateConstants.EmailStateScope, cancellationToken); + await Task.Delay(100, cancellationToken); // Simulate database access delay // 2. Save the analysis result - await Task.Delay(100); // Simulate database access delay + await Task.Delay(100, cancellationToken); // Simulate database access delay // Not using the `WorkflowCompletedEvent` because this is not the end of the workflow. // The end of the workflow is signaled by the `SendEmailExecutor` or the `HandleUnknownExecutor`. - await context.AddEventAsync(new DatabaseEvent($"Email {message.EmailId} saved to database.")); + await context.AddEventAsync(new DatabaseEvent($"Email {message.EmailId} saved to database."), cancellationToken); } } diff --git a/dotnet/samples/GettingStarted/Workflows/Declarative/ExecuteCode/Program.cs b/dotnet/samples/GettingStarted/Workflows/Declarative/ExecuteCode/Program.cs index ca8032f808..e7072c69c1 100644 --- a/dotnet/samples/GettingStarted/Workflows/Declarative/ExecuteCode/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/Declarative/ExecuteCode/Program.cs @@ -76,102 +76,103 @@ private async Task MonitorAndDisposeWorkflowRunAsync(StreamingRun run) string? 
messageId = null; - await foreach (WorkflowEvent evt in run.WatchStreamAsync().ConfigureAwait(false)) + await foreach (WorkflowEvent workflowEvent in run.WatchStreamAsync().ConfigureAwait(false)) { - if (evt is ExecutorInvokedEvent executorInvoked) + switch (workflowEvent) { - Debug.WriteLine($"STEP ENTER #{executorInvoked.ExecutorId}"); - } - else if (evt is ExecutorCompletedEvent executorComplete) - { - Debug.WriteLine($"STEP EXIT #{executorComplete.ExecutorId}"); - } - else if (evt is ExecutorFailedEvent executorFailure) - { - Debug.WriteLine($"STEP ERROR #{executorFailure.ExecutorId}: {executorFailure.Data?.Message ?? "Unknown"}"); - } - else if (evt is WorkflowErrorEvent workflowError) - { - Debug.WriteLine("WORKFLOW ERROR"); - } - else if (evt is ConversationUpdateEvent invokeEvent) - { - Debug.WriteLine($"CONVERSATION: {invokeEvent.Data}"); - } - else if (evt is AgentRunUpdateEvent streamEvent) - { - if (!string.Equals(messageId, streamEvent.Update.MessageId, StringComparison.Ordinal)) - { - messageId = streamEvent.Update.MessageId; + case ExecutorInvokedEvent executorInvoked: + Debug.WriteLine($"STEP ENTER #{executorInvoked.ExecutorId}"); + break; + + case ExecutorCompletedEvent executorComplete: + Debug.WriteLine($"STEP EXIT #{executorComplete.ExecutorId}"); + break; + + case ExecutorFailedEvent executorFailure: + Debug.WriteLine($"STEP ERROR #{executorFailure.ExecutorId}: {executorFailure.Data?.Message ?? "Unknown"}"); + break; + + case WorkflowErrorEvent workflowError: + throw workflowError.Data as Exception ?? new InvalidOperationException("Unexpected failure..."); + + case ConversationUpdateEvent invokeEvent: + Debug.WriteLine($"CONVERSATION: {invokeEvent.Data}"); + break; - if (messageId is not null) + case AgentRunUpdateEvent streamEvent: + if (!string.Equals(messageId, streamEvent.Update.MessageId, StringComparison.Ordinal)) { - string? 
agentId = streamEvent.Update.AuthorName; - if (agentId is not null) + messageId = streamEvent.Update.MessageId; + + if (messageId is not null) { - if (!s_nameCache.TryGetValue(agentId, out string? realName)) + string? agentId = streamEvent.Update.AuthorName; + if (agentId is not null) { - PersistentAgent agent = await this.FoundryClient.Administration.GetAgentAsync(agentId); - s_nameCache[agentId] = agent.Name; - realName = agent.Name; + if (!s_nameCache.TryGetValue(agentId, out string? realName)) + { + PersistentAgent agent = await this.FoundryClient.Administration.GetAgentAsync(agentId); + s_nameCache[agentId] = agent.Name; + realName = agent.Name; + } + agentId = realName; } - agentId = realName; + agentId ??= nameof(ChatRole.Assistant); + Console.ForegroundColor = ConsoleColor.Cyan; + Console.Write($"\n{agentId.ToUpperInvariant()}:"); + Console.ForegroundColor = ConsoleColor.DarkGray; + Console.WriteLine($" [{messageId}]"); } - agentId ??= nameof(ChatRole.Assistant); - Console.ForegroundColor = ConsoleColor.Cyan; - Console.Write($"\n{agentId.ToUpperInvariant()}:"); - Console.ForegroundColor = ConsoleColor.DarkGray; - Console.WriteLine($" [{messageId}]"); } - } - - ChatResponseUpdate? chatUpdate = streamEvent.Update.RawRepresentation as ChatResponseUpdate; - switch (chatUpdate?.RawRepresentation) - { - case MessageContentUpdate messageUpdate: - string? fileId = messageUpdate.ImageFileId ?? messageUpdate.TextAnnotation?.OutputFileId; - if (fileId is not null && s_fileCache.Add(fileId)) - { - BinaryData content = await this.FoundryClient.Files.GetFileContentAsync(fileId); - await DownloadFileContentAsync(Path.GetFileName(messageUpdate.TextAnnotation?.TextToReplace ?? 
"response.png"), content); - } - break; - } - try - { - Console.ResetColor(); - Console.Write(streamEvent.Data); - } - finally - { - Console.ResetColor(); - } - } - else if (evt is AgentRunResponseEvent messageEvent) - { - try - { - Console.WriteLine(); - if (messageEvent.Response.AgentId is null) + + ChatResponseUpdate? chatUpdate = streamEvent.Update.RawRepresentation as ChatResponseUpdate; + switch (chatUpdate?.RawRepresentation) { - Console.ForegroundColor = ConsoleColor.Cyan; - Console.WriteLine("ACTIVITY:"); - Console.ForegroundColor = ConsoleColor.Yellow; - Console.WriteLine(messageEvent.Response?.Text.Trim()); + case MessageContentUpdate messageUpdate: + string? fileId = messageUpdate.ImageFileId ?? messageUpdate.TextAnnotation?.OutputFileId; + if (fileId is not null && s_fileCache.Add(fileId)) + { + BinaryData content = await this.FoundryClient.Files.GetFileContentAsync(fileId); + await DownloadFileContentAsync(Path.GetFileName(messageUpdate.TextAnnotation?.TextToReplace ?? "response.png"), content); + } + break; } - else + try { - if (messageEvent.Response.Usage is not null) + Console.ResetColor(); + Console.Write(streamEvent.Data); + } + finally + { + Console.ResetColor(); + } + break; + + case AgentRunResponseEvent messageEvent: + try + { + Console.WriteLine(); + if (messageEvent.Response.AgentId is null) { - Console.ForegroundColor = ConsoleColor.DarkGray; - Console.WriteLine($"[Tokens Total: {messageEvent.Response.Usage.TotalTokenCount}, Input: {messageEvent.Response.Usage.InputTokenCount}, Output: {messageEvent.Response.Usage.OutputTokenCount}]"); + Console.ForegroundColor = ConsoleColor.Cyan; + Console.WriteLine("ACTIVITY:"); + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine(messageEvent.Response?.Text.Trim()); + } + else + { + if (messageEvent.Response.Usage is not null) + { + Console.ForegroundColor = ConsoleColor.DarkGray; + Console.WriteLine($"[Tokens Total: {messageEvent.Response.Usage.TotalTokenCount}, Input: 
{messageEvent.Response.Usage.InputTokenCount}, Output: {messageEvent.Response.Usage.OutputTokenCount}]"); + } } } - } - finally - { - Console.ResetColor(); - } + finally + { + Console.ResetColor(); + } + break; } } } diff --git a/dotnet/samples/GettingStarted/Workflows/Declarative/ExecuteWorkflow/Program.cs b/dotnet/samples/GettingStarted/Workflows/Declarative/ExecuteWorkflow/Program.cs index f9754ab22e..8d9b4a6504 100644 --- a/dotnet/samples/GettingStarted/Workflows/Declarative/ExecuteWorkflow/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/Declarative/ExecuteWorkflow/Program.cs @@ -163,6 +163,9 @@ private Program(string workflowFile, string? workflowInput) Debug.WriteLine($"STEP ERROR #{executorFailure.ExecutorId}: {executorFailure.Data?.Message ?? "Unknown"}"); break; + case WorkflowErrorEvent workflowError: + throw workflowError.Data as Exception ?? new InvalidOperationException("Unexpected failure..."); + case SuperStepCompletedEvent checkpointCompleted: this.LastCheckpoint = checkpointCompleted.CompletionInfo?.Checkpoint; Debug.WriteLine($"CHECKPOINT x{checkpointCompleted.StepNumber} [{this.LastCheckpoint?.CheckpointId ?? 
"(none)"}]"); diff --git a/dotnet/samples/GettingStarted/Workflows/HumanInTheLoop/HumanInTheLoopBasic/WorkflowHelper.cs b/dotnet/samples/GettingStarted/Workflows/HumanInTheLoop/HumanInTheLoopBasic/WorkflowHelper.cs index 8f0f8c7b35..5fcc6cf765 100644 --- a/dotnet/samples/GettingStarted/Workflows/HumanInTheLoop/HumanInTheLoopBasic/WorkflowHelper.cs +++ b/dotnet/samples/GettingStarted/Workflows/HumanInTheLoop/HumanInTheLoopBasic/WorkflowHelper.cs @@ -53,21 +53,21 @@ public JudgeExecutor(int targetNumber) : this() this._targetNumber = targetNumber; } - public async ValueTask HandleAsync(int message, IWorkflowContext context) + public async ValueTask HandleAsync(int message, IWorkflowContext context, CancellationToken cancellationToken = default) { this._tries++; if (message == this._targetNumber) { - await context.YieldOutputAsync($"{this._targetNumber} found in {this._tries} tries!") + await context.YieldOutputAsync($"{this._targetNumber} found in {this._tries} tries!", cancellationToken) .ConfigureAwait(false); } else if (message < this._targetNumber) { - await context.SendMessageAsync(NumberSignal.Below).ConfigureAwait(false); + await context.SendMessageAsync(NumberSignal.Below, cancellationToken: cancellationToken).ConfigureAwait(false); } else { - await context.SendMessageAsync(NumberSignal.Above).ConfigureAwait(false); + await context.SendMessageAsync(NumberSignal.Above, cancellationToken: cancellationToken).ConfigureAwait(false); } } } diff --git a/dotnet/samples/GettingStarted/Workflows/Loop/Program.cs b/dotnet/samples/GettingStarted/Workflows/Loop/Program.cs index 020c193ca0..a5f4168767 100644 --- a/dotnet/samples/GettingStarted/Workflows/Loop/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/Loop/Program.cs @@ -83,20 +83,20 @@ public GuessNumberExecutor(string id, int lowerBound, int upperBound) : base(id) private int NextGuess => (this.LowerBound + this.UpperBound) / 2; - public async ValueTask HandleAsync(NumberSignal message, IWorkflowContext 
context) + public async ValueTask HandleAsync(NumberSignal message, IWorkflowContext context, CancellationToken cancellationToken = default) { switch (message) { case NumberSignal.Init: - await context.SendMessageAsync(this.NextGuess).ConfigureAwait(false); + await context.SendMessageAsync(this.NextGuess, cancellationToken: cancellationToken).ConfigureAwait(false); break; case NumberSignal.Above: this.UpperBound = this.NextGuess - 1; - await context.SendMessageAsync(this.NextGuess).ConfigureAwait(false); + await context.SendMessageAsync(this.NextGuess, cancellationToken: cancellationToken).ConfigureAwait(false); break; case NumberSignal.Below: this.LowerBound = this.NextGuess + 1; - await context.SendMessageAsync(this.NextGuess).ConfigureAwait(false); + await context.SendMessageAsync(this.NextGuess, cancellationToken: cancellationToken).ConfigureAwait(false); break; } } @@ -120,21 +120,21 @@ public JudgeExecutor(string id, int targetNumber) : base(id) this._targetNumber = targetNumber; } - public async ValueTask HandleAsync(int message, IWorkflowContext context) + public async ValueTask HandleAsync(int message, IWorkflowContext context, CancellationToken cancellationToken = default) { this._tries++; if (message == this._targetNumber) { - await context.YieldOutputAsync($"{this._targetNumber} found in {this._tries} tries!") + await context.YieldOutputAsync($"{this._targetNumber} found in {this._tries} tries!", cancellationToken) .ConfigureAwait(false); } else if (message < this._targetNumber) { - await context.SendMessageAsync(NumberSignal.Below).ConfigureAwait(false); + await context.SendMessageAsync(NumberSignal.Below, cancellationToken: cancellationToken).ConfigureAwait(false); } else { - await context.SendMessageAsync(NumberSignal.Above).ConfigureAwait(false); + await context.SendMessageAsync(NumberSignal.Above, cancellationToken: cancellationToken).ConfigureAwait(false); } } } diff --git 
a/dotnet/samples/GettingStarted/Workflows/Observability/ApplicationInsights/ApplicationInsights.csproj b/dotnet/samples/GettingStarted/Workflows/Observability/ApplicationInsights/ApplicationInsights.csproj new file mode 100644 index 0000000000..f7a5a4424f --- /dev/null +++ b/dotnet/samples/GettingStarted/Workflows/Observability/ApplicationInsights/ApplicationInsights.csproj @@ -0,0 +1,21 @@ + + + + Exe + net9.0 + + enable + enable + + + + + + + + + + + + + \ No newline at end of file diff --git a/dotnet/samples/GettingStarted/Workflows/Observability/ApplicationInsights/Program.cs b/dotnet/samples/GettingStarted/Workflows/Observability/ApplicationInsights/Program.cs new file mode 100644 index 0000000000..6c3debd003 --- /dev/null +++ b/dotnet/samples/GettingStarted/Workflows/Observability/ApplicationInsights/Program.cs @@ -0,0 +1,101 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Diagnostics; +using Azure.Monitor.OpenTelemetry.Exporter; +using Microsoft.Agents.AI.Workflows; +using Microsoft.Agents.AI.Workflows.Reflection; +using OpenTelemetry; +using OpenTelemetry.Resources; +using OpenTelemetry.Trace; + +namespace WorkflowObservabilitySample; + +/// +/// This sample shows how to enable observability in a workflow and send the traces +/// to be visualized in Application Insights. +/// +/// In this example, we create a simple text processing pipeline that: +/// 1. Takes input text and converts it to uppercase using an UppercaseExecutor +/// 2. Takes the uppercase text and reverses it using a ReverseTextExecutor +/// +/// The executors are connected sequentially, so data flows from one to the next in order. +/// For input "Hello, World!", the workflow produces "!DLROW ,OLLEH". 
+/// +public static class Program +{ + private const string SourceName = "Workflow.ApplicationInsightsSample"; + private static readonly ActivitySource s_activitySource = new(SourceName); + + private static async Task Main() + { + var applicationInsightsConnectionString = Environment.GetEnvironmentVariable("APPLICATIONINSIGHTS_CONNECTION_STRING") ?? throw new InvalidOperationException("APPLICATIONINSIGHTS_CONNECTION_STRING is not set."); + + var resourceBuilder = ResourceBuilder + .CreateDefault() + .AddService("WorkflowSample"); + + using var traceProvider = Sdk.CreateTracerProviderBuilder() + .SetResourceBuilder(resourceBuilder) + .AddSource("Microsoft.Agents.AI.Workflows*") + .AddSource(SourceName) + .AddAzureMonitorTraceExporter(options => options.ConnectionString = applicationInsightsConnectionString) + .Build(); + + // Start a root activity for the application + using var activity = s_activitySource.StartActivity("main"); + Console.WriteLine($"Operation/Trace ID: {Activity.Current?.TraceId}"); + + // Create the executors + UppercaseExecutor uppercase = new(); + ReverseTextExecutor reverse = new(); + + // Build the workflow by connecting executors sequentially + var workflow = new WorkflowBuilder(uppercase) + .AddEdge(uppercase, reverse) + .Build(); + + // Execute the workflow with input data + Run run = await InProcessExecution.RunAsync(workflow, "Hello, World!"); + foreach (WorkflowEvent evt in run.NewEvents) + { + if (evt is ExecutorCompletedEvent executorComplete) + { + Console.WriteLine($"{executorComplete.ExecutorId}: {executorComplete.Data}"); + } + } + } +} + +/// +/// First executor: converts input text to uppercase. +/// +internal sealed class UppercaseExecutor() : ReflectingExecutor("UppercaseExecutor"), IMessageHandler +{ + /// + /// Processes the input message by converting it to uppercase. + /// + /// The input text to convert + /// Workflow context for accessing workflow services and adding events + /// The to monitor for cancellation requests. 
+ /// The default is . + /// The input text converted to uppercase + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) => + message.ToUpperInvariant(); // The return value will be sent as a message along an edge to subsequent executors +} + +/// +/// Second executor: reverses the input text and completes the workflow. +/// +internal sealed class ReverseTextExecutor() : ReflectingExecutor("ReverseTextExecutor"), IMessageHandler +{ + /// + /// Processes the input message by reversing the text. + /// + /// The input text to reverse + /// Workflow context for accessing workflow services and adding events + /// The to monitor for cancellation requests. + /// The default is . + /// The input text reversed + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) + => new(message.Reverse().ToArray()); +} diff --git a/dotnet/samples/GettingStarted/Workflows/Observability/AspireDashboard/Program.cs b/dotnet/samples/GettingStarted/Workflows/Observability/AspireDashboard/Program.cs index c02d6bc4a0..ddd88e7709 100644 --- a/dotnet/samples/GettingStarted/Workflows/Observability/AspireDashboard/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/Observability/AspireDashboard/Program.cs @@ -78,8 +78,10 @@ internal sealed class UppercaseExecutor() : ReflectingExecutor /// The input text to convert /// Workflow context for accessing workflow services and adding events + /// The to monitor for cancellation requests. + /// The default is . 
/// The input text converted to uppercase - public async ValueTask HandleAsync(string message, IWorkflowContext context) => + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) => message.ToUpperInvariant(); // The return value will be sent as a message along an edge to subsequent executors } @@ -93,6 +95,9 @@ internal sealed class ReverseTextExecutor() : ReflectingExecutor /// The input text to reverse /// Workflow context for accessing workflow services and adding events + /// The to monitor for cancellation requests. + /// The default is . /// The input text reversed - public async ValueTask HandleAsync(string message, IWorkflowContext context) => new string(message.Reverse().ToArray()); + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) + => new(message.Reverse().ToArray()); } diff --git a/dotnet/samples/GettingStarted/Workflows/SharedStates/Program.cs b/dotnet/samples/GettingStarted/Workflows/SharedStates/Program.cs index 8812d2689e..246bb776c9 100644 --- a/dotnet/samples/GettingStarted/Workflows/SharedStates/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/SharedStates/Program.cs @@ -54,13 +54,13 @@ internal static class FileContentStateConstants internal sealed class FileReadExecutor() : ReflectingExecutor("FileReadExecutor"), IMessageHandler { - public async ValueTask HandleAsync(string message, IWorkflowContext context) + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) { // Read file content from embedded resource string fileContent = Resources.Read(message); // Store file content in a shared state for access by other executors string fileID = Guid.NewGuid().ToString("N"); - await context.QueueStateUpdateAsync(fileID, fileContent, scopeName: FileContentStateConstants.FileContentStateScope); + await 
context.QueueStateUpdateAsync(fileID, fileContent, scopeName: FileContentStateConstants.FileContentStateScope, cancellationToken); return fileID; } @@ -74,10 +74,10 @@ internal sealed class FileStats internal sealed class WordCountingExecutor() : ReflectingExecutor("WordCountingExecutor"), IMessageHandler { - public async ValueTask HandleAsync(string message, IWorkflowContext context) + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) { // Retrieve the file content from the shared state - var fileContent = await context.ReadStateAsync(message, scopeName: FileContentStateConstants.FileContentStateScope) + var fileContent = await context.ReadStateAsync(message, scopeName: FileContentStateConstants.FileContentStateScope, cancellationToken) ?? throw new InvalidOperationException("File content state not found"); int wordCount = fileContent.Split([' ', '\n', '\r'], StringSplitOptions.RemoveEmptyEntries).Length; @@ -86,12 +86,13 @@ public async ValueTask HandleAsync(string message, IWorkflowContext c } } -internal sealed class ParagraphCountingExecutor() : ReflectingExecutor("ParagraphCountingExecutor"), IMessageHandler +internal sealed class ParagraphCountingExecutor() : ReflectingExecutor("ParagraphCountingExecutor"), + IMessageHandler { - public async ValueTask HandleAsync(string message, IWorkflowContext context) + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) { // Retrieve the file content from the shared state - var fileContent = await context.ReadStateAsync(message, scopeName: FileContentStateConstants.FileContentStateScope) + var fileContent = await context.ReadStateAsync(message, scopeName: FileContentStateConstants.FileContentStateScope, cancellationToken) ?? 
throw new InvalidOperationException("File content state not found"); int paragraphCount = fileContent.Split(['\n', '\r'], StringSplitOptions.RemoveEmptyEntries).Length; @@ -104,7 +105,7 @@ internal sealed class AggregationExecutor() : ReflectingExecutor _messages = []; - public async ValueTask HandleAsync(FileStats message, IWorkflowContext context) + public async ValueTask HandleAsync(FileStats message, IWorkflowContext context, CancellationToken cancellationToken = default) { this._messages.Add(message); @@ -113,7 +114,7 @@ public async ValueTask HandleAsync(FileStats message, IWorkflowContext context) // Aggregate the results from both executors var totalParagraphCount = this._messages.Sum(m => m.ParagraphCount); var totalWordCount = this._messages.Sum(m => m.WordCount); - await context.YieldOutputAsync($"Total Paragraphs: {totalParagraphCount}, Total Words: {totalWordCount}"); + await context.YieldOutputAsync($"Total Paragraphs: {totalParagraphCount}, Total Words: {totalWordCount}", cancellationToken); } } } diff --git a/dotnet/samples/GettingStarted/Workflows/_Foundational/01_ExecutorsAndEdges/Program.cs b/dotnet/samples/GettingStarted/Workflows/_Foundational/01_ExecutorsAndEdges/Program.cs index b3712eed11..ba70329383 100644 --- a/dotnet/samples/GettingStarted/Workflows/_Foundational/01_ExecutorsAndEdges/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/_Foundational/01_ExecutorsAndEdges/Program.cs @@ -51,8 +51,10 @@ internal sealed class UppercaseExecutor() : ReflectingExecutor /// The input text to convert /// Workflow context for accessing workflow services and adding events + /// The to monitor for cancellation requests. + /// The default is . 
/// The input text converted to uppercase - public async ValueTask HandleAsync(string message, IWorkflowContext context) => + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) => message.ToUpperInvariant(); // The return value will be sent as a message along an edge to subsequent executors } @@ -66,8 +68,10 @@ internal sealed class ReverseTextExecutor() : ReflectingExecutor /// The input text to reverse /// Workflow context for accessing workflow services and adding events + /// The to monitor for cancellation requests. + /// The default is . /// The input text reversed - public async ValueTask HandleAsync(string message, IWorkflowContext context) + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) { // Because we do not suppress it, the returned result will be yielded as an output from this executor. return string.Concat(message.Reverse()); diff --git a/dotnet/samples/GettingStarted/Workflows/_Foundational/02_Streaming/Program.cs b/dotnet/samples/GettingStarted/Workflows/_Foundational/02_Streaming/Program.cs index c3c1370b84..5ffd64aedb 100644 --- a/dotnet/samples/GettingStarted/Workflows/_Foundational/02_Streaming/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/_Foundational/02_Streaming/Program.cs @@ -50,8 +50,10 @@ internal sealed class UppercaseExecutor() : ReflectingExecutor /// The input text to convert /// Workflow context for accessing workflow services and adding events + /// The to monitor for cancellation requests. + /// The default is . 
/// The input text converted to uppercase - public async ValueTask HandleAsync(string message, IWorkflowContext context) => + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) => message.ToUpperInvariant(); // The return value will be sent as a message along an edge to subsequent executors } @@ -65,8 +67,10 @@ internal sealed class ReverseTextExecutor() : ReflectingExecutor /// The input text to reverse /// Workflow context for accessing workflow services and adding events + /// The to monitor for cancellation requests. + /// The default is . /// The input text reversed - public async ValueTask HandleAsync(string message, IWorkflowContext context) + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken) { // Because we do not suppress it, the returned result will be yielded as an output from this executor. return string.Concat(message.Reverse()); diff --git a/dotnet/samples/GettingStarted/Workflows/_Foundational/03_AgentsInWorkflows/03_AgentsInWorkflows.csproj b/dotnet/samples/GettingStarted/Workflows/_Foundational/03_AgentsInWorkflows/03_AgentsInWorkflows.csproj index 354163794e..84ac3db0d5 100644 --- a/dotnet/samples/GettingStarted/Workflows/_Foundational/03_AgentsInWorkflows/03_AgentsInWorkflows.csproj +++ b/dotnet/samples/GettingStarted/Workflows/_Foundational/03_AgentsInWorkflows/03_AgentsInWorkflows.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Workflows/_Foundational/03_AgentsInWorkflows/Program.cs b/dotnet/samples/GettingStarted/Workflows/_Foundational/03_AgentsInWorkflows/Program.cs index 2b30f3f5a1..840ff7c76e 100644 --- a/dotnet/samples/GettingStarted/Workflows/_Foundational/03_AgentsInWorkflows/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/_Foundational/03_AgentsInWorkflows/Program.cs @@ -1,10 +1,11 @@ // Copyright (c) Microsoft. All rights reserved. 
-using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Agents.AI.Workflows; using Microsoft.Extensions.AI; +using OpenAI; namespace WorkflowAgentsInWorkflowsSample; @@ -30,7 +31,10 @@ private static async Task Main() // Set up the Azure OpenAI client var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; - var chatClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetChatClient(deploymentName).AsIChatClient(); + var chatClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName).AsIChatClient(); // Create agents AIAgent frenchAgent = GetTranslationAgent("French", chatClient); diff --git a/dotnet/samples/GettingStarted/Workflows/_Foundational/04_AgentWorkflowPatterns/04_AgentWorkflowPatterns.csproj b/dotnet/samples/GettingStarted/Workflows/_Foundational/04_AgentWorkflowPatterns/04_AgentWorkflowPatterns.csproj index 354163794e..84ac3db0d5 100644 --- a/dotnet/samples/GettingStarted/Workflows/_Foundational/04_AgentWorkflowPatterns/04_AgentWorkflowPatterns.csproj +++ b/dotnet/samples/GettingStarted/Workflows/_Foundational/04_AgentWorkflowPatterns/04_AgentWorkflowPatterns.csproj @@ -9,7 +9,6 @@ - diff --git a/dotnet/samples/GettingStarted/Workflows/_Foundational/04_AgentWorkflowPatterns/Program.cs b/dotnet/samples/GettingStarted/Workflows/_Foundational/04_AgentWorkflowPatterns/Program.cs index aeb06cfb64..0fc8ce931c 100644 --- a/dotnet/samples/GettingStarted/Workflows/_Foundational/04_AgentWorkflowPatterns/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/_Foundational/04_AgentWorkflowPatterns/Program.cs @@ 
-1,11 +1,12 @@ // Copyright (c) Microsoft. All rights reserved. +using System.ClientModel.Primitives; using System.Text.Json; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Agents.AI.Workflows; using Microsoft.Extensions.AI; +using OpenAI; namespace WorkflowAgentsInWorkflowsSample; @@ -25,7 +26,10 @@ private static async Task Main() // Set up the Azure OpenAI client. var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; - var client = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetChatClient(deploymentName).AsIChatClient(); + var client = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName).AsIChatClient(); Console.Write("Choose workflow type ('sequential', 'concurrent', 'handoffs', 'groupchat'): "); switch (Console.ReadLine()) diff --git a/dotnet/samples/SemanticKernelMigration/AzureOpenAI/Step01_Basics/Program.cs b/dotnet/samples/SemanticKernelMigration/AzureOpenAI/Step01_Basics/Program.cs index 527f813be6..0f12a859f8 100644 --- a/dotnet/samples/SemanticKernelMigration/AzureOpenAI/Step01_Basics/Program.cs +++ b/dotnet/samples/SemanticKernelMigration/AzureOpenAI/Step01_Basics/Program.cs @@ -1,6 +1,6 @@ // Copyright (c) Microsoft. All rights reserved. 
-using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.SemanticKernel; @@ -50,7 +50,10 @@ async Task AFAgent() { Console.WriteLine("\n=== AF Agent ===\n"); - var agent = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetChatClient(deploymentName) + var agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName) .CreateAIAgent(name: "Joker", instructions: "You are good at telling jokes."); var thread = agent.GetNewThread(); diff --git a/dotnet/samples/SemanticKernelMigration/AzureOpenAI/Step02_ToolCall/Program.cs b/dotnet/samples/SemanticKernelMigration/AzureOpenAI/Step02_ToolCall/Program.cs index 56ca87973a..bf4122d486 100644 --- a/dotnet/samples/SemanticKernelMigration/AzureOpenAI/Step02_ToolCall/Program.cs +++ b/dotnet/samples/SemanticKernelMigration/AzureOpenAI/Step02_ToolCall/Program.cs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft. All rights reserved. 
+using System.ClientModel.Primitives; using System.ComponentModel; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Extensions.AI; using Microsoft.SemanticKernel; @@ -44,7 +44,10 @@ async Task SKAgent() async Task AFAgent() { - var agent = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetChatClient(deploymentName) + var agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetChatClient(deploymentName) .CreateAIAgent(instructions: "You are a helpful assistant", tools: [AIFunctionFactory.Create(GetWeather)]); Console.WriteLine("\n=== AF Agent Response ===\n"); diff --git a/dotnet/samples/SemanticKernelMigration/AzureOpenAI/Step03_DependencyInjection/Program.cs b/dotnet/samples/SemanticKernelMigration/AzureOpenAI/Step03_DependencyInjection/Program.cs index caf166674d..e16a2350d3 100644 --- a/dotnet/samples/SemanticKernelMigration/AzureOpenAI/Step03_DependencyInjection/Program.cs +++ b/dotnet/samples/SemanticKernelMigration/AzureOpenAI/Step03_DependencyInjection/Program.cs @@ -1,6 +1,6 @@ // Copyright (c) Microsoft. All rights reserved. 
-using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; @@ -43,7 +43,9 @@ async Task AFAgent() Console.WriteLine("\n=== AF Agent ===\n"); var serviceCollection = new ServiceCollection(); - serviceCollection.AddTransient((sp) => new AzureOpenAIClient(new(endpoint), new AzureCliCredential()) + serviceCollection.AddTransient((sp) => new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetChatClient(deploymentName) .CreateAIAgent(name: "Joker", instructions: "You are good at telling jokes.")); diff --git a/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step01_Basics/Program.cs b/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step01_Basics/Program.cs index beb7bed1c9..eff8fbfc29 100644 --- a/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step01_Basics/Program.cs +++ b/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step01_Basics/Program.cs @@ -1,6 +1,6 @@ // Copyright (c) Microsoft. All rights reserved. 
-using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.SemanticKernel; @@ -23,7 +23,10 @@ async Task SKAgent() Console.WriteLine("\n=== SK Agent ===\n"); var _ = Kernel.CreateBuilder().AddAzureOpenAIChatClient(deploymentName, endpoint, new AzureCliCredential()); - var assistantsClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetAssistantClient(); + var assistantsClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetAssistantClient(); // Define the assistant Assistant assistant = await assistantsClient.CreateAssistantAsync(deploymentName, name: "Joker", instructions: "You are good at telling jokes."); @@ -56,7 +59,10 @@ async Task AFAgent() { Console.WriteLine("\n=== AF Agent ===\n"); - var assistantClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetAssistantClient(); + var assistantClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetAssistantClient(); var agent = await assistantClient.CreateAIAgentAsync(deploymentName, name: "Joker", instructions: "You are good at telling jokes."); diff --git a/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step02_ToolCall/Program.cs b/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step02_ToolCall/Program.cs index 4ec04a276a..d50f7cfb24 100644 --- a/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step02_ToolCall/Program.cs +++ b/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step02_ToolCall/Program.cs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft. All rights reserved. 
+using System.ClientModel.Primitives; using System.ComponentModel; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; @@ -30,7 +30,10 @@ async Task SKAgent() Console.WriteLine("\n=== SK Agent ===\n"); var builder = Kernel.CreateBuilder(); - var assistantsClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetAssistantClient(); + var assistantsClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetAssistantClient(); Assistant assistant = await assistantsClient.CreateAssistantAsync(deploymentName, instructions: "You are a helpful assistant"); @@ -71,7 +74,10 @@ async Task AFAgent() { Console.WriteLine("\n=== AF Agent ===\n"); - var assistantClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetAssistantClient(); + var assistantClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetAssistantClient(); var agent = await assistantClient.CreateAIAgentAsync(deploymentName, instructions: "You are a helpful assistant", diff --git a/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step03_DependencyInjection/Program.cs b/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step03_DependencyInjection/Program.cs index ad6b00be1c..8e48f9f783 100644 --- a/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step03_DependencyInjection/Program.cs +++ b/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step03_DependencyInjection/Program.cs @@ -1,6 +1,6 @@ // Copyright (c) Microsoft. All rights reserved. 
-using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.DependencyInjection; @@ -24,7 +24,10 @@ async Task SKAgent() Console.WriteLine("\n=== SK Agent ===\n"); var serviceCollection = new ServiceCollection(); - serviceCollection.AddSingleton((sp) => new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetAssistantClient()); + serviceCollection.AddSingleton((sp) => new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetAssistantClient()); serviceCollection.AddKernel().AddAzureOpenAIChatClient(deploymentName, endpoint, new AzureCliCredential()); serviceCollection.AddTransient((sp) => { @@ -65,7 +68,10 @@ async Task AFAgent() Console.WriteLine("\n=== AF Agent ===\n"); var serviceCollection = new ServiceCollection(); - serviceCollection.AddSingleton((sp) => new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetAssistantClient()); + serviceCollection.AddSingleton((sp) => new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetAssistantClient()); serviceCollection.AddTransient((sp) => { var assistantClient = sp.GetRequiredService(); diff --git a/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step04_CodeInterpreter/Program.cs b/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step04_CodeInterpreter/Program.cs index 5353aaab5f..631313b7c0 100644 --- a/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step04_CodeInterpreter/Program.cs +++ b/dotnet/samples/SemanticKernelMigration/AzureOpenAIAssistants/Step04_CodeInterpreter/Program.cs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft. All rights reserved. 
+using System.ClientModel.Primitives; using System.Text; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; @@ -15,7 +15,10 @@ var deploymentName = System.Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o"; var userInput = "Create a python code file using the code interpreter tool with a code ready to determine the values in the Fibonacci sequence that are less then the value of 101"; -var assistantsClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()).GetAssistantClient(); +var assistantsClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) + .GetAssistantClient(); Console.WriteLine($"User Input: {userInput}"); diff --git a/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step01_Basics/Program.cs b/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step01_Basics/Program.cs index 22a17887e1..685126d697 100644 --- a/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step01_Basics/Program.cs +++ b/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step01_Basics/Program.cs @@ -1,6 +1,6 @@ // Copyright (c) Microsoft. All rights reserved. 
-using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.SemanticKernel.Agents.OpenAI; @@ -19,7 +19,9 @@ async Task SKAgentAsync() { Console.WriteLine("\n=== SK Agent ===\n"); - var responseClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) + var responseClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetOpenAIResponseClient(deploymentName); OpenAIResponseAgent agent = new(responseClient) { @@ -49,7 +51,9 @@ async Task AFAgentAsync() { Console.WriteLine("\n=== AF Agent ===\n"); - var agent = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) + var agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetOpenAIResponseClient(deploymentName) .CreateAIAgent(name: "Joker", instructions: "You are good at telling jokes."); diff --git a/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step02_ReasoningModel/Program.cs b/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step02_ReasoningModel/Program.cs index 8ee5ae89b6..daaeaa283c 100644 --- a/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step02_ReasoningModel/Program.cs +++ b/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step02_ReasoningModel/Program.cs @@ -1,6 +1,6 @@ // Copyright (c) Microsoft. All rights reserved. 
-using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; @@ -46,7 +46,9 @@ async Task SKAgentAsync() { Console.WriteLine("\n=== SK Agent ===\n"); - var responseClient = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) + var responseClient = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetOpenAIResponseClient(deploymentName); OpenAIResponseAgent agent = new(responseClient) { @@ -114,7 +116,9 @@ async Task AFAgentAsync() { Console.WriteLine("\n=== AF Agent ===\n"); - var agent = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) + var agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetOpenAIResponseClient(deploymentName) .CreateAIAgent(name: "Thinker", instructions: "You are good at thinking hard before answering."); diff --git a/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step03_ToolCall/Program.cs b/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step03_ToolCall/Program.cs index 12bb3b46b7..52acb3dbed 100644 --- a/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step03_ToolCall/Program.cs +++ b/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step03_ToolCall/Program.cs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft. All rights reserved. 
+using System.ClientModel.Primitives; using System.ComponentModel; -using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Extensions.AI; using Microsoft.SemanticKernel; @@ -24,7 +24,9 @@ static string GetWeather([Description("The location to get the weather for.")] s async Task SKAgentAsync() { - OpenAIResponseAgent agent = new(new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) + OpenAIResponseAgent agent = new(new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetOpenAIResponseClient(deploymentName)); // Initialize plugin and add to the agent's Kernel (same as direct Kernel usage). @@ -43,7 +45,9 @@ async Task SKAgentAsync() async Task AFAgentAsync() { - var agent = new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) + var agent = new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetOpenAIResponseClient(deploymentName) .CreateAIAgent(instructions: "You are a helpful assistant", tools: [AIFunctionFactory.Create(GetWeather)]); diff --git a/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step04_DependencyInjection/Program.cs b/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step04_DependencyInjection/Program.cs index e2bef86a7f..419fb2f21b 100644 --- a/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step04_DependencyInjection/Program.cs +++ b/dotnet/samples/SemanticKernelMigration/AzureOpenAIResponses/Step04_DependencyInjection/Program.cs @@ -1,6 +1,6 @@ // Copyright (c) Microsoft. All rights reserved. 
-using Azure.AI.OpenAI; +using System.ClientModel.Primitives; using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; @@ -23,7 +23,9 @@ async Task SKAgentAsync() var serviceCollection = new ServiceCollection(); serviceCollection.AddTransient((sp) - => new OpenAIResponseAgent(new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) + => new OpenAIResponseAgent(new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetOpenAIResponseClient(deploymentName)) { Name = "Joker", @@ -42,7 +44,9 @@ async Task AFAgentAsync() Console.WriteLine("\n=== AF Agent ===\n"); var serviceCollection = new ServiceCollection(); - serviceCollection.AddTransient((sp) => new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()) + serviceCollection.AddTransient((sp) => new OpenAIClient( + new BearerTokenPolicy(new AzureCliCredential(), "https://ai.azure.com/.default"), + new OpenAIClientOptions() { Endpoint = new Uri($"{endpoint}/openai/v1") }) .GetOpenAIResponseClient(deploymentName) .CreateAIAgent(name: "Joker", instructions: "You are good at telling jokes.")); diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/AzureAgentProvider.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/AzureAgentProvider.cs index 141233c2cb..0ff167d50d 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/AzureAgentProvider.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/AzureAgentProvider.cs @@ -37,7 +37,13 @@ public sealed class AzureAgentProvider(string projectEndpoint, TokenCredential p /// public override async Task CreateConversationAsync(CancellationToken cancellationToken = default) { - PersistentAgentThread conversation = await this.GetAgentsClient().Threads.CreateThreadAsync(cancellationToken: cancellationToken).ConfigureAwait(false); + PersistentAgentThread conversation = + await 
this.GetAgentsClient().Threads.CreateThreadAsync( + messages: null, + toolResources: null, + metadata: null, + cancellationToken).ConfigureAwait(false); + return conversation.Id; } @@ -78,6 +84,7 @@ IEnumerable GetContent() TextContent textContent => new MessageInputTextBlock(textContent.Text), HostedFileContent fileContent => new MessageInputImageFileBlock(new MessageImageFileParam(fileContent.FileId)), UriContent uriContent when uriContent.Uri is not null => new MessageInputImageUriBlock(new MessageImageUriParam(uriContent.Uri.ToString())), + DataContent dataContent when dataContent.Uri is not null => new MessageInputImageUriBlock(new MessageImageUriParam(dataContent.Uri)), _ => null // Unsupported content type }; @@ -91,7 +98,7 @@ IEnumerable GetContent() /// public override async Task GetAgentAsync(string agentId, CancellationToken cancellationToken = default) => - await this.GetAgentsClient().GetAIAgentAsync(agentId, chatOptions: null, cancellationToken: cancellationToken).ConfigureAwait(false); + await this.GetAgentsClient().GetAIAgentAsync(agentId, chatOptions: null, clientFactory: null, cancellationToken).ConfigureAwait(false); /// public override async Task GetMessageAsync(string conversationId, string messageId, CancellationToken cancellationToken = default) diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Extensions/AgentProviderExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Extensions/AgentProviderExtensions.cs index 20a825454a..5b6bbbc297 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Extensions/AgentProviderExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Extensions/AgentProviderExtensions.cs @@ -4,12 +4,21 @@ using System.Linq; using System.Threading; using System.Threading.Tasks; +using Azure.AI.Agents.Persistent; using Microsoft.Extensions.AI; namespace Microsoft.Agents.AI.Workflows.Declarative.Extensions; internal static class AgentProviderExtensions { + private 
static readonly HashSet s_failureStatus = + [ + Azure.AI.Agents.Persistent.RunStatus.Failed, + Azure.AI.Agents.Persistent.RunStatus.Cancelled, + Azure.AI.Agents.Persistent.RunStatus.Cancelling, + Azure.AI.Agents.Persistent.RunStatus.Expired, + ]; + public static async ValueTask InvokeAgentAsync( this WorkflowAgentProvider agentProvider, string executorId, @@ -51,9 +60,16 @@ inputMessages is not null ? updates.Add(update); + if (update.RawRepresentation is ChatResponseUpdate chatUpdate && + chatUpdate.RawRepresentation is RunUpdate runUpdate && + s_failureStatus.Contains(runUpdate.Value.Status)) + { + throw new DeclarativeActionException($"Unexpected failure invoking agent, run {runUpdate.Value.Status}: {agent.Name ?? agent.Id} [{runUpdate.Value.Id}/{conversationId}]"); + } + if (autoSend) { - await context.AddEventAsync(new AgentRunUpdateEvent(executorId, update)).ConfigureAwait(false); + await context.AddEventAsync(new AgentRunUpdateEvent(executorId, update), cancellationToken).ConfigureAwait(false); } } @@ -61,7 +77,7 @@ inputMessages is not null ? if (autoSend) { - await context.AddEventAsync(new AgentRunResponseEvent(executorId, response)).ConfigureAwait(false); + await context.AddEventAsync(new AgentRunResponseEvent(executorId, response), cancellationToken).ConfigureAwait(false); } if (autoSend && !isWorkflowConversation && workflowConversationId is not null) @@ -87,7 +103,7 @@ async ValueTask AssignConversationIdAsync(string? 
assignValue) { conversationId = assignValue; - await context.QueueConversationUpdateAsync(conversationId).ConfigureAwait(false); + await context.QueueConversationUpdateAsync(conversationId, cancellationToken).ConfigureAwait(false); } } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Extensions/ChatMessageExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Extensions/ChatMessageExtensions.cs index c8df8c277a..f4f3bb65af 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Extensions/ChatMessageExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Extensions/ChatMessageExtensions.cs @@ -131,7 +131,7 @@ public static ChatRole ToChatRole(this AgentMessageRole role) => return contentType switch { - AgentMessageContentType.ImageUrl => new UriContent(contentValue, "image/*"), + AgentMessageContentType.ImageUrl => GetImageContent(contentValue), AgentMessageContentType.ImageFile => new HostedFileContent(contentValue), _ => new TextContent(contentValue) }; @@ -169,7 +169,7 @@ private static IEnumerable GetContent(this RecordDataValue message) yield return contentItem?.GetProperty(TypeSchema.Message.Fields.ContentType)?.Value switch { - TypeSchema.Message.ContentTypes.ImageUrl => new UriContent(contentValue.Value, "image/*"), + TypeSchema.Message.ContentTypes.ImageUrl => GetImageContent(contentValue.Value), TypeSchema.Message.ContentTypes.ImageFile => new HostedFileContent(contentValue.Value), _ => new TextContent(contentValue.Value) }; @@ -177,6 +177,11 @@ private static IEnumerable GetContent(this RecordDataValue message) } } + private static AIContent GetImageContent(string uriText) => + uriText.StartsWith("data:", StringComparison.OrdinalIgnoreCase) ? + new DataContent(uriText, "image/*") : + new UriContent(uriText, "image/*"); + private static TValue? 
GetProperty(this RecordDataValue record, string name) where TValue : DataValue { diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Extensions/IWorkflowContextExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Extensions/IWorkflowContextExtensions.cs index 4e2815f218..720636178e 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Extensions/IWorkflowContextExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Extensions/IWorkflowContextExtensions.cs @@ -14,23 +14,11 @@ namespace Microsoft.Agents.AI.Workflows.Declarative.Extensions; internal static class IWorkflowContextExtensions { - public static ValueTask RaiseInvocationEventAsync(this IWorkflowContext context, DialogAction action, string? priorEventId = null) => - context.AddEventAsync(new DeclarativeActionInvokedEvent(action, priorEventId)); + public static ValueTask RaiseInvocationEventAsync(this IWorkflowContext context, DialogAction action, string? priorEventId = null, CancellationToken cancellationToken = default) => + context.AddEventAsync(new DeclarativeActionInvokedEvent(action, priorEventId), cancellationToken); - public static ValueTask RaiseCompletionEventAsync(this IWorkflowContext context, DialogAction action) => - context.AddEventAsync(new DeclarativeActionCompletedEvent(action)); - - public static ValueTask SendResultMessageAsync(this IWorkflowContext context, string id, object? result = null, CancellationToken cancellationToken = default) => - context.SendMessageAsync(new ActionExecutorResult(id, result)); - - public static ValueTask QueueStateResetAsync(this IWorkflowContext context, PropertyPath variablePath) => - context.QueueStateUpdateAsync(Throw.IfNull(variablePath.VariableName), UnassignedValue.Instance, Throw.IfNull(variablePath.NamespaceAlias)); - - public static ValueTask QueueStateUpdateAsync(this IWorkflowContext context, PropertyPath variablePath, TValue? 
value) => - context.QueueStateUpdateAsync(Throw.IfNull(variablePath.VariableName), value, Throw.IfNull(variablePath.NamespaceAlias)); - - public static ValueTask QueueSystemUpdateAsync(this IWorkflowContext context, string key, TValue? value) => - DeclarativeContext(context).QueueSystemUpdateAsync(key, value); + public static ValueTask RaiseCompletionEventAsync(this IWorkflowContext context, DialogAction action, CancellationToken cancellationToken = default) => + context.AddEventAsync(new DeclarativeActionCompletedEvent(action), cancellationToken); public static FormulaValue ReadState(this IWorkflowContext context, PropertyPath variablePath) => context.ReadState(Throw.IfNull(variablePath.VariableName), Throw.IfNull(variablePath.NamespaceAlias)); @@ -38,18 +26,47 @@ public static FormulaValue ReadState(this IWorkflowContext context, PropertyPath public static FormulaValue ReadState(this IWorkflowContext context, string key, string? scopeName = null) => DeclarativeContext(context).State.Get(key, scopeName); - public static async ValueTask QueueConversationUpdateAsync(this IWorkflowContext context, string conversationId, bool isExternal = false) + public static ValueTask SendResultMessageAsync(this IWorkflowContext context, string id, CancellationToken cancellationToken = default) => + context.SendResultMessageAsync(id, result: null, cancellationToken); + + public static ValueTask SendResultMessageAsync(this IWorkflowContext context, string id, object? 
result, CancellationToken cancellationToken = default) => + context.SendMessageAsync(new ActionExecutorResult(id, result), targetId: null, cancellationToken); + + public static ValueTask QueueStateResetAsync(this IWorkflowContext context, PropertyPath variablePath, CancellationToken cancellationToken = default) => + context.QueueStateUpdateAsync(Throw.IfNull(variablePath.VariableName), UnassignedValue.Instance, Throw.IfNull(variablePath.NamespaceAlias), cancellationToken); + + public static ValueTask QueueStateUpdateAsync(this IWorkflowContext context, PropertyPath variablePath, TValue? value, CancellationToken cancellationToken = default) => + context.QueueStateUpdateAsync(Throw.IfNull(variablePath.VariableName), value, Throw.IfNull(variablePath.NamespaceAlias), cancellationToken); + + public static async ValueTask QueueEnvironmentUpdateAsync(this IWorkflowContext context, string key, TValue? value, CancellationToken cancellationToken = default) + { + DeclarativeWorkflowContext declarativeContext = DeclarativeContext(context); + await declarativeContext.UpdateStateAsync(key, value, VariableScopeNames.Environment, allowSystem: true, cancellationToken).ConfigureAwait(false); + declarativeContext.State.Bind(); + } + + public static async ValueTask QueueSystemUpdateAsync(this IWorkflowContext context, string key, TValue? 
value, CancellationToken cancellationToken = default) + { + DeclarativeWorkflowContext declarativeContext = DeclarativeContext(context); + await declarativeContext.UpdateStateAsync(key, value, VariableScopeNames.System, allowSystem: true, cancellationToken).ConfigureAwait(false); + declarativeContext.State.Bind(); + } + + public static ValueTask QueueConversationUpdateAsync(this IWorkflowContext context, string conversationId, CancellationToken cancellationToken = default) => + context.QueueConversationUpdateAsync(conversationId, isExternal: false, cancellationToken); + + public static async ValueTask QueueConversationUpdateAsync(this IWorkflowContext context, string conversationId, bool isExternal = false, CancellationToken cancellationToken = default) { RecordValue conversation = (RecordValue)context.ReadState(SystemScope.Names.Conversation, VariableScopeNames.System); if (isExternal) { conversation.UpdateField("Id", FormulaValue.New(conversationId)); - await context.QueueSystemUpdateAsync(SystemScope.Names.Conversation, conversation).ConfigureAwait(false); - await context.QueueSystemUpdateAsync(SystemScope.Names.ConversationId, FormulaValue.New(conversationId)).ConfigureAwait(false); + await context.QueueSystemUpdateAsync(SystemScope.Names.Conversation, conversation, cancellationToken).ConfigureAwait(false); + await context.QueueSystemUpdateAsync(SystemScope.Names.ConversationId, FormulaValue.New(conversationId), cancellationToken).ConfigureAwait(false); } - await context.AddEventAsync(new ConversationUpdateEvent(conversationId) { IsWorkflow = isExternal }).ConfigureAwait(false); + await context.AddEventAsync(new ConversationUpdateEvent(conversationId) { IsWorkflow = isExternal }, cancellationToken).ConfigureAwait(false); } public static bool IsWorkflowConversation( diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DeclarativeActionExecutor.cs 
b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DeclarativeActionExecutor.cs index 95604846ec..3b0169bc00 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DeclarativeActionExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DeclarativeActionExecutor.cs @@ -61,7 +61,7 @@ public ValueTask ResetAsync() } /// - public override async ValueTask HandleAsync(ActionExecutorResult message, IWorkflowContext context) + public override async ValueTask HandleAsync(ActionExecutorResult message, IWorkflowContext context, CancellationToken cancellationToken = default) { if (this.Model.Disabled) { @@ -69,7 +69,7 @@ public override async ValueTask HandleAsync(ActionExecutorResult message, IWorkf return; } - await context.RaiseInvocationEventAsync(this.Model, message.ExecutorId).ConfigureAwait(false); + await context.RaiseInvocationEventAsync(this.Model, message.ExecutorId, cancellationToken).ConfigureAwait(false); try { @@ -78,7 +78,7 @@ public override async ValueTask HandleAsync(ActionExecutorResult message, IWorkf if (this.EmitResultEvent) { - await context.SendResultMessageAsync(this.Id, result).ConfigureAwait(false); + await context.SendResultMessageAsync(this.Id, result, cancellationToken).ConfigureAwait(false); } } catch (DeclarativeActionException exception) @@ -95,7 +95,7 @@ public override async ValueTask HandleAsync(ActionExecutorResult message, IWorkf { if (this.IsDiscreteAction) { - await context.RaiseCompletionEventAsync(this.Model).ConfigureAwait(false); + await context.RaiseCompletionEventAsync(this.Model, cancellationToken).ConfigureAwait(false); } } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DeclarativeWorkflowContext.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DeclarativeWorkflowContext.cs index 3b01032467..f4b7cb14ba 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DeclarativeWorkflowContext.cs 
+++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DeclarativeWorkflowContext.cs @@ -3,6 +3,7 @@ using System.Collections.Frozen; using System.Collections.Generic; using System.Linq; +using System.Threading; using System.Threading.Tasks; using Microsoft.Agents.AI.Workflows.Declarative.Extensions; using Microsoft.Agents.AI.Workflows.Declarative.Kit; @@ -32,16 +33,18 @@ public DeclarativeWorkflowContext(IWorkflowContext source, WorkflowFormulaState public IReadOnlyDictionary? TraceContext => this.Source.TraceContext; /// - public ValueTask AddEventAsync(WorkflowEvent workflowEvent) => this.Source.AddEventAsync(workflowEvent); + public ValueTask AddEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken = default) + => this.Source.AddEventAsync(workflowEvent, cancellationToken); /// - public ValueTask YieldOutputAsync(object output) => this.Source.YieldOutputAsync(output); + public ValueTask YieldOutputAsync(object output, CancellationToken cancellationToken = default) + => this.Source.YieldOutputAsync(output, cancellationToken); /// public ValueTask RequestHaltAsync() => this.Source.RequestHaltAsync(); /// - public async ValueTask QueueClearScopeAsync(string? scopeName = null) + public async ValueTask QueueClearScopeAsync(string? scopeName = null, CancellationToken cancellationToken = default) { if (scopeName is not null) { @@ -50,12 +53,12 @@ public async ValueTask QueueClearScopeAsync(string? scopeName = null) // Copy keys to array to avoid modifying collection during enumeration. 
foreach (string key in this.State.Keys(scopeName).ToArray()) { - await this.UpdateStateAsync(key, UnassignedValue.Instance, scopeName).ConfigureAwait(false); + await this.UpdateStateAsync(key, UnassignedValue.Instance, scopeName, allowSystem: false, cancellationToken).ConfigureAwait(false); } } else { - await this.Source.QueueClearScopeAsync(scopeName).ConfigureAwait(false); + await this.Source.QueueClearScopeAsync(scopeName, cancellationToken).ConfigureAwait(false); } this.State.Bind(); @@ -63,20 +66,14 @@ public async ValueTask QueueClearScopeAsync(string? scopeName = null) } /// - public async ValueTask QueueStateUpdateAsync(string key, T? value, string? scopeName = null) + public async ValueTask QueueStateUpdateAsync(string key, T? value, string? scopeName = null, CancellationToken cancellationToken = default) { - await this.UpdateStateAsync(key, value, scopeName).ConfigureAwait(false); - this.State.Bind(); - } - - public async ValueTask QueueSystemUpdateAsync(string key, TValue? value) - { - await this.UpdateStateAsync(key, value, VariableScopeNames.System, allowSystem: true).ConfigureAwait(false); + await this.UpdateStateAsync(key, value, scopeName, allowSystem: false, cancellationToken).ConfigureAwait(false); this.State.Bind(); } /// - public async ValueTask ReadStateAsync(string key, string? scopeName = null) + public async ValueTask ReadStateAsync(string key, string? scopeName = null, CancellationToken cancellationToken = default) { bool isManagedScope = scopeName is not null && // null scope cannot be managed @@ -86,21 +83,23 @@ scopeName is not null && // null scope cannot be managed { // Not a managed scope, just pass through. This is valid when a declarative // workflow has been ejected to code (where DeclarativeWorkflowContext is also utilized). 
- _ when !isManagedScope => await this.Source.ReadStateAsync(key, scopeName).ConfigureAwait(false), + _ when !isManagedScope => await this.Source.ReadStateAsync(key, scopeName, cancellationToken).ConfigureAwait(false), // Retrieve formula values directly from the managed state to avoid conversion. _ when typeof(TValue) == typeof(FormulaValue) => (TValue?)(object?)this.State.Get(key, scopeName), // Retrieve native types from the source context to avoid conversion. - _ => await this.Source.ReadStateAsync(key, scopeName).ConfigureAwait(false), + _ => await this.Source.ReadStateAsync(key, scopeName, cancellationToken).ConfigureAwait(false), }; } /// - public ValueTask> ReadStateKeysAsync(string? scopeName = null) => this.Source.ReadStateKeysAsync(scopeName); + public ValueTask> ReadStateKeysAsync(string? scopeName = null, CancellationToken cancellationToken = default) + => this.Source.ReadStateKeysAsync(scopeName, cancellationToken); /// - public ValueTask SendMessageAsync(object message, string? targetId = null) => this.Source.SendMessageAsync(message, targetId); + public ValueTask SendMessageAsync(object message, string? targetId = null, CancellationToken cancellationToken = default) + => this.Source.SendMessageAsync(message, targetId, cancellationToken); - private ValueTask UpdateStateAsync(string key, T? value, string? scopeName, bool allowSystem = true) + public ValueTask UpdateStateAsync(string key, T? value, string? scopeName, bool allowSystem, CancellationToken cancellationToken = default) { bool isManagedScope = scopeName is not null && // null scope cannot be managed @@ -110,7 +109,7 @@ scopeName is not null && // null scope cannot be managed { // Not a managed scope, just pass through. This is valid when a declarative // workflow has been ejected to code (where DeclarativeWorkflowContext is also utilized). 
- return this.Source.QueueStateUpdateAsync(key, value, scopeName); + return this.Source.QueueStateUpdateAsync(key, value, scopeName, cancellationToken); } if (!ManagedScopes.Contains(scopeName!) && !allowSystem) @@ -134,7 +133,7 @@ ValueTask QueueEmptyStateAsync() { this.State.Set(key, FormulaValue.NewBlank(), scopeName); } - return this.Source.QueueStateUpdateAsync(key, UnassignedValue.Instance, scopeName); + return this.Source.QueueStateUpdateAsync(key, UnassignedValue.Instance, scopeName, cancellationToken); } ValueTask QueueFormulaStateAsync(FormulaValue formulaValue) @@ -143,7 +142,7 @@ ValueTask QueueFormulaStateAsync(FormulaValue formulaValue) { this.State.Set(key, formulaValue, scopeName); } - return this.Source.QueueStateUpdateAsync(key, formulaValue.ToObject(), scopeName); + return this.Source.QueueStateUpdateAsync(key, formulaValue.ToObject(), scopeName, cancellationToken); } ValueTask QueueDataValueStateAsync(DataValue dataValue) @@ -153,7 +152,7 @@ ValueTask QueueDataValueStateAsync(DataValue dataValue) FormulaValue formulaValue = dataValue.ToFormula(); this.State.Set(key, formulaValue, scopeName); } - return this.Source.QueueStateUpdateAsync(key, dataValue.ToObject(), scopeName); + return this.Source.QueueStateUpdateAsync(key, dataValue.ToObject(), scopeName, cancellationToken); } ValueTask QueueNativeStateAsync(object? rawValue) @@ -163,7 +162,7 @@ ValueTask QueueNativeStateAsync(object? 
rawValue) FormulaValue formulaValue = rawValue.ToFormula(); this.State.Set(key, formulaValue, scopeName); } - return this.Source.QueueStateUpdateAsync(key, rawValue, scopeName); + return this.Source.QueueStateUpdateAsync(key, rawValue, scopeName, cancellationToken); } } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DeclarativeWorkflowExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DeclarativeWorkflowExecutor.cs index e30bb94a01..7436e64446 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DeclarativeWorkflowExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DeclarativeWorkflowExecutor.cs @@ -1,6 +1,7 @@ // Copyright (c) Microsoft. All rights reserved. using System; +using System.Threading; using System.Threading.Tasks; using Microsoft.Agents.AI.Workflows.Declarative.Extensions; using Microsoft.Agents.AI.Workflows.Declarative.PowerFx; @@ -24,7 +25,7 @@ public ValueTask ResetAsync() return default; } - public override async ValueTask HandleAsync(TInput message, IWorkflowContext context) + public override async ValueTask HandleAsync(TInput message, IWorkflowContext context, CancellationToken cancellationToken = default) { // No state to restore if we're starting from the beginning. state.SetInitialized(); @@ -35,13 +36,13 @@ public override async ValueTask HandleAsync(TInput message, IWorkflowContext con string? 
conversationId = options.ConversationId; if (string.IsNullOrWhiteSpace(conversationId)) { - conversationId = await options.AgentProvider.CreateConversationAsync(cancellationToken: default).ConfigureAwait(false); + conversationId = await options.AgentProvider.CreateConversationAsync(cancellationToken).ConfigureAwait(false); } - await declarativeContext.QueueConversationUpdateAsync(conversationId, isExternal: true).ConfigureAwait(false); + await declarativeContext.QueueConversationUpdateAsync(conversationId, isExternal: true, cancellationToken).ConfigureAwait(false); - await options.AgentProvider.CreateMessageAsync(conversationId, input, cancellationToken: default).ConfigureAwait(false); - await declarativeContext.SetLastMessageAsync(input).ConfigureAwait(false); + ChatMessage inputMessage = await options.AgentProvider.CreateMessageAsync(conversationId, input, cancellationToken).ConfigureAwait(false); + await declarativeContext.SetLastMessageAsync(inputMessage).ConfigureAwait(false); - await context.SendResultMessageAsync(this.Id).ConfigureAwait(false); + await context.SendResultMessageAsync(this.Id, cancellationToken).ConfigureAwait(false); } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DelegateActionExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DelegateActionExecutor.cs index 21f1f1aa4d..1d9a2c7552 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DelegateActionExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Interpreter/DelegateActionExecutor.cs @@ -1,6 +1,7 @@ // Copyright (c) Microsoft. All rights reserved. 
using System.Diagnostics; +using System.Threading; using System.Threading.Tasks; using Microsoft.Agents.AI.Workflows.Declarative.Extensions; using Microsoft.Agents.AI.Workflows.Declarative.Kit; @@ -11,11 +12,11 @@ namespace Microsoft.Agents.AI.Workflows.Declarative.Interpreter; internal sealed class DelegateActionExecutor(string actionId, WorkflowFormulaState state, DelegateAction? action = null, bool emitResult = true) : DelegateActionExecutor(actionId, state, action, emitResult) { - public override ValueTask HandleAsync(ActionExecutorResult message, IWorkflowContext context) + public override ValueTask HandleAsync(ActionExecutorResult message, IWorkflowContext context, CancellationToken cancellationToken) { Debug.WriteLine($"RESULT #{this.Id} - {message.Result ?? "(null)"}"); - return base.HandleAsync(message, context); + return base.HandleAsync(message, context, cancellationToken); } } @@ -39,16 +40,16 @@ public ValueTask ResetAsync() return default; } - public override async ValueTask HandleAsync(TMessage message, IWorkflowContext context) + public override async ValueTask HandleAsync(TMessage message, IWorkflowContext context, CancellationToken cancellationToken = default) { if (this._action is not null) { - await this._action.Invoke(new DeclarativeWorkflowContext(context, this._state), message, default).ConfigureAwait(false); + await this._action.Invoke(new DeclarativeWorkflowContext(context, this._state), message, cancellationToken).ConfigureAwait(false); } if (this._emitResult) { - await context.SendResultMessageAsync(this.Id).ConfigureAwait(false); + await context.SendResultMessageAsync(this.Id, cancellationToken).ConfigureAwait(false); } } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Kit/ActionExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Kit/ActionExecutor.cs index e92c6b1b7d..a73eb70a73 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Kit/ActionExecutor.cs +++ 
b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Kit/ActionExecutor.cs @@ -73,12 +73,12 @@ public ValueTask ResetAsync() } /// - public override async ValueTask HandleAsync(TMessage message, IWorkflowContext context) + public override async ValueTask HandleAsync(TMessage message, IWorkflowContext context, CancellationToken cancellationToken) { object? result = await this.ExecuteAsync(new DeclarativeWorkflowContext(context, this._session.State), message, cancellationToken: default).ConfigureAwait(false); Debug.WriteLine($"RESULT #{this.Id} - {result ?? "(null)"}"); - await context.SendResultMessageAsync(this.Id, result).ConfigureAwait(false); + await context.SendResultMessageAsync(this.Id, result, cancellationToken).ConfigureAwait(false); } /// diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Kit/IWorkflowContextExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Kit/IWorkflowContextExtensions.cs index 185068d550..e33f32a1a3 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Kit/IWorkflowContextExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Kit/IWorkflowContextExtensions.cs @@ -132,7 +132,7 @@ public static async ValueTask FormatTemplateAsync(this IWorkflowContext /// The converted value public static async ValueTask ConvertValueAsync(this IWorkflowContext context, VariableType targetType, string key, string? scopeName = null, CancellationToken cancellationToken = default) { - object? sourceValue = await context.ReadStateAsync(key, scopeName).ConfigureAwait(false); + object? sourceValue = await context.ReadStateAsync(key, scopeName, cancellationToken).ConfigureAwait(false); return sourceValue.ConvertType(targetType); } @@ -143,10 +143,11 @@ public static async ValueTask FormatTemplateAsync(this IWorkflowContext /// The workflow execution context used to restore persisted state prior to formatting. /// The key of the state value. 
/// An optional name that specifies the scope to read.If null, the default scope is used. + /// A token that propagates notification when operation should be canceled. /// The evaluated list expression - public static async ValueTask?> ReadListAsync(this IWorkflowContext context, string key, string? scopeName = null) + public static async ValueTask?> ReadListAsync(this IWorkflowContext context, string key, string? scopeName = null, CancellationToken cancellationToken = default) { - object? value = await context.ReadStateAsync(key, scopeName).ConfigureAwait(false); + object? value = await context.ReadStateAsync(key, scopeName, cancellationToken).ConfigureAwait(false); return value.AsList(); } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Kit/RootExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Kit/RootExecutor.cs index a1ebd09f5b..4439207701 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Kit/RootExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Kit/RootExecutor.cs @@ -6,7 +6,6 @@ using Microsoft.Agents.AI.Workflows.Declarative.Extensions; using Microsoft.Agents.AI.Workflows.Declarative.Interpreter; using Microsoft.Agents.AI.Workflows.Declarative.PowerFx; -using Microsoft.Bot.ObjectModel; using Microsoft.Extensions.AI; using Microsoft.Extensions.Configuration; @@ -55,23 +54,23 @@ public ValueTask ResetAsync() } /// - public override async ValueTask HandleAsync(TInput message, IWorkflowContext context) + public override async ValueTask HandleAsync(TInput message, IWorkflowContext context, CancellationToken cancellationToken) { DeclarativeWorkflowContext declarativeContext = new(context, this._state); - await this.ExecuteAsync(message, declarativeContext, cancellationToken: default).ConfigureAwait(false); + await this.ExecuteAsync(message, declarativeContext, cancellationToken).ConfigureAwait(false); ChatMessage input = (this._inputTransform ?? 
DefaultInputTransform).Invoke(message); if (string.IsNullOrWhiteSpace(this._conversationId)) { - this._conversationId = await this._agentProvider.CreateConversationAsync(cancellationToken: default).ConfigureAwait(false); + this._conversationId = await this._agentProvider.CreateConversationAsync(cancellationToken).ConfigureAwait(false); } - await declarativeContext.QueueConversationUpdateAsync(this._conversationId, isExternal: true).ConfigureAwait(false); + await declarativeContext.QueueConversationUpdateAsync(this._conversationId, isExternal: true, cancellationToken).ConfigureAwait(false); - await this._agentProvider.CreateMessageAsync(this._conversationId, input, cancellationToken: default).ConfigureAwait(false); - await declarativeContext.SetLastMessageAsync(input).ConfigureAwait(false); + ChatMessage inputMessage = await this._agentProvider.CreateMessageAsync(this._conversationId, input, cancellationToken).ConfigureAwait(false); + await declarativeContext.SetLastMessageAsync(inputMessage).ConfigureAwait(false); - await declarativeContext.SendMessageAsync(new ActionExecutorResult(this.Id)).ConfigureAwait(false); + await declarativeContext.SendResultMessageAsync(this.Id, cancellationToken).ConfigureAwait(false); } /// @@ -94,7 +93,7 @@ protected async ValueTask InitializeEnvironmentAsync(IWorkflowContext context, p { foreach (string variableName in variableNames) { - await context.QueueStateUpdateAsync(variableName, GetEnvironmentVariable(variableName), VariableScopeNames.Environment).ConfigureAwait(false); + await context.QueueEnvironmentUpdateAsync(variableName, GetEnvironmentVariable(variableName)).ConfigureAwait(false); } string GetEnvironmentVariable(string name) diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Microsoft.Agents.AI.Workflows.Declarative.csproj b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Microsoft.Agents.AI.Workflows.Declarative.csproj index 6cac284f46..9e23c5f727 100644 --- 
a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Microsoft.Agents.AI.Workflows.Declarative.csproj +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/Microsoft.Agents.AI.Workflows.Declarative.csproj @@ -1,4 +1,4 @@ - + $(ProjectsTargetFrameworks) @@ -21,16 +21,15 @@ - - - + + diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ClearAllVariablesExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ClearAllVariablesExecutor.cs index 419e112d0a..62f0dbac75 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ClearAllVariablesExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ClearAllVariablesExecutor.cs @@ -28,7 +28,7 @@ internal sealed class ClearAllVariablesExecutor(ClearAllVariables model, Workflo if (scope is not null) { - await context.QueueClearScopeAsync(scope).ConfigureAwait(false); + await context.QueueClearScopeAsync(scope, cancellationToken).ConfigureAwait(false); Debug.WriteLine( $""" STATE: {this.GetType().Name} [{this.Id}] diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ConditionGroupExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ConditionGroupExecutor.cs index 65a6510b18..b935b6e185 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ConditionGroupExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ConditionGroupExecutor.cs @@ -69,5 +69,5 @@ public bool IsElse(object? 
message) } public async ValueTask DoneAsync(IWorkflowContext context, ActionExecutorResult _, CancellationToken cancellationToken) => - await context.RaiseCompletionEventAsync(this.Model).ConfigureAwait(false); + await context.RaiseCompletionEventAsync(this.Model, cancellationToken).ConfigureAwait(false); } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/CreateConversationExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/CreateConversationExecutor.cs index 1da428f931..e229046864 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/CreateConversationExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/CreateConversationExecutor.cs @@ -17,7 +17,7 @@ internal sealed class CreateConversationExecutor(CreateConversation model, Workf { string conversationId = await agentProvider.CreateConversationAsync(cancellationToken).ConfigureAwait(false); await this.AssignAsync(this.Model.ConversationId?.Path, FormulaValue.New(conversationId), context).ConfigureAwait(false); - await context.QueueConversationUpdateAsync(conversationId).ConfigureAwait(false); + await context.QueueConversationUpdateAsync(conversationId, cancellationToken).ConfigureAwait(false); return default; } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ForeachExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ForeachExecutor.cs index 41c9e7cb5a..3130a29b85 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ForeachExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ForeachExecutor.cs @@ -68,11 +68,11 @@ public async ValueTask TakeNextAsync(IWorkflowContext context, object? 
_, Cancel { FormulaValue value = this._values[this._index]; - await context.QueueStateUpdateAsync(Throw.IfNull(this.Model.Value), value).ConfigureAwait(false); + await context.QueueStateUpdateAsync(Throw.IfNull(this.Model.Value), value, cancellationToken).ConfigureAwait(false); if (this.Model.Index is not null) { - await context.QueueStateUpdateAsync(this.Model.Index.Path, FormulaValue.New(this._index)).ConfigureAwait(false); + await context.QueueStateUpdateAsync(this.Model.Index.Path, FormulaValue.New(this._index), cancellationToken).ConfigureAwait(false); } this._index++; @@ -83,15 +83,15 @@ public async ValueTask ResetAsync(IWorkflowContext context, object? _, Cancellat { try { - await context.QueueStateResetAsync(Throw.IfNull(this.Model.Value)).ConfigureAwait(false); + await context.QueueStateResetAsync(Throw.IfNull(this.Model.Value), cancellationToken).ConfigureAwait(false); if (this.Model.Index is not null) { - await context.QueueStateResetAsync(this.Model.Index).ConfigureAwait(false); + await context.QueueStateResetAsync(this.Model.Index, cancellationToken).ConfigureAwait(false); } } finally { - await context.RaiseCompletionEventAsync(this.Model).ConfigureAwait(false); + await context.RaiseCompletionEventAsync(this.Model, cancellationToken).ConfigureAwait(false); } } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/QuestionExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/QuestionExecutor.cs index 1d34869441..9dbaa2efa1 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/QuestionExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/QuestionExecutor.cs @@ -65,7 +65,7 @@ public static bool IsComplete(object? 
message) } else { - await context.SendResultMessageAsync(this.Id, result: null, cancellationToken).ConfigureAwait(false); + await context.SendResultMessageAsync(this.Id, cancellationToken).ConfigureAwait(false); } return default; @@ -75,7 +75,7 @@ public async ValueTask PrepareResponseAsync(IWorkflowContext context, ActionExec { int count = await this._promptCount.ReadAsync(context).ConfigureAwait(false); InputRequest inputRequest = new(this.FormatPrompt(this.Model.Prompt)); - await context.SendMessageAsync(inputRequest).ConfigureAwait(false); + await context.SendMessageAsync(inputRequest, targetId: null, cancellationToken).ConfigureAwait(false); await this._promptCount.WriteAsync(context, count + 1).ConfigureAwait(false); } @@ -85,7 +85,7 @@ public async ValueTask CaptureResponseAsync(IWorkflowContext context, InputRespo if (string.IsNullOrWhiteSpace(message.Value)) { string unrecognizedResponse = this.FormatPrompt(this.Model.UnrecognizedPrompt); - await context.AddEventAsync(new MessageActivityEvent(unrecognizedResponse.Trim())).ConfigureAwait(false); + await context.AddEventAsync(new MessageActivityEvent(unrecognizedResponse.Trim()), cancellationToken).ConfigureAwait(false); } else { @@ -97,7 +97,7 @@ public async ValueTask CaptureResponseAsync(IWorkflowContext context, InputRespo else { string invalidResponse = this.FormatPrompt(this.Model.InvalidPrompt); - await context.AddEventAsync(new MessageActivityEvent(invalidResponse.Trim())).ConfigureAwait(false); + await context.AddEventAsync(new MessageActivityEvent(invalidResponse.Trim()), cancellationToken).ConfigureAwait(false); } } @@ -109,13 +109,13 @@ public async ValueTask CaptureResponseAsync(IWorkflowContext context, InputRespo { await this.AssignAsync(this.Model.Variable?.Path, extractedValue, context).ConfigureAwait(false); await this._hasExecuted.WriteAsync(context, true).ConfigureAwait(false); - await context.SendResultMessageAsync(this.Id, result: null, cancellationToken).ConfigureAwait(false); + await 
context.SendResultMessageAsync(this.Id, cancellationToken).ConfigureAwait(false); } } public async ValueTask CompleteAsync(IWorkflowContext context, ActionExecutorResult message, CancellationToken cancellationToken) { - await context.RaiseCompletionEventAsync(this.Model).ConfigureAwait(false); + await context.RaiseCompletionEventAsync(this.Model, cancellationToken).ConfigureAwait(false); } private async ValueTask PromptAsync(IWorkflowContext context, CancellationToken cancellationToken) @@ -128,8 +128,8 @@ private async ValueTask PromptAsync(IWorkflowContext context, CancellationToken DataValue defaultValue = this.Evaluator.GetValue(defaultValueExpression).Value; await this.AssignAsync(this.Model.Variable?.Path, defaultValue.ToFormula(), context).ConfigureAwait(false); string defaultValueResponse = this.FormatPrompt(this.Model.DefaultValueResponse); - await context.AddEventAsync(new MessageActivityEvent(defaultValueResponse.Trim())).ConfigureAwait(false); - await context.SendResultMessageAsync(this.Id, result: null, cancellationToken).ConfigureAwait(false); + await context.AddEventAsync(new MessageActivityEvent(defaultValueResponse.Trim()), cancellationToken).ConfigureAwait(false); + await context.SendResultMessageAsync(this.Id, cancellationToken).ConfigureAwait(false); } else { diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ResetVariableExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ResetVariableExecutor.cs index da625b11ae..eb679fa4b0 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ResetVariableExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/ResetVariableExecutor.cs @@ -17,7 +17,7 @@ internal sealed class ResetVariableExecutor(ResetVariable model, WorkflowFormula protected override async ValueTask ExecuteAsync(IWorkflowContext context, CancellationToken cancellationToken = default) { Throw.IfNull(this.Model.Variable, 
$"{nameof(this.Model)}.{nameof(model.Variable)}"); - await context.QueueStateResetAsync(this.Model.Variable).ConfigureAwait(false); + await context.QueueStateResetAsync(this.Model.Variable, cancellationToken).ConfigureAwait(false); Debug.WriteLine( $""" STATE: {this.GetType().Name} [{this.Id}] diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/SendActivityExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/SendActivityExecutor.cs index 69ed8bb970..9af463f865 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/SendActivityExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/ObjectModel/SendActivityExecutor.cs @@ -18,7 +18,7 @@ internal sealed class SendActivityExecutor(SendActivity model, WorkflowFormulaSt { string activityText = this.Engine.Format(messageActivity.Text).Trim(); - await context.AddEventAsync(new MessageActivityEvent(activityText.Trim())).ConfigureAwait(false); + await context.AddEventAsync(new MessageActivityEvent(activityText.Trim()), cancellationToken).ConfigureAwait(false); } return default; diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/PowerFx/WorkflowExpressionEngine.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/PowerFx/WorkflowExpressionEngine.cs index 17a013cb11..fa3ae6b32d 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/PowerFx/WorkflowExpressionEngine.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/PowerFx/WorkflowExpressionEngine.cs @@ -274,6 +274,13 @@ private EvaluationResult EvaluateScope(ExpressionBase expression) expression.VariableReference?.ToString() : expression.ExpressionText; - return new(this._engine.Eval(expressionText), SensitivityLevel.None); + FormulaValue result = this._engine.Eval(expressionText); + + if (result is ErrorValue errorValue) + { + throw new DeclarativeActionException(errorValue.Format()); + } + + return new(result, SensitivityLevel.None); } } diff 
--git a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/PowerFx/WorkflowFormulaState.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/PowerFx/WorkflowFormulaState.cs index ee58a3c63c..3d810d073b 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/PowerFx/WorkflowFormulaState.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Declarative/PowerFx/WorkflowFormulaState.cs @@ -72,10 +72,10 @@ public async ValueTask RestoreAsync(IWorkflowContext context, CancellationToken async Task ReadScopeAsync(string scopeName) { - HashSet keys = await context.ReadStateKeysAsync(scopeName).ConfigureAwait(false); + HashSet keys = await context.ReadStateKeysAsync(scopeName, cancellationToken).ConfigureAwait(false); foreach (string key in keys) { - object? value = await context.ReadStateAsync(key, scopeName).ConfigureAwait(false); + object? value = await context.ReadStateAsync(key, scopeName, cancellationToken).ConfigureAwait(false); if (value is null or UnassignedValue) { value = FormulaValue.NewBlank(); diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/AgentWorkflowBuilder.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/AgentWorkflowBuilder.cs index b21edee04c..ed91c14e5f 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/AgentWorkflowBuilder.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/AgentWorkflowBuilder.cs @@ -150,12 +150,12 @@ private sealed class AgentRunStreamingExecutor(AIAgent agent, bool includeInputI protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => routeBuilder - .AddHandler((message, context) => this._pendingMessages.Add(new(ChatRole.User, message))) - .AddHandler((message, context) => this._pendingMessages.Add(message)) - .AddHandler>((messages, _) => this._pendingMessages.AddRange(messages)) - .AddHandler((messages, _) => this._pendingMessages.AddRange(messages)) // TODO: Remove once https://github.com/microsoft/agent-framework/issues/782 is addressed - .AddHandler>((messages, _) => 
this._pendingMessages.AddRange(messages)) // TODO: Remove once https://github.com/microsoft/agent-framework/issues/782 is addressed - .AddHandler(async (token, context) => + .AddHandler((message, _, __) => this._pendingMessages.Add(new(ChatRole.User, message))) + .AddHandler((message, _, __) => this._pendingMessages.Add(message)) + .AddHandler>((messages, _, __) => this._pendingMessages.AddRange(messages)) + .AddHandler((messages, _, __) => this._pendingMessages.AddRange(messages)) // TODO: Remove once https://github.com/microsoft/agent-framework/issues/782 is addressed + .AddHandler>((messages, _, __) => this._pendingMessages.AddRange(messages)) // TODO: Remove once https://github.com/microsoft/agent-framework/issues/782 is addressed + .AddHandler(async (token, context, cancellationToken) => { List messages = [.. this._pendingMessages]; this._pendingMessages.Clear(); @@ -163,12 +163,12 @@ protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => List? roleChanged = ChangeAssistantToUserForOtherParticipants(agent.DisplayName, messages); List updates = []; - await foreach (var update in agent.RunStreamingAsync(messages).ConfigureAwait(false)) + await foreach (var update in agent.RunStreamingAsync(messages, cancellationToken: cancellationToken).ConfigureAwait(false)) { updates.Add(update); if (token.EmitEvents is true) { - await context.AddEventAsync(new AgentRunUpdateEvent(this.Id, update)).ConfigureAwait(false); + await context.AddEventAsync(new AgentRunUpdateEvent(this.Id, update), cancellationToken).ConfigureAwait(false); } } @@ -181,8 +181,8 @@ protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => messages.AddRange(updates.ToAgentRunResponse().Messages); - await context.SendMessageAsync(messages).ConfigureAwait(false); - await context.SendMessageAsync(token).ConfigureAwait(false); + await context.SendMessageAsync(messages, cancellationToken: cancellationToken).ConfigureAwait(false); + await 
context.SendMessageAsync(token, cancellationToken: cancellationToken).ConfigureAwait(false); }); public ValueTask ResetAsync() @@ -199,7 +199,7 @@ public ValueTask ResetAsync() private sealed class OutputMessagesExecutor() : ChatProtocolExecutor("OutputMessages"), IResettableExecutor { protected override ValueTask TakeTurnAsync(List messages, IWorkflowContext context, bool? emitEvents, CancellationToken cancellationToken = default) - => context.YieldOutputAsync(messages); + => context.YieldOutputAsync(messages, cancellationToken); ValueTask IResettableExecutor.ResetAsync() => this.ResetAsync(); } @@ -209,10 +209,10 @@ private sealed class ChatForwardingExecutor(string id) : Executor(id), IResettab { protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => routeBuilder - .AddHandler((message, context) => context.SendMessageAsync(new ChatMessage(ChatRole.User, message))) - .AddHandler((message, context) => context.SendMessageAsync(message)) - .AddHandler>((messages, context) => context.SendMessageAsync(messages)) - .AddHandler((turnToken, context) => context.SendMessageAsync(turnToken)); + .AddHandler((message, context, cancellationToken) => context.SendMessageAsync(new ChatMessage(ChatRole.User, message), cancellationToken: cancellationToken)) + .AddHandler((message, context, cancellationToken) => context.SendMessageAsync(message, cancellationToken: cancellationToken)) + .AddHandler>((messages, context, cancellationToken) => context.SendMessageAsync(messages, cancellationToken: cancellationToken)) + .AddHandler((turnToken, context, cancellationToken) => context.SendMessageAsync(turnToken, cancellationToken: cancellationToken)); public ValueTask ResetAsync() => default; } @@ -224,7 +224,7 @@ protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => private sealed class BatchChatMessagesToListExecutor(string id) : ChatProtocolExecutor(id), IResettableExecutor { protected override ValueTask TakeTurnAsync(List messages, 
IWorkflowContext context, bool? emitEvents, CancellationToken cancellationToken = default) - => context.SendMessageAsync(messages); + => context.SendMessageAsync(messages, cancellationToken: cancellationToken); ValueTask IResettableExecutor.ResetAsync() => this.ResetAsync(); } @@ -256,7 +256,7 @@ private void Reset() } protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => - routeBuilder.AddHandler>(async (messages, context) => + routeBuilder.AddHandler>(async (messages, context, cancellationToken) => { // TODO: https://github.com/microsoft/agent-framework/issues/784 // This locking should not be necessary. @@ -273,7 +273,7 @@ protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => var results = this._allResults; this._allResults = new List>(this._expectedInputs); - await context.YieldOutputAsync(this._aggregator(results)).ConfigureAwait(false); + await context.YieldOutputAsync(this._aggregator(results), cancellationToken).ConfigureAwait(false); } }); @@ -457,16 +457,17 @@ private sealed class StartHandoffsExecutor() : Executor("HandoffStart"), IResett protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => routeBuilder - .AddHandler((message, context) => this._pendingMessages.Add(new(ChatRole.User, message))) - .AddHandler((message, context) => this._pendingMessages.Add(message)) - .AddHandler>((messages, _) => this._pendingMessages.AddRange(messages)) - .AddHandler((messages, _) => this._pendingMessages.AddRange(messages)) // TODO: Remove once https://github.com/microsoft/agent-framework/issues/782 is addressed - .AddHandler>((messages, _) => this._pendingMessages.AddRange(messages)) // TODO: Remove once https://github.com/microsoft/agent-framework/issues/782 is addressed - .AddHandler(async (token, context) => + .AddHandler((message, context, _) => this._pendingMessages.Add(new(ChatRole.User, message))) + .AddHandler((message, context, _) => this._pendingMessages.Add(message)) + 
.AddHandler>((messages, _, __) => this._pendingMessages.AddRange(messages)) + .AddHandler((messages, _, __) => this._pendingMessages.AddRange(messages)) // TODO: Remove once https://github.com/microsoft/agent-framework/issues/782 is addressed + .AddHandler>((messages, _, __) => this._pendingMessages.AddRange(messages)) // TODO: Remove once https://github.com/microsoft/agent-framework/issues/782 is addressed + .AddHandler(async (token, context, cancellationToken) => { var messages = new List(this._pendingMessages); this._pendingMessages.Clear(); - await context.SendMessageAsync(new HandoffState(token, null, messages)).ConfigureAwait(false); + await context.SendMessageAsync(new HandoffState(token, null, messages), cancellationToken: cancellationToken) + .ConfigureAwait(false); }); public ValueTask ResetAsync() @@ -480,8 +481,8 @@ public ValueTask ResetAsync() private sealed class EndHandoffsExecutor() : Executor("HandoffEnd"), IResettableExecutor { protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => - routeBuilder.AddHandler((handoff, context) => - context.YieldOutputAsync(handoff.Messages)); + routeBuilder.AddHandler((handoff, context, cancellationToken) => + context.YieldOutputAsync(handoff.Messages, cancellationToken)); public ValueTask ResetAsync() => default; } @@ -534,7 +535,7 @@ public void Initialize( }); protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => - routeBuilder.AddHandler(async (handoffState, context) => + routeBuilder.AddHandler(async (handoffState, context, cancellationToken) => { string? requestedHandoff = null; List updates = []; @@ -542,24 +543,31 @@ protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => List? 
roleChanges = ChangeAssistantToUserForOtherParticipants(this._agent.DisplayName, allMessages); - await foreach (var update in this._agent.RunStreamingAsync(allMessages, options: this._agentOptions).ConfigureAwait(false)) + await foreach (var update in this._agent.RunStreamingAsync(allMessages, + options: this._agentOptions, + cancellationToken: cancellationToken) + .ConfigureAwait(false)) { - await AddUpdateAsync(update).ConfigureAwait(false); + await AddUpdateAsync(update, cancellationToken).ConfigureAwait(false); foreach (var c in update.Contents) { if (c is FunctionCallContent fcc && this._handoffFunctionNames.Contains(fcc.Name)) { requestedHandoff = fcc.Name; - await AddUpdateAsync(new AgentRunResponseUpdate - { - AgentId = this._agent.Id, - AuthorName = this._agent.DisplayName, - Contents = [new FunctionResultContent(fcc.CallId, "Transferred.")], - CreatedAt = DateTimeOffset.UtcNow, - MessageId = Guid.NewGuid().ToString("N"), - Role = ChatRole.Tool, - }).ConfigureAwait(false); + await AddUpdateAsync( + new AgentRunResponseUpdate + { + AgentId = this._agent.Id, + AuthorName = this._agent.DisplayName, + Contents = [new FunctionResultContent(fcc.CallId, "Transferred.")], + CreatedAt = DateTimeOffset.UtcNow, + MessageId = Guid.NewGuid().ToString("N"), + Role = ChatRole.Tool, + }, + cancellationToken + ) + .ConfigureAwait(false); } } } @@ -568,14 +576,14 @@ await AddUpdateAsync(new AgentRunResponseUpdate ResetUserToAssistantForChangedRoles(roleChanges); - await context.SendMessageAsync(new HandoffState(handoffState.TurnToken, requestedHandoff, allMessages)).ConfigureAwait(false); + await context.SendMessageAsync(new HandoffState(handoffState.TurnToken, requestedHandoff, allMessages), cancellationToken: cancellationToken).ConfigureAwait(false); - async Task AddUpdateAsync(AgentRunResponseUpdate update) + async Task AddUpdateAsync(AgentRunResponseUpdate update, CancellationToken cancellationToken) { updates.Add(update); if (handoffState.TurnToken.EmitEvents is true) 
{ - await context.AddEventAsync(new AgentRunUpdateEvent(this.Id, update)).ConfigureAwait(false); + await context.AddEventAsync(new AgentRunUpdateEvent(this.Id, update), cancellationToken).ConfigureAwait(false); } } }); @@ -623,7 +631,8 @@ public int MaximumIterationCount /// Selects the next agent to participate in the group chat based on the provided chat history and team. /// /// The chat history to consider. - /// The to monitor for cancellation requests. The default is . + /// The to monitor for cancellation requests. + /// The default is . /// The next to speak. This agent must be part of the chat. protected internal abstract ValueTask SelectNextAgentAsync( IReadOnlyList history, @@ -633,7 +642,8 @@ protected internal abstract ValueTask SelectNextAgentAsync( /// Filters the chat history before it's passed to the next agent. /// /// The chat history to filter. - /// The to monitor for cancellation requests. The default is . + /// The to monitor for cancellation requests. + /// The default is . /// The filtered chat history. protected internal virtual ValueTask> UpdateHistoryAsync( IReadOnlyList history, @@ -644,7 +654,8 @@ protected internal virtual ValueTask> UpdateHistoryAsyn /// Determines whether the group chat should be terminated based on the provided chat history and iteration count. /// /// The chat history to consider. - /// The to monitor for cancellation requests. The default is . + /// The to monitor for cancellation requests. + /// The default is . /// A indicating whether the chat should be terminated. 
protected internal virtual ValueTask ShouldTerminateAsync( IReadOnlyList history, @@ -789,35 +800,35 @@ private sealed class GroupChatHost(AIAgent[] agents, Dictionary routeBuilder - .AddHandler((message, context) => this._pendingMessages.Add(new(ChatRole.User, message))) - .AddHandler((message, context) => this._pendingMessages.Add(message)) - .AddHandler>((messages, _) => this._pendingMessages.AddRange(messages)) - .AddHandler((messages, _) => this._pendingMessages.AddRange(messages)) // TODO: Remove once https://github.com/microsoft/agent-framework/issues/782 is addressed - .AddHandler>((messages, _) => this._pendingMessages.AddRange(messages)) // TODO: Remove once https://github.com/microsoft/agent-framework/issues/782 is addressed - .AddHandler(async (token, context) => + .AddHandler((message, context, _) => this._pendingMessages.Add(new(ChatRole.User, message))) + .AddHandler((message, context, _) => this._pendingMessages.Add(message)) + .AddHandler>((messages, _, __) => this._pendingMessages.AddRange(messages)) + .AddHandler((messages, _, __) => this._pendingMessages.AddRange(messages)) // TODO: Remove once https://github.com/microsoft/agent-framework/issues/782 is addressed + .AddHandler>((messages, _, __) => this._pendingMessages.AddRange(messages)) // TODO: Remove once https://github.com/microsoft/agent-framework/issues/782 is addressed + .AddHandler(async (token, context, cancellationToken) => { List messages = [.. 
this._pendingMessages]; this._pendingMessages.Clear(); this._manager ??= this._managerFactory(this._agents); - if (!await this._manager.ShouldTerminateAsync(messages).ConfigureAwait(false)) + if (!await this._manager.ShouldTerminateAsync(messages, cancellationToken).ConfigureAwait(false)) { - var filtered = await this._manager.UpdateHistoryAsync(messages).ConfigureAwait(false); + var filtered = await this._manager.UpdateHistoryAsync(messages, cancellationToken).ConfigureAwait(false); messages = filtered is null || ReferenceEquals(filtered, messages) ? messages : [.. filtered]; - if (await this._manager.SelectNextAgentAsync(messages).ConfigureAwait(false) is AIAgent nextAgent && + if (await this._manager.SelectNextAgentAsync(messages, cancellationToken).ConfigureAwait(false) is AIAgent nextAgent && this._agentMap.TryGetValue(nextAgent, out var executor)) { this._manager.IterationCount++; - await context.SendMessageAsync(messages, executor.Id).ConfigureAwait(false); - await context.SendMessageAsync(token, executor.Id).ConfigureAwait(false); + await context.SendMessageAsync(messages, executor.Id, cancellationToken).ConfigureAwait(false); + await context.SendMessageAsync(token, executor.Id, cancellationToken).ConfigureAwait(false); return; } } this._manager = null; - await context.YieldOutputAsync(messages).ConfigureAwait(false); + await context.YieldOutputAsync(messages, cancellationToken).ConfigureAwait(false); }); public ValueTask ResetAsync() diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/AggregatingExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/AggregatingExecutor.cs index c6cb36abe3..06f6ce243a 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/AggregatingExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/AggregatingExecutor.cs @@ -28,7 +28,7 @@ public class AggregatingExecutor(string id, private TAggregate? 
_runningAggregate; /// - public override ValueTask HandleAsync(TInput message, IWorkflowContext context) + public override ValueTask HandleAsync(TInput message, IWorkflowContext context, CancellationToken cancellationToken = default) { this._runningAggregate = aggregator(this._runningAggregate, message); return new(this._runningAggregate); @@ -37,7 +37,7 @@ public class AggregatingExecutor(string id, /// protected internal override async ValueTask OnCheckpointingAsync(IWorkflowContext context, CancellationToken cancellationToken = default) { - await context.QueueStateUpdateAsync(AggregateStateKey, this._runningAggregate).ConfigureAwait(false); + await context.QueueStateUpdateAsync(AggregateStateKey, this._runningAggregate, cancellationToken: cancellationToken).ConfigureAwait(false); await base.OnCheckpointingAsync(context, cancellationToken).ConfigureAwait(false); } @@ -47,6 +47,6 @@ protected internal override async ValueTask OnCheckpointRestoredAsync(IWorkflowC { await base.OnCheckpointRestoredAsync(context, cancellationToken).ConfigureAwait(false); - this._runningAggregate = await context.ReadStateAsync(AggregateStateKey).ConfigureAwait(false); + this._runningAggregate = await context.ReadStateAsync(AggregateStateKey, cancellationToken: cancellationToken).ConfigureAwait(false); } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/AsyncBarrier.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/AsyncBarrier.cs index 99f8a1fa62..2cd0e84347 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/AsyncBarrier.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/AsyncBarrier.cs @@ -10,7 +10,7 @@ internal sealed class AsyncBarrier() { private readonly InitLocked> _completionSource = new(); - public async ValueTask JoinAsync(CancellationToken cancellation = default) + public async ValueTask JoinAsync(CancellationToken cancellationToken = default) { this._completionSource.Init(() => new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously)); 
TaskCompletionSource completionSource = this._completionSource.Get()!; @@ -19,10 +19,10 @@ public async ValueTask JoinAsync(CancellationToken cancellation = default) // should not cancel the entire barrier. TaskCompletionSource cancellationSource = new(); - using CancellationTokenRegistration registration = cancellation.Register(() => cancellationSource.SetResult(new())); + using CancellationTokenRegistration registration = cancellationToken.Register(() => cancellationSource.SetResult(new())); await Task.WhenAny(completionSource.Task, cancellationSource.Task).ConfigureAwait(false); - return !cancellation.IsCancellationRequested; + return !cancellationToken.IsCancellationRequested; } public bool ReleaseBarrier() diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/AsyncCoordinator.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/AsyncCoordinator.cs index 835a78ab2a..e1478396c8 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/AsyncCoordinator.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/AsyncCoordinator.cs @@ -12,12 +12,13 @@ internal sealed class AsyncCoordinator /// /// Wait for the Coordination owner to mark the next coordination point, then continue execution. /// - /// A cancellation token that can be used to cancel the wait. + /// The to monitor for cancellation requests. + /// The default is . /// /// A task that represents the asynchronous operation. The task result is /// if the wait was completed; otherwise, for example, if the wait was cancelled, . /// - public async ValueTask WaitForCoordinationAsync(CancellationToken cancellation = default) + public async ValueTask WaitForCoordinationAsync(CancellationToken cancellationToken = default) { // There is a chance that we might get a stale barrier that is getting released if there is a // release happening concurrently with this call. This is by design, and should be considered @@ -26,7 +27,7 @@ public async ValueTask WaitForCoordinationAsync(CancellationToken cancella ?? 
Interlocked.CompareExchange(ref this._coordinationBarrier, new(), null) ?? this._coordinationBarrier!; // Re-read after setting - return await actualBarrier.JoinAsync(cancellation).ConfigureAwait(false); + return await actualBarrier.JoinAsync(cancellationToken).ConfigureAwait(false); } /// diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/ChatProtocolExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/ChatProtocolExecutor.cs index afcc7e52cc..9363ca59a0 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/ChatProtocolExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/ChatProtocolExecutor.cs @@ -30,19 +30,21 @@ protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) { if (this._stringMessageChatRole.HasValue) { - routeBuilder = routeBuilder.AddHandler((message, _) => this._pendingMessages.Add(new(this._stringMessageChatRole.Value, message))); + routeBuilder = routeBuilder.AddHandler((message, _, __) => this._pendingMessages.Add(new(this._stringMessageChatRole.Value, message))); } - return routeBuilder.AddHandler((message, _) => this._pendingMessages.Add(message)) - .AddHandler>((messages, _) => this._pendingMessages.AddRange(messages)) + // Routing requires exact type matches. The runtime may dispatch either List or ChatMessage[]. 
+ return routeBuilder.AddHandler((message, _, __) => this._pendingMessages.Add(message)) + .AddHandler>((messages, _, __) => this._pendingMessages.AddRange(messages)) + .AddHandler((messages, _, __) => this._pendingMessages.AddRange(messages)) .AddHandler(this.TakeTurnAsync); } - public async ValueTask TakeTurnAsync(TurnToken token, IWorkflowContext context) + public async ValueTask TakeTurnAsync(TurnToken token, IWorkflowContext context, CancellationToken cancellationToken = default) { - await this.TakeTurnAsync(this._pendingMessages, context, token.EmitEvents).ConfigureAwait(false); + await this.TakeTurnAsync(this._pendingMessages, context, token.EmitEvents, cancellationToken).ConfigureAwait(false); this._pendingMessages = []; - await context.SendMessageAsync(token).ConfigureAwait(false); + await context.SendMessageAsync(token, cancellationToken: cancellationToken).ConfigureAwait(false); } protected abstract ValueTask TakeTurnAsync(List messages, IWorkflowContext context, bool? emitEvents, CancellationToken cancellationToken = default); @@ -54,7 +56,7 @@ protected internal override async ValueTask OnCheckpointingAsync(IWorkflowContex if (this._pendingMessages.Count > 0) { JsonElement messagesValue = this._pendingMessages.Serialize(); - messagesTask = context.QueueStateUpdateAsync(PendingMessagesStateKey, messagesValue).AsTask(); + messagesTask = context.QueueStateUpdateAsync(PendingMessagesStateKey, messagesValue, cancellationToken: cancellationToken).AsTask(); } await messagesTask.ConfigureAwait(false); @@ -62,7 +64,7 @@ protected internal override async ValueTask OnCheckpointingAsync(IWorkflowContex protected internal override async ValueTask OnCheckpointRestoredAsync(IWorkflowContext context, CancellationToken cancellationToken = default) { - JsonElement? messagesValue = await context.ReadStateAsync(PendingMessagesStateKey).ConfigureAwait(false); + JsonElement? 
messagesValue = await context.ReadStateAsync(PendingMessagesStateKey, cancellationToken: cancellationToken).ConfigureAwait(false); if (messagesValue.HasValue) { List messages = messagesValue.Value.DeserializeMessages(); diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/AsyncRunHandle.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/AsyncRunHandle.cs index 8fe6edce2a..de1a1dfc7f 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/AsyncRunHandle.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/AsyncRunHandle.cs @@ -44,19 +44,14 @@ internal AsyncRunHandle(ISuperStepRunner stepRunner, ICheckpointingHandle checkp } } - //private readonly AsyncCoordinator _waitForResponseCoordinator = new(); - - //public ValueTask WaitForNextInputAsync(CancellationToken cancellation = default) - // => this._waitForResponseCoordinator.WaitForCoordinationAsync(cancellation); - public string RunId => this._stepRunner.RunId; public IReadOnlyList Checkpoints => this._checkpointingHandle.Checkpoints; - public ValueTask GetStatusAsync(CancellationToken cancellation = default) - => this._eventStream.GetStatusAsync(cancellation); + public ValueTask GetStatusAsync(CancellationToken cancellationToken = default) + => this._eventStream.GetStatusAsync(cancellationToken); - public async IAsyncEnumerable TakeEventStreamAsync(bool blockOnPendingRequest, [EnumeratorCancellation] CancellationToken cancellation = default) + public async IAsyncEnumerable TakeEventStreamAsync(bool blockOnPendingRequest, [EnumeratorCancellation] CancellationToken cancellationToken = default) { //Debug.Assert(breakOnHalt); // Enforce single active enumerator (this runs when enumeration begins) @@ -68,7 +63,7 @@ public async IAsyncEnumerable TakeEventStreamAsync(bool blockOnPe CancellationTokenSource? 
linked = null; try { - linked = CancellationTokenSource.CreateLinkedTokenSource(cancellation, this._endRunSource.Token); + linked = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken, this._endRunSource.Token); var token = linked.Token; // Build the inner stream before the loop so synchronous exceptions still release the gate @@ -92,21 +87,21 @@ public async IAsyncEnumerable TakeEventStreamAsync(bool blockOnPe } } - public ValueTask IsValidInputTypeAsync(CancellationToken cancellation = default) - => this._stepRunner.IsValidInputTypeAsync(cancellation); + public ValueTask IsValidInputTypeAsync(CancellationToken cancellationToken = default) + => this._stepRunner.IsValidInputTypeAsync(cancellationToken); - public async ValueTask EnqueueMessageAsync(T message, CancellationToken cancellation = default) + public async ValueTask EnqueueMessageAsync(T message, CancellationToken cancellationToken = default) { if (message is ExternalResponse response) { // EnqueueResponseAsync handles signaling - await this.EnqueueResponseAsync(response, cancellation) + await this.EnqueueResponseAsync(response, cancellationToken) .ConfigureAwait(false); return true; } - bool result = await this._stepRunner.EnqueueMessageAsync(message, cancellation) + bool result = await this._stepRunner.EnqueueMessageAsync(message, cancellationToken) .ConfigureAwait(false); // Signal the run loop that new input is available @@ -115,7 +110,7 @@ await this.EnqueueResponseAsync(response, cancellation) return result; } - public async ValueTask EnqueueMessageUntypedAsync([NotNull] object message, Type? declaredType = null, CancellationToken cancellation = default) + public async ValueTask EnqueueMessageUntypedAsync([NotNull] object message, Type? 
declaredType = null, CancellationToken cancellationToken = default) { if (declaredType?.IsInstanceOfType(message) == false) { @@ -125,7 +120,7 @@ public async ValueTask EnqueueMessageUntypedAsync([NotNull] object message if (declaredType != null && typeof(ExternalResponse).IsAssignableFrom(declaredType)) { // EnqueueResponseAsync handles signaling - await this.EnqueueResponseAsync((ExternalResponse)message, cancellation) + await this.EnqueueResponseAsync((ExternalResponse)message, cancellationToken) .ConfigureAwait(false); return true; @@ -133,13 +128,13 @@ await this.EnqueueResponseAsync((ExternalResponse)message, cancellation) else if (declaredType == null && message is ExternalResponse response) { // EnqueueResponseAsync handles signaling - await this.EnqueueResponseAsync(response, cancellation) + await this.EnqueueResponseAsync(response, cancellationToken) .ConfigureAwait(false); return true; } - bool result = await this._stepRunner.EnqueueMessageUntypedAsync(message, declaredType ?? message.GetType(), cancellation) + bool result = await this._stepRunner.EnqueueMessageUntypedAsync(message, declaredType ?? 
message.GetType(), cancellationToken) .ConfigureAwait(false); // Signal the run loop that new input is available @@ -148,9 +143,9 @@ await this.EnqueueResponseAsync(response, cancellation) return result; } - public async ValueTask EnqueueResponseAsync(ExternalResponse response, CancellationToken cancellation = default) + public async ValueTask EnqueueResponseAsync(ExternalResponse response, CancellationToken cancellationToken = default) { - await this._stepRunner.EnqueueResponseAsync(response, cancellation).ConfigureAwait(false); + await this._stepRunner.EnqueueResponseAsync(response, cancellationToken).ConfigureAwait(false); // Signal the run loop that new input is available this.SignalInputToRunLoop(); diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/AsyncRunHandleExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/AsyncRunHandleExtensions.cs index 65b60cd6a8..c7ac339a0c 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/AsyncRunHandleExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/AsyncRunHandleExtensions.cs @@ -14,33 +14,33 @@ public async static ValueTask> WithCheckpointingAsync(run, runHandle); } - public static async ValueTask EnqueueAndStreamAsync(this AsyncRunHandle runHandle, TInput input, CancellationToken cancellation = default) + public static async ValueTask EnqueueAndStreamAsync(this AsyncRunHandle runHandle, TInput input, CancellationToken cancellationToken = default) { - await runHandle.EnqueueMessageAsync(input, cancellation).ConfigureAwait(false); + await runHandle.EnqueueMessageAsync(input, cancellationToken).ConfigureAwait(false); return new(runHandle); } - public static async ValueTask EnqueueUntypedAndStreamAsync(this AsyncRunHandle runHandle, object input, CancellationToken cancellation = default) + public static async ValueTask EnqueueUntypedAndStreamAsync(this AsyncRunHandle runHandle, object input, CancellationToken cancellationToken = default) { - await 
runHandle.EnqueueMessageUntypedAsync(input, cancellation: cancellation).ConfigureAwait(false); + await runHandle.EnqueueMessageUntypedAsync(input, cancellationToken: cancellationToken).ConfigureAwait(false); return new(runHandle); } - public static async ValueTask EnqueueAndRunAsync(this AsyncRunHandle runHandle, TInput input, CancellationToken cancellation = default) + public static async ValueTask EnqueueAndRunAsync(this AsyncRunHandle runHandle, TInput input, CancellationToken cancellationToken = default) { - await runHandle.EnqueueMessageAsync(input, cancellation).ConfigureAwait(false); + await runHandle.EnqueueMessageAsync(input, cancellationToken).ConfigureAwait(false); Run run = new(runHandle); - await run.RunToNextHaltAsync(cancellation).ConfigureAwait(false); + await run.RunToNextHaltAsync(cancellationToken).ConfigureAwait(false); return run; } - public static async ValueTask EnqueueUntypedAndRunAsync(this AsyncRunHandle runHandle, object input, CancellationToken cancellation = default) + public static async ValueTask EnqueueUntypedAndRunAsync(this AsyncRunHandle runHandle, object input, CancellationToken cancellationToken = default) { - await runHandle.EnqueueMessageUntypedAsync(input, cancellation: cancellation).ConfigureAwait(false); + await runHandle.EnqueueMessageUntypedAsync(input, cancellationToken: cancellationToken).ConfigureAwait(false); Run run = new(runHandle); - await run.RunToNextHaltAsync(cancellation).ConfigureAwait(false); + await run.RunToNextHaltAsync(cancellationToken).ConfigureAwait(false); return run; } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/CallResult.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/CallResult.cs index 655aa1597a..952b48cd6c 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/CallResult.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/CallResult.cs @@ -1,6 +1,7 @@ // Copyright (c) Microsoft. All rights reserved. 
using System; +using System.Threading; using Microsoft.Shared.Diagnostics; namespace Microsoft.Agents.AI.Workflows.Execution; @@ -26,16 +27,22 @@ internal sealed class CallResult /// public Exception? Exception { get; init; } + /// + /// Indicates whether the call was cancelled (e.g., via a ). + /// + public bool IsCancelled { get; init; } + /// /// Indicates whether the call was successful. A call is considered successful if it returned /// without throwing an exception. /// - public bool IsSuccess => this.Exception is null; + public bool IsSuccess => this.Exception is null && !this.IsCancelled; - private CallResult(bool isVoid = false) + private CallResult(bool isVoid = false, bool isCancelled = false) { // Private constructor to enforce use of static methods. this.IsVoid = isVoid; + this.IsCancelled = isCancelled; } /// @@ -51,6 +58,14 @@ private CallResult(bool isVoid = false) /// A indicating the result of the call. public static CallResult ReturnVoid() => new(isVoid: true); + /// + /// Create a indicating that the call was cancelled. + /// + /// A boolean specifying whether the call was void (was not expected to return + /// a value). + /// A indicating the result of the call. + public static CallResult Cancelled(bool wasVoid) => new(wasVoid, isCancelled: true); + /// /// Create a indicating that an exception was raised during the call. /// diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/IInputCoordinator.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/IInputCoordinator.cs deleted file mode 100644 index ed9f4bf22d..0000000000 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/IInputCoordinator.cs +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. 
- -using System.Threading; -using System.Threading.Tasks; - -namespace Microsoft.Agents.AI.Workflows.Execution; - -internal interface IInputCoordinator -{ - ValueTask WaitForNextInputAsync(CancellationToken cancellation = default); -} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/IRunEventStream.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/IRunEventStream.cs index 5ffce6ce36..dfc35c7566 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/IRunEventStream.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/IRunEventStream.cs @@ -15,7 +15,7 @@ internal interface IRunEventStream : IAsyncDisposable // this cannot be cancelled ValueTask StopAsync(); - ValueTask GetStatusAsync(CancellationToken cancellation = default); + ValueTask GetStatusAsync(CancellationToken cancellationToken = default); - IAsyncEnumerable TakeEventStreamAsync(bool blockOnPendingRequest, CancellationToken cancellation = default); + IAsyncEnumerable TakeEventStreamAsync(bool blockOnPendingRequest, CancellationToken cancellationToken = default); } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/IRunnerContext.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/IRunnerContext.cs index b3a988306c..f3fc762336 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/IRunnerContext.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/IRunnerContext.cs @@ -1,16 +1,17 @@ // Copyright (c) Microsoft. All rights reserved. using System.Collections.Generic; +using System.Threading; using System.Threading.Tasks; namespace Microsoft.Agents.AI.Workflows.Execution; internal interface IRunnerContext : IExternalRequestSink, ISuperStepJoinContext { - ValueTask AddEventAsync(WorkflowEvent workflowEvent); - ValueTask SendMessageAsync(string sourceId, object message, string? 
targetId = null); + ValueTask AddEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken = default); + ValueTask SendMessageAsync(string sourceId, object message, string? targetId = null, CancellationToken cancellationToken = default); - ValueTask AdvanceAsync(); + ValueTask AdvanceAsync(CancellationToken cancellationToken = default); IWorkflowContext Bind(string executorId, Dictionary? traceContext = null); - ValueTask EnsureExecutorAsync(string executorId, IStepTracer? tracer); + ValueTask EnsureExecutorAsync(string executorId, IStepTracer? tracer, CancellationToken cancellationToken = default); } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/ISuperStepJoinContext.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/ISuperStepJoinContext.cs index 1e27fe6b4d..ed4ded17d6 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/ISuperStepJoinContext.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/ISuperStepJoinContext.cs @@ -10,8 +10,8 @@ internal interface ISuperStepJoinContext { bool WithCheckpointing { get; } - ValueTask ForwardWorkflowEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellation = default); - ValueTask SendMessageAsync(string senderId, [DisallowNull] TMessage message, CancellationToken cancellation = default); + ValueTask ForwardWorkflowEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken = default); + ValueTask SendMessageAsync(string senderId, [DisallowNull] TMessage message, CancellationToken cancellationToken = default); - ValueTask AttachSuperstepAsync(ISuperStepRunner superStepRunner, CancellationToken cancellation = default); + ValueTask AttachSuperstepAsync(ISuperStepRunner superStepRunner, CancellationToken cancellationToken = default); } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/ISuperStepRunner.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/ISuperStepRunner.cs index 7384dfc4d0..a7923a7d9b 100644 --- 
a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/ISuperStepRunner.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/ISuperStepRunner.cs @@ -15,11 +15,11 @@ internal interface ISuperStepRunner bool HasUnservicedRequests { get; } bool HasUnprocessedMessages { get; } - ValueTask EnqueueResponseAsync(ExternalResponse response, CancellationToken cancellation = default); + ValueTask EnqueueResponseAsync(ExternalResponse response, CancellationToken cancellationToken = default); - ValueTask IsValidInputTypeAsync(CancellationToken cancellation = default); - ValueTask EnqueueMessageAsync(T message, CancellationToken cancellation = default); - ValueTask EnqueueMessageUntypedAsync(object message, Type declaredType, CancellationToken cancellation = default); + ValueTask IsValidInputTypeAsync(CancellationToken cancellationToken = default); + ValueTask EnqueueMessageAsync(T message, CancellationToken cancellationToken = default); + ValueTask EnqueueMessageUntypedAsync(object message, Type declaredType, CancellationToken cancellationToken = default); ConcurrentEventSink OutgoingEvents { get; } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/InputWaiter.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/InputWaiter.cs index f86f3721bb..d50f284f48 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/InputWaiter.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/InputWaiter.cs @@ -33,10 +33,10 @@ public void SignalInput() } } - public Task WaitForInputAsync(CancellationToken cancellation = default) => this.WaitForInputAsync(null, cancellation); + public Task WaitForInputAsync(CancellationToken cancellationToken = default) => this.WaitForInputAsync(null, cancellationToken); - public async Task WaitForInputAsync(TimeSpan? timeout = null, CancellationToken cancellation = default) + public async Task WaitForInputAsync(TimeSpan? 
timeout = null, CancellationToken cancellationToken = default) { - await this._inputSignal.WaitAsync(timeout ?? TimeSpan.FromMilliseconds(-1), cancellation).ConfigureAwait(false); + await this._inputSignal.WaitAsync(timeout ?? TimeSpan.FromMilliseconds(-1), cancellationToken).ConfigureAwait(false); } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/LockstepRunEventStream.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/LockstepRunEventStream.cs index a66f0af978..b47a692113 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/LockstepRunEventStream.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/LockstepRunEventStream.cs @@ -22,7 +22,7 @@ internal sealed class LockstepRunEventStream : IRunEventStream private readonly ISuperStepRunner _stepRunner; - public ValueTask GetStatusAsync(CancellationToken cancellation = default) => new(this.RunStatus); + public ValueTask GetStatusAsync(CancellationToken cancellationToken = default) => new(this.RunStatus); public LockstepRunEventStream(ISuperStepRunner stepRunner) { @@ -36,7 +36,7 @@ public void Start() // No-op for lockstep execution } - public async IAsyncEnumerable TakeEventStreamAsync(bool blockOnPendingRequest, [EnumeratorCancellation] CancellationToken cancellation = default) + public async IAsyncEnumerable TakeEventStreamAsync(bool blockOnPendingRequest, [EnumeratorCancellation] CancellationToken cancellationToken = default) { #if NET ObjectDisposedException.ThrowIf(Volatile.Read(ref this._isDisposed) == 1, this); @@ -47,7 +47,7 @@ public async IAsyncEnumerable TakeEventStreamAsync(bool blockOnPe } #endif - CancellationTokenSource linkedSource = CancellationTokenSource.CreateLinkedTokenSource(this._stopCancellation.Token, cancellation); + CancellationTokenSource linkedSource = CancellationTokenSource.CreateLinkedTokenSource(this._stopCancellation.Token, cancellationToken); ConcurrentQueue eventSink = []; diff --git 
a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/MessageRouter.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/MessageRouter.cs index 0a23105c74..10ce345ad8 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/MessageRouter.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/MessageRouter.cs @@ -4,6 +4,7 @@ using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Linq; +using System.Threading; using System.Threading.Tasks; using Microsoft.Agents.AI.Workflows.Checkpointing; using Microsoft.Shared.Diagnostics; @@ -11,12 +12,14 @@ System.Func< Microsoft.Agents.AI.Workflows.PortableValue, // message Microsoft.Agents.AI.Workflows.IWorkflowContext, // context + System.Threading.CancellationToken, // cancellation System.Threading.Tasks.ValueTask >; using MessageHandlerF = System.Func< object, // message Microsoft.Agents.AI.Workflows.IWorkflowContext, // context + System.Threading.CancellationToken, // cancellation System.Threading.Tasks.ValueTask >; @@ -56,7 +59,7 @@ public bool CanHandle(TypeId candidateType) public HashSet DefaultOutputTypes { get; } - public async ValueTask RouteMessageAsync(object message, IWorkflowContext context, bool requireRoute = false) + public async ValueTask RouteMessageAsync(object message, IWorkflowContext context, bool requireRoute = false, CancellationToken cancellationToken = default) { Throw.IfNull(message); @@ -74,13 +77,13 @@ public bool CanHandle(TypeId candidateType) { if (this._typedHandlers.TryGetValue(message.GetType(), out MessageHandlerF? 
handler)) { - result = await handler(message, context).ConfigureAwait(false); + result = await handler(message, context, cancellationToken).ConfigureAwait(false); } else if (this.HasCatchAll) { portableValue ??= new PortableValue(message); - result = await this._catchAllFunc(portableValue, context).ConfigureAwait(false); + result = await this._catchAllFunc(portableValue, context, cancellationToken).ConfigureAwait(false); } } catch (Exception e) diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/StateManager.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/StateManager.cs index 3e7ef91e0c..ffa289eaf9 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/StateManager.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/StateManager.cs @@ -99,6 +99,13 @@ public async ValueTask> ReadKeysAsync(ScopeId scopeId) public ValueTask ReadStateAsync(ScopeId scopeId, string key) { + if (typeof(T) == typeof(object)) + { + // Reading as object will break across serialize/deserialize boundaries, e.g. checkpointing, distributed runtime, etc. + // Disabled pending upstream updates for this change; see https://github.com/microsoft/agent-framework/issues/1369 + //throw new NotSupportedException("Reading state as 'object' is not supported. Use 'PortableValue' instead for variants."); + } + Throw.IfNullOrEmpty(key); UpdateKey stateKey = new(scopeId, key); @@ -116,6 +123,16 @@ public async ValueTask> ReadKeysAsync(ScopeId scopeId) { return new((T?)result.Value); } + else if (result.Value == null) + { + // Technically should only happen if T is nullable, but we don't have the ability to express that + // so we cannot `return new((T?)null);` directly. 
+ return new((T?)default); + } + else if (typeof(T) == typeof(PortableValue)) + { + return new((T)(object)new PortableValue(result.Value)); + } throw new InvalidOperationException($"State for key '{key}' in scope '{scopeId}' is not of type '{typeof(T).Name}'."); } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/StateScope.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/StateScope.cs index 607f97c351..e1c50ab1a3 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/StateScope.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/StateScope.cs @@ -51,6 +51,13 @@ public bool ContainsKey(string key) Throw.IfNullOrEmpty(key); if (this._stateData.TryGetValue(key, out PortableValue? value)) { + if (typeof(T) == typeof(PortableValue) && !value.TypeId.IsMatch(typeof(PortableValue))) + { + // value is PortableValue, and we do not need to unwrap a PortableValue instance inside of it + // Unfortunately we need to cast through object here. + return new((T)(object)value); + } + return new(value.As()); } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/StreamingRunEventStream.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/StreamingRunEventStream.cs index 0f941ea733..718ebcd11c 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/StreamingRunEventStream.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/StreamingRunEventStream.cs @@ -50,10 +50,10 @@ public void Start() } } - private async Task RunLoopAsync(CancellationToken cancellation) + private async Task RunLoopAsync(CancellationToken cancellationToken) { using CancellationTokenSource errorSource = new(); - CancellationTokenSource linkedSource = CancellationTokenSource.CreateLinkedTokenSource(errorSource.Token, cancellation); + CancellationTokenSource linkedSource = CancellationTokenSource.CreateLinkedTokenSource(errorSource.Token, cancellationToken); // Subscribe to events - they will flow directly to the channel as they're raised 
this._stepRunner.OutgoingEvents.EventRaised += OnEventRaisedAsync; @@ -62,7 +62,7 @@ private async Task RunLoopAsync(CancellationToken cancellation) { // Wait for the first input before starting // The consumer will call EnqueueMessageAsync which signals the run loop - await this._inputWaiter.WaitForInputAsync(cancellation: linkedSource.Token).ConfigureAwait(false); + await this._inputWaiter.WaitForInputAsync(cancellationToken: linkedSource.Token).ConfigureAwait(false); this._runStatus = RunStatus.Running; @@ -134,7 +134,7 @@ async ValueTask OnEventRaisedAsync(object? sender, WorkflowEvent e) public async IAsyncEnumerable TakeEventStreamAsync( bool blockOnPendingRequest, - [EnumeratorCancellation] CancellationToken cancellation = default) + [EnumeratorCancellation] CancellationToken cancellationToken = default) { // Get the current epoch - we'll only respond to completion signals from this epoch or later int myEpoch = Volatile.Read(ref this._completionEpoch) + 1; @@ -143,7 +143,7 @@ public async IAsyncEnumerable TakeEventStreamAsync( // Note: When cancellation is requested, ReadAllAsync may throw OperationCanceledException // or may complete the enumeration. We check IsCancellationRequested explicitly at superstep // boundaries to ensure clean cancellation. 
- await foreach (WorkflowEvent evt in this._eventChannel.Reader.ReadAllAsync(cancellation).ConfigureAwait(false)) + await foreach (WorkflowEvent evt in this._eventChannel.Reader.ReadAllAsync(cancellationToken).ConfigureAwait(false)) { // Filter out internal signals used for run loop coordination if (evt is InternalHaltSignal completionSignal) @@ -156,7 +156,7 @@ public async IAsyncEnumerable TakeEventStreamAsync( // Check for cancellation at superstep boundaries (before processing completion signal) // This allows consumers to stop reading events cleanly between supersteps - if (cancellation.IsCancellationRequested) + if (cancellationToken.IsCancellationRequested) { yield break; } @@ -186,7 +186,7 @@ public async IAsyncEnumerable TakeEventStreamAsync( yield break; } - if (cancellation.IsCancellationRequested) + if (cancellationToken.IsCancellationRequested) { yield break; } @@ -195,7 +195,7 @@ public async IAsyncEnumerable TakeEventStreamAsync( } } - public ValueTask GetStatusAsync(CancellationToken cancellation = default) + public ValueTask GetStatusAsync(CancellationToken cancellationToken = default) { // Thread-safe read of status (enum is read atomically on most platforms) return new ValueTask(this._runStatus); diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Executor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Executor.cs index e1df4fb37e..cd97fcaea4 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Executor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Executor.cs @@ -90,10 +90,12 @@ internal MessageRouter Router /// The "declared" type of the message (captured when it was being sent). This is /// used to enable routing messages as their base types, in absence of true polymorphic type routing. /// The workflow context in which the executor executes. + /// The to monitor for cancellation requests. + /// The default is . /// A ValueTask representing the asynchronous operation, wrapping the output from the executor. 
/// No handler found for the message type. /// An exception is generated while handling the message. - public async ValueTask ExecuteAsync(object message, TypeId messageType, IWorkflowContext context) + public async ValueTask ExecuteAsync(object message, TypeId messageType, IWorkflowContext context, CancellationToken cancellationToken = default) { using var activity = s_activitySource.StartActivity(ActivityNames.ExecutorProcess, ActivityKind.Internal); activity?.SetTag(Tags.ExecutorId, this.Id) @@ -101,9 +103,9 @@ internal MessageRouter Router .SetTag(Tags.MessageType, messageType.TypeName) .CreateSourceLinks(context.TraceContext); - await context.AddEventAsync(new ExecutorInvokedEvent(this.Id, message)).ConfigureAwait(false); + await context.AddEventAsync(new ExecutorInvokedEvent(this.Id, message), cancellationToken).ConfigureAwait(false); - CallResult? result = await this.Router.RouteMessageAsync(message, context, requireRoute: true) + CallResult? result = await this.Router.RouteMessageAsync(message, context, requireRoute: true, cancellationToken) .ConfigureAwait(false); ExecutorEvent executionResult; @@ -116,7 +118,7 @@ internal MessageRouter Router executionResult = new ExecutorFailedEvent(this.Id, result.Exception); } - await context.AddEventAsync(executionResult).ConfigureAwait(false); + await context.AddEventAsync(executionResult, cancellationToken).ConfigureAwait(false); if (result is null) { @@ -137,11 +139,11 @@ internal MessageRouter Router // If we had a real return type, raise it as a SendMessage; TODO: Should we have a way to disable this behaviour? 
if (result.Result is not null && this.Options.AutoSendMessageHandlerResultObject) { - await context.SendMessageAsync(result.Result).ConfigureAwait(false); + await context.SendMessageAsync(result.Result, cancellationToken: cancellationToken).ConfigureAwait(false); } if (result.Result is not null && this.Options.AutoYieldOutputHandlerResultObject) { - await context.YieldOutputAsync(result.Result).ConfigureAwait(false); + await context.YieldOutputAsync(result.Result, cancellationToken).ConfigureAwait(false); } return result.Result; @@ -152,7 +154,8 @@ internal MessageRouter Router /// /// The workflow context. /// A ValueTask representing the asynchronous operation. - /// The to monitor for cancellation requests. The default is . + /// The to monitor for cancellation requests. + /// The default is . protected internal virtual ValueTask OnCheckpointingAsync(IWorkflowContext context, CancellationToken cancellationToken = default) => default; /// @@ -160,7 +163,8 @@ internal MessageRouter Router /// /// The workflow context. /// A ValueTask representing the asynchronous operation. - /// The to monitor for cancellation requests. The default is . + /// The to monitor for cancellation requests. + /// The default is . 
protected internal virtual ValueTask OnCheckpointRestoredAsync(IWorkflowContext context, CancellationToken cancellationToken = default) => default; /// @@ -210,7 +214,7 @@ protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => routeBuilder.AddHandler(this.HandleAsync); /// - public abstract ValueTask HandleAsync(TInput message, IWorkflowContext context); + public abstract ValueTask HandleAsync(TInput message, IWorkflowContext context, CancellationToken cancellationToken = default); } /// @@ -229,5 +233,5 @@ protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => routeBuilder.AddHandler(this.HandleAsync); /// - public abstract ValueTask HandleAsync(TInput message, IWorkflowContext context); + public abstract ValueTask HandleAsync(TInput message, IWorkflowContext context, CancellationToken cancellationToken = default); } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/FunctionExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/FunctionExecutor.cs index 0db15f42bf..dd692165c7 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/FunctionExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/FunctionExecutor.cs @@ -29,7 +29,7 @@ ValueTask RunActionAsync(TInput input, IWorkflowContext workflowContext, Cancell } /// - public override ValueTask HandleAsync(TInput message, IWorkflowContext context) => handlerAsync(message, context, default); + public override ValueTask HandleAsync(TInput message, IWorkflowContext context, CancellationToken cancellationToken) => handlerAsync(message, context, cancellationToken); /// /// Creates a new instance of the class. 
@@ -65,7 +65,7 @@ ValueTask RunFuncAsync(TInput input, IWorkflowContext workflowContext, } /// - public override ValueTask HandleAsync(TInput message, IWorkflowContext context) => handlerAsync(message, context, default); + public override ValueTask HandleAsync(TInput message, IWorkflowContext context, CancellationToken cancellationToken) => handlerAsync(message, context, cancellationToken); /// /// Creates a new instance of the class. diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/IWorkflowContext.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/IWorkflowContext.cs index eed3b15d72..efc074c2d4 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/IWorkflowContext.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/IWorkflowContext.cs @@ -1,6 +1,7 @@ // Copyright (c) Microsoft. All rights reserved. using System.Collections.Generic; +using System.Threading; using System.Threading.Tasks; namespace Microsoft.Agents.AI.Workflows; @@ -15,8 +16,10 @@ public interface IWorkflowContext /// end of the current SuperStep. /// /// The event to be raised. + /// The to monitor for cancellation requests. + /// The default is . /// A representing the asynchronous operation. - ValueTask AddEventAsync(WorkflowEvent workflowEvent); + ValueTask AddEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken = default); /// /// Queues a message to be sent to connected executors. The message will be sent during the next SuperStep. @@ -25,8 +28,22 @@ public interface IWorkflowContext /// An optional identifier of the target executor. If null, the message is sent to all connected /// executors. If the target executor is not connected from this executor via an edge, it will still not receive the /// message. + /// The to monitor for cancellation requests. + /// The default is . /// A representing the asynchronous operation. - ValueTask SendMessageAsync(object message, string? targetId = null); + ValueTask SendMessageAsync(object message, string? 
targetId = null, CancellationToken cancellationToken = default); + +#if NET // What's the right way to do this so we do not make life a misery for netstandard2.0 targets? + // What's the value if they have to still write `cancellationToken: cancellationToken` to skip the targetId parameter? + // TODO: Remove this? (Maybe not: NET will eventually be the only target framework, right?) + /// + /// Queues a message to be sent to connected executors. The message will be sent during the next SuperStep. + /// + /// The message to be sent. + /// The to monitor for cancellation requests. + /// A representing the asynchronous operation. + ValueTask SendMessageAsync(object message, CancellationToken cancellationToken) => this.SendMessageAsync(message, null, cancellationToken); +#endif /// /// Adds an output value to the workflow's output queue. These outputs will be bubbled out of the workflow using the @@ -37,8 +54,10 @@ public interface IWorkflowContext /// types of registered message handlers are considered output types, unless otherwise specified using . /// /// The output value to be returned. + /// The to monitor for cancellation requests. + /// The default is . /// A representing the asynchronous operation. - ValueTask YieldOutputAsync(object output); + ValueTask YieldOutputAsync(object output, CancellationToken cancellationToken = default); /// /// Adds a request to "halt" workflow execution at the end of the current SuperStep. @@ -54,15 +73,32 @@ public interface IWorkflowContext /// The key of the state value. /// An optional name that specifies the scope to read.If null, the default scope is /// used. + /// The to monitor for cancellation requests. + /// The default is . + /// A representing the asynchronous operation. + ValueTask ReadStateAsync(string key, string? scopeName = null, CancellationToken cancellationToken = default); + +#if NET // See above for musings about this construction + /// + /// Reads a state value from the workflow's state store. 
If no scope is provided, the executor's + /// default scope is used. + /// + /// The type of the state value. + /// The key of the state value. + /// The to monitor for cancellation requests. /// A representing the asynchronous operation. - ValueTask ReadStateAsync(string key, string? scopeName = null); + ValueTask ReadStateAsync(string key, CancellationToken cancellationToken) => this.ReadStateAsync(key, null, cancellationToken); + +#endif /// /// Asynchronously reads all state keys within the specified scope. /// /// An optional name that specifies the scope to read. If null, the default scope is /// used. - ValueTask> ReadStateKeysAsync(string? scopeName = null); + /// The to monitor for cancellation requests. + /// The default is . + ValueTask> ReadStateKeysAsync(string? scopeName = null, CancellationToken cancellationToken = default); /// /// Asynchronously updates the state of a queue entry identified by the specified key and optional scope. @@ -77,8 +113,27 @@ public interface IWorkflowContext /// implementation. /// An optional name that specifies the scope to update. If null, the default scope is /// used. + /// The to monitor for cancellation requests. + /// The default is . /// A ValueTask that represents the asynchronous update operation. - ValueTask QueueStateUpdateAsync(string key, T? value, string? scopeName = null); + ValueTask QueueStateUpdateAsync(string key, T? value, string? scopeName = null, CancellationToken cancellationToken = default); + +#if NET // See above for musings about this construction + /// + /// Asynchronously updates the state of a queue entry identified by the specified key and optional scope. + /// + /// + /// Subsequent reads by this executor will result in the new value of the state. Other executors will only see + /// the new state starting from the next SuperStep. + /// + /// The type of the value to associate with the queue entry. + /// The unique identifier for the queue entry to update. Cannot be null or empty. 
+ /// The value to set for the queue entry. If null, the entry's state may be cleared or reset depending on + /// implementation. + /// The to monitor for cancellation requests. + /// A ValueTask that represents the asynchronous update operation. + ValueTask QueueStateUpdateAsync(string key, T? value, CancellationToken cancellationToken) => this.QueueStateUpdateAsync(key, value, null, cancellationToken); +#endif /// /// Asynchronously clears all state entries within the specified scope. /// /// This semantically equivalent to retrieving all keys in the scope and deleting them one-by-one. /// /// /// Subsequent reads by this executor will not find any entries in the cleared scope. Other executors will only /// see the cleared state starting from the next SuperStep. /// /// An optional name that specifies the scope to clear. If null, the default scope is used. + /// The to monitor for cancellation requests. + /// The default is . + /// A ValueTask that represents the asynchronous clear operation. + ValueTask QueueClearScopeAsync(string? scopeName = null, CancellationToken cancellationToken = default); + +#if NET // See above for musings about this construction + /// + /// Asynchronously clears all state entries within the specified scope. + /// + /// This is semantically equivalent to retrieving all keys in the scope and deleting them one-by-one. + /// + /// + /// Subsequent reads by this executor will not find any entries in the cleared scope. Other executors will only + /// see the cleared state starting from the next SuperStep. + /// + /// The to monitor for cancellation requests. /// A ValueTask that represents the asynchronous clear operation. - ValueTask QueueClearScopeAsync(string? scopeName = null); + ValueTask QueueClearScopeAsync(CancellationToken cancellationToken) => this.QueueClearScopeAsync(null, cancellationToken); +#endif /// /// The trace context associated with the current message about to be processed by the executor, if any. 
diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessRunner.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessRunner.cs index 6f4cef55c5..6d7a4bd691 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessRunner.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessRunner.cs @@ -46,14 +46,14 @@ public InProcessRunner(Workflow workflow, ICheckpointManager? checkpointManager, public string StartExecutorId { get; } private readonly HashSet _knownValidInputTypes; - public async ValueTask IsValidInputTypeAsync(Type messageType, CancellationToken cancellation = default) + public async ValueTask IsValidInputTypeAsync(Type messageType, CancellationToken cancellationToken = default) { if (this._knownValidInputTypes.Contains(messageType)) { return true; } - Executor startingExecutor = await this.RunContext.EnsureExecutorAsync(this.Workflow.StartExecutorId, tracer: null).ConfigureAwait(false); + Executor startingExecutor = await this.RunContext.EnsureExecutorAsync(this.Workflow.StartExecutorId, tracer: null, cancellationToken).ConfigureAwait(false); if (startingExecutor.CanHandle(messageType)) { this._knownValidInputTypes.Add(messageType); @@ -63,10 +63,10 @@ public async ValueTask IsValidInputTypeAsync(Type messageType, Cancellatio return false; } - public ValueTask IsValidInputTypeAsync(CancellationToken cancellation = default) - => this.IsValidInputTypeAsync(typeof(T), cancellation); + public ValueTask IsValidInputTypeAsync(CancellationToken cancellationToken = default) + => this.IsValidInputTypeAsync(typeof(T), cancellationToken); - public async ValueTask EnqueueMessageUntypedAsync(object message, Type declaredType, CancellationToken cancellation = default) + public async ValueTask EnqueueMessageUntypedAsync(object message, Type declaredType, CancellationToken cancellationToken = default) { this.RunContext.CheckEnded(); Throw.IfNull(message); @@ -78,7 +78,7 @@ public async ValueTask EnqueueMessageUntypedAsync(object 
message, Type dec // Check that the type of the incoming message is compatible with the starting executor's // input type. - if (!await this.IsValidInputTypeAsync(declaredType, cancellation).ConfigureAwait(false)) + if (!await this.IsValidInputTypeAsync(declaredType, cancellationToken).ConfigureAwait(false)) { return false; } @@ -87,13 +87,13 @@ public async ValueTask EnqueueMessageUntypedAsync(object message, Type dec return true; } - public ValueTask EnqueueMessageAsync(T message, CancellationToken cancellation = default) - => this.EnqueueMessageUntypedAsync(Throw.IfNull(message), typeof(T), cancellation); + public ValueTask EnqueueMessageAsync(T message, CancellationToken cancellationToken = default) + => this.EnqueueMessageUntypedAsync(Throw.IfNull(message), typeof(T), cancellationToken); - public ValueTask EnqueueMessageAsync(object message, CancellationToken cancellation = default) - => this.EnqueueMessageUntypedAsync(Throw.IfNull(message), message.GetType(), cancellation); + public ValueTask EnqueueMessageUntypedAsync(object message, CancellationToken cancellationToken = default) + => this.EnqueueMessageUntypedAsync(Throw.IfNull(message), message.GetType(), cancellationToken); - ValueTask ISuperStepRunner.EnqueueResponseAsync(ExternalResponse response, CancellationToken cancellation) + ValueTask ISuperStepRunner.EnqueueResponseAsync(ExternalResponse response, CancellationToken cancellationToken) { // TODO: Check that there exists a corresponding input port? 
return this.RunContext.AddExternalResponseAsync(response); @@ -110,13 +110,13 @@ ValueTask ISuperStepRunner.EnqueueResponseAsync(ExternalResponse response, Cance private ValueTask RaiseWorkflowEventAsync(WorkflowEvent workflowEvent) => this.OutgoingEvents.EnqueueAsync(workflowEvent); - public ValueTask BeginStreamAsync(ExecutionMode mode, CancellationToken cancellation = default) + public ValueTask BeginStreamAsync(ExecutionMode mode, CancellationToken cancellationToken = default) { this.RunContext.CheckEnded(); return new(new AsyncRunHandle(this, this, mode)); } - public async ValueTask ResumeStreamAsync(ExecutionMode mode, CheckpointInfo fromCheckpoint, CancellationToken cancellation = default) + public async ValueTask ResumeStreamAsync(ExecutionMode mode, CheckpointInfo fromCheckpoint, CancellationToken cancellationToken = default) { this.RunContext.CheckEnded(); Throw.IfNull(fromCheckpoint); @@ -125,7 +125,7 @@ public async ValueTask ResumeStreamAsync(ExecutionMode mode, Che throw new InvalidOperationException("This runner was not configured with a CheckpointManager, so it cannot restore checkpoints."); } - await this.RestoreCheckpointAsync(fromCheckpoint, cancellation).ConfigureAwait(false); + await this.RestoreCheckpointAsync(fromCheckpoint, cancellationToken).ConfigureAwait(false); return new AsyncRunHandle(this, this, mode); } @@ -142,7 +142,7 @@ async ValueTask ISuperStepRunner.RunSuperStepAsync(CancellationToken cance return false; } - StepContext currentStep = await this.RunContext.AdvanceAsync().ConfigureAwait(false); + StepContext currentStep = await this.RunContext.AdvanceAsync(cancellationToken).ConfigureAwait(false); if (currentStep.HasMessages || this.RunContext.HasQueuedExternalDeliveries || @@ -150,7 +150,7 @@ async ValueTask ISuperStepRunner.RunSuperStepAsync(CancellationToken cance { try { - await this.RunSuperstepAsync(currentStep).ConfigureAwait(false); + await this.RunSuperstepAsync(currentStep, cancellationToken).ConfigureAwait(false); } 
catch (OperationCanceledException) { } @@ -165,9 +165,9 @@ async ValueTask ISuperStepRunner.RunSuperStepAsync(CancellationToken cance return false; } - private async ValueTask DeliverMessagesAsync(string receiverId, ConcurrentQueue envelopes) + private async ValueTask DeliverMessagesAsync(string receiverId, ConcurrentQueue envelopes, CancellationToken cancellationToken) { - Executor executor = await this.RunContext.EnsureExecutorAsync(receiverId, this.StepTracer).ConfigureAwait(false); + Executor executor = await this.RunContext.EnsureExecutorAsync(receiverId, this.StepTracer, cancellationToken).ConfigureAwait(false); this.StepTracer.TraceActivated(receiverId); while (envelopes.TryDequeue(out var envelope)) @@ -175,19 +175,20 @@ private async ValueTask DeliverMessagesAsync(string receiverId, ConcurrentQueue< await executor.ExecuteAsync( envelope.Message, envelope.MessageType, - this.RunContext.Bind(receiverId, envelope.TraceContext) + this.RunContext.Bind(receiverId, envelope.TraceContext), + cancellationToken ).ConfigureAwait(false); } } - private async ValueTask RunSuperstepAsync(StepContext currentStep) + private async ValueTask RunSuperstepAsync(StepContext currentStep, CancellationToken cancellationToken) { await this.RaiseWorkflowEventAsync(this.StepTracer.Advance(currentStep)).ConfigureAwait(false); // Deliver the messages and queue the next step List receiverTasks = currentStep.QueuedMessages.Keys - .Select(receiverId => this.DeliverMessagesAsync(receiverId, currentStep.MessagesFor(receiverId)).AsTask()) + .Select(receiverId => this.DeliverMessagesAsync(receiverId, currentStep.MessagesFor(receiverId), cancellationToken).AsTask()) .ToList(); // TODO: Should we let the user specify that they want strictly turn-based execution of the edges, vs. concurrent? 
@@ -202,12 +203,12 @@ private async ValueTask RunSuperstepAsync(StepContext currentStep) List subworkflowTasks = new(); foreach (ISuperStepRunner subworkflowRunner in this.RunContext.JoinedSubworkflowRunners) { - subworkflowTasks.Add(subworkflowRunner.RunSuperStepAsync(CancellationToken.None).AsTask()); + subworkflowTasks.Add(subworkflowRunner.RunSuperStepAsync(cancellationToken).AsTask()); } await Task.WhenAll(subworkflowTasks).ConfigureAwait(false); - await this.CheckpointAsync().ConfigureAwait(false); + await this.CheckpointAsync(cancellationToken).ConfigureAwait(false); await this.RaiseWorkflowEventAsync(this.StepTracer.Complete(this.RunContext.NextStepHasActions, this.RunContext.HasUnservicedRequests)) .ConfigureAwait(false); diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessRunnerContext.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessRunnerContext.cs index cd3f763ae8..f707c72be0 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessRunnerContext.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessRunnerContext.cs @@ -57,7 +57,7 @@ public InProcessRunnerContext( this.OutgoingEvents = outgoingEvents; } - public async ValueTask EnsureExecutorAsync(string executorId, IStepTracer? tracer) + public async ValueTask EnsureExecutorAsync(string executorId, IStepTracer? 
tracer, CancellationToken cancellationToken = default) { this.CheckEnded(); Task executorTask = this._executors.GetOrAdd(executorId, CreateExecutorAsync); @@ -88,9 +88,9 @@ async Task CreateExecutorAsync(string id) return await executorTask.ConfigureAwait(false); } - public async ValueTask> GetStartingExecutorInputTypesAsync(CancellationToken cancellation = default) + public async ValueTask> GetStartingExecutorInputTypesAsync(CancellationToken cancellationToken = default) { - Executor startingExecutor = await this.EnsureExecutorAsync(this._workflow.StartExecutorId, tracer: null) + Executor startingExecutor = await this.EnsureExecutorAsync(this._workflow.StartExecutorId, tracer: null, cancellationToken) .ConfigureAwait(false); return startingExecutor.InputTypes; @@ -145,7 +145,7 @@ await this._edgeMap.PrepareDeliveryForResponseAsync(response) public bool HasUnservicedRequests => !this._externalRequests.IsEmpty || this._joinedSubworkflowRunners.Any(joinedRunner => joinedRunner.HasUnservicedRequests); - public async ValueTask AdvanceAsync() + public async ValueTask AdvanceAsync(CancellationToken cancellationToken = default) { this.CheckEnded(); @@ -159,7 +159,7 @@ public async ValueTask AdvanceAsync() return Interlocked.Exchange(ref this._nextStep, new StepContext()); } - public ValueTask AddEventAsync(WorkflowEvent workflowEvent) + public ValueTask AddEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken = default) { this.CheckEnded(); return this.OutgoingEvents.EnqueueAsync(workflowEvent); @@ -168,7 +168,7 @@ public ValueTask AddEventAsync(WorkflowEvent workflowEvent) private static readonly string s_namespace = typeof(IWorkflowContext).Namespace!; private static readonly ActivitySource s_activitySource = new(s_namespace); - public async ValueTask SendMessageAsync(string sourceId, object message, string? targetId = null) + public async ValueTask SendMessageAsync(string sourceId, object message, string? 
targetId = null, CancellationToken cancellationToken = default) { using Activity? activity = s_activitySource.StartActivity(ActivityNames.MessageSend, ActivityKind.Producer); // Create a carrier for trace context propagation @@ -231,19 +231,19 @@ private sealed class BoundContext( OutputFilter outputFilter, Dictionary? traceContext) : IWorkflowContext { - public ValueTask AddEventAsync(WorkflowEvent workflowEvent) => RunnerContext.AddEventAsync(workflowEvent); + public ValueTask AddEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken = default) => RunnerContext.AddEventAsync(workflowEvent, cancellationToken); - public ValueTask SendMessageAsync(object message, string? targetId = null) + public ValueTask SendMessageAsync(object message, string? targetId = null, CancellationToken cancellationToken = default) { - return RunnerContext.SendMessageAsync(ExecutorId, message, targetId); + return RunnerContext.SendMessageAsync(ExecutorId, message, targetId, cancellationToken); } - public async ValueTask YieldOutputAsync(object output) + public async ValueTask YieldOutputAsync(object output, CancellationToken cancellationToken = default) { RunnerContext.CheckEnded(); Throw.IfNull(output); - Executor sourceExecutor = await RunnerContext.EnsureExecutorAsync(ExecutorId, tracer: null).ConfigureAwait(false); + Executor sourceExecutor = await RunnerContext.EnsureExecutorAsync(ExecutorId, tracer: null, cancellationToken).ConfigureAwait(false); if (!sourceExecutor.CanOutput(output.GetType())) { throw new InvalidOperationException($"Cannot output object of type {output.GetType().Name}. 
Expecting one of [{string.Join(", ", sourceExecutor.OutputTypes)}]."); @@ -251,22 +251,22 @@ public async ValueTask YieldOutputAsync(object output) if (outputFilter.CanOutput(ExecutorId, output)) { - await this.AddEventAsync(new WorkflowOutputEvent(output, ExecutorId)).ConfigureAwait(false); + await this.AddEventAsync(new WorkflowOutputEvent(output, ExecutorId), cancellationToken).ConfigureAwait(false); } } public ValueTask RequestHaltAsync() => this.AddEventAsync(new RequestHaltEvent()); - public ValueTask ReadStateAsync(string key, string? scopeName = null) + public ValueTask ReadStateAsync(string key, string? scopeName = null, CancellationToken cancellationToken = default) => RunnerContext.StateManager.ReadStateAsync(ExecutorId, scopeName, key); - public ValueTask> ReadStateKeysAsync(string? scopeName = null) + public ValueTask> ReadStateKeysAsync(string? scopeName = null, CancellationToken cancellationToken = default) => RunnerContext.StateManager.ReadKeysAsync(ExecutorId, scopeName); - public ValueTask QueueStateUpdateAsync(string key, T? value, string? scopeName = null) + public ValueTask QueueStateUpdateAsync(string key, T? value, string? scopeName = null, CancellationToken cancellationToken = default) => RunnerContext.StateManager.WriteStateAsync(ExecutorId, scopeName, key, value); - public ValueTask QueueClearScopeAsync(string? scopeName = null) + public ValueTask QueueClearScopeAsync(string? scopeName = null, CancellationToken cancellationToken = default) => RunnerContext.StateManager.ClearStateAsync(ExecutorId, scopeName); public IReadOnlyDictionary? TraceContext => traceContext; @@ -274,7 +274,7 @@ public ValueTask QueueClearScopeAsync(string? 
scopeName = null) public bool WithCheckpointing { get; } - internal Task PrepareForCheckpointAsync(CancellationToken cancellation = default) + internal Task PrepareForCheckpointAsync(CancellationToken cancellationToken = default) { this.CheckEnded(); @@ -283,7 +283,7 @@ internal Task PrepareForCheckpointAsync(CancellationToken cancellation = default async Task InvokeCheckpointingAsync(Task executorTask) { Executor executor = await executorTask.ConfigureAwait(false); - await executor.OnCheckpointingAsync(this.Bind(executor.Id), cancellation).ConfigureAwait(false); + await executor.OnCheckpointingAsync(this.Bind(executor.Id), cancellationToken).ConfigureAwait(false); } } @@ -320,7 +320,7 @@ internal async ValueTask RepublishUnservicedRequestsAsync(CancellationToken canc { foreach (string requestId in this._externalRequests.Keys) { - await this.AddEventAsync(new RequestInfoEvent(this._externalRequests[requestId])) + await this.AddEventAsync(new RequestInfoEvent(this._externalRequests[requestId]), cancellationToken) .ConfigureAwait(false); } } @@ -386,7 +386,7 @@ public async ValueTask EndRunAsync() public IEnumerable JoinedSubworkflowRunners => this._joinedSubworkflowRunners; - public ValueTask AttachSuperstepAsync(ISuperStepRunner superStepRunner, CancellationToken cancellation = default) + public ValueTask AttachSuperstepAsync(ISuperStepRunner superStepRunner, CancellationToken cancellationToken = default) { // This needs to be a thread-safe ordered collection because we can potentially instantiate executors // in parallel, which means multiple sub-workflows could be attaching at the same time. 
@@ -394,9 +394,9 @@ public ValueTask AttachSuperstepAsync(ISuperStepRunner superStepRunner, Cancella return default; } - ValueTask ISuperStepJoinContext.ForwardWorkflowEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellation) - => this.AddEventAsync(workflowEvent); + ValueTask ISuperStepJoinContext.ForwardWorkflowEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken) + => this.AddEventAsync(workflowEvent, cancellationToken); - ValueTask ISuperStepJoinContext.SendMessageAsync(string senderId, [DisallowNull] TMessage message, CancellationToken cancellation) - => this.SendMessageAsync(senderId, Throw.IfNull(message)); + ValueTask ISuperStepJoinContext.SendMessageAsync(string senderId, [DisallowNull] TMessage message, CancellationToken cancellationToken) + => this.SendMessageAsync(senderId, Throw.IfNull(message), cancellationToken: cancellationToken); } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/PortableValue.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/PortableValue.cs index 38865da089..3ca0fea0d0 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/PortableValue.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/PortableValue.cs @@ -114,10 +114,7 @@ public override int GetHashCode() /// true if the current value can be represented as type TValue; otherwise, false. public bool Is([NotNullWhen(true)] out TValue? value) { - if (this.Value is IDelayedDeserialization delayedDeserialization) - { - this._deserializedValueCache ??= delayedDeserialization.Deserialize(); - } + this.TryDeserializeAndUpdateCache(typeof(TValue), out _); if (this.Value is TValue typedValue) { @@ -152,11 +149,9 @@ public bool Is([NotNullWhen(true)] out TValue? value) /// true if the current instance can be assigned to targetType; otherwise, false. public bool IsType(Type targetType, [NotNullWhen(true)] out object? 
value) { + // Unfortunately, there is no way to check that the TypeId specified is assignable to the provided type Throw.IfNull(targetType); - if (this.Value is IDelayedDeserialization delayedDeserialization) - { - this._deserializedValueCache ??= delayedDeserialization.Deserialize(targetType); - } + this.TryDeserializeAndUpdateCache(targetType, out _); if (this.Value is not null && targetType.IsInstanceOfType(this.Value)) { @@ -167,4 +162,41 @@ public bool IsType(Type targetType, [NotNullWhen(true)] out object? value) value = null; return false; } + + private bool TryDeserializeAndUpdateCache(Type targetType, out object? replacedCacheValueOrNull) + { + replacedCacheValueOrNull = null; + + // Explicitly use _value here since we do not want to be overridden by the cache, if any + if (this._value is not IDelayedDeserialization delayedDeserialization) + { + // Not a delayed deserialization; nothing to do + return false; + } + + bool isCompatibleType = false; + if (this._deserializedValueCache == null || !(isCompatibleType = targetType.IsAssignableFrom(this._deserializedValueCache.GetType()))) + { + // Either we have no cache, or the types are incompatible; see if we can deserialize + try + { + object? 
deserialized = delayedDeserialization.Deserialize(targetType); + + if (deserialized != null && targetType.IsInstanceOfType(deserialized)) + { + replacedCacheValueOrNull = this._deserializedValueCache; + this._deserializedValueCache = deserialized; + + return true; + } + } + catch + { + isCompatibleType = false; + } + } + + // The last possibility is that we already deserialized successfully + return isCompatibleType; + } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Reflection/IMessageHandler.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Reflection/IMessageHandler.cs index 1903e49d44..3b18379907 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Reflection/IMessageHandler.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Reflection/IMessageHandler.cs @@ -1,5 +1,6 @@ // Copyright (c) Microsoft. All rights reserved. +using System.Threading; using System.Threading.Tasks; namespace Microsoft.Agents.AI.Workflows.Reflection; @@ -15,8 +16,10 @@ public interface IMessageHandler /// /// The message to handle. /// The execution context. + /// The to monitor for cancellation requests. + /// The default is . /// A task that represents the asynchronous operation. - ValueTask HandleAsync(TMessage message, IWorkflowContext context); + ValueTask HandleAsync(TMessage message, IWorkflowContext context, CancellationToken cancellationToken = default); } /// @@ -32,6 +35,8 @@ public interface IMessageHandler /// /// The message to handle. /// The execution context. + /// The to monitor for cancellation requests. + /// The default is . /// A task that represents the asynchronous operation. 
- ValueTask HandleAsync(TMessage message, IWorkflowContext context); + ValueTask HandleAsync(TMessage message, IWorkflowContext context, CancellationToken cancellationToken = default); } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Reflection/MessageHandlerInfo.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Reflection/MessageHandlerInfo.cs index e00675c473..f63a43b4a8 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Reflection/MessageHandlerInfo.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Reflection/MessageHandlerInfo.cs @@ -5,6 +5,7 @@ using System.Diagnostics.CodeAnalysis; using System.Linq; using System.Reflection; +using System.Threading; using System.Threading.Tasks; using Microsoft.Agents.AI.Workflows.Execution; @@ -26,14 +27,19 @@ public MessageHandlerInfo(MethodInfo handlerInfo) this.HandlerInfo = handlerInfo; ParameterInfo[] parameters = handlerInfo.GetParameters(); - if (parameters.Length != 2) + if (parameters.Length != 3) { - throw new ArgumentException("Handler method must have exactly two parameters: TMessage and IExecutionContext.", nameof(handlerInfo)); + throw new ArgumentException("Handler method must have exactly three parameters: TMessage, IWorkflowContext, and CancellationToken.", nameof(handlerInfo)); } if (parameters[1].ParameterType != typeof(IWorkflowContext)) { - throw new ArgumentException("Handler method's second parameter must be of type IExecutionContext.", nameof(handlerInfo)); + throw new ArgumentException("Handler method's second parameter must be of type IWorkflowContext.", nameof(handlerInfo)); + } + + if (parameters[2].ParameterType != typeof(CancellationToken)) + { + throw new ArgumentException("Handler method's third parameter must be of type CancellationToken.", nameof(handlerInfo)); } this.InType = parameters[0].ParameterType; @@ -61,17 +67,17 @@ public MessageHandlerInfo(MethodInfo handlerInfo) } } - public static Func> Bind(Func handlerAsync, bool checkType, Type? resultType = null, Func>? 
unwrapper = null) + public static Func> Bind(Func handlerAsync, bool checkType, Type? resultType = null, Func>? unwrapper = null) { return InvokeHandlerAsync; - async ValueTask InvokeHandlerAsync(object message, IWorkflowContext workflowContext) + async ValueTask InvokeHandlerAsync(object message, IWorkflowContext workflowContext, CancellationToken cancellationToken) { bool expectingVoid = resultType is null || resultType == typeof(void); try { - object? maybeValueTask = handlerAsync(message, workflowContext); + object? maybeValueTask = handlerAsync(message, workflowContext, cancellationToken); if (expectingVoid) { @@ -109,6 +115,11 @@ async ValueTask InvokeHandlerAsync(object message, IWorkflowContext return CallResult.ReturnResult(result); } + catch (OperationCanceledException) + { + // If the operation was canceled, return a canceled CallResult. + return CallResult.Cancelled(wasVoid: expectingVoid); + } catch (Exception ex) { // If the handler throws an exception, return it in the CallResult. @@ -117,7 +128,7 @@ async ValueTask InvokeHandlerAsync(object message, IWorkflowContext } } - public Func> Bind< + public Func> Bind< [DynamicallyAccessedMembers( ReflectionDemands.RuntimeInterfaceDiscoveryAndInvocation) ] TExecutor @@ -128,9 +139,9 @@ ] TExecutor MethodInfo handlerMethod = this.HandlerInfo; return Bind(InvokeHandler, checkType, this.OutType, this.Unwrapper); - object? InvokeHandler(object message, IWorkflowContext workflowContext) + object? 
InvokeHandler(object message, IWorkflowContext workflowContext, CancellationToken cancellationToken) { - return handlerMethod.Invoke(executor, [message, workflowContext]); + return handlerMethod.Invoke(executor, [message, workflowContext, cancellationToken]); } } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/RouteBuilder.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/RouteBuilder.cs index 965de0cdaa..99cfdb6992 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/RouteBuilder.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/RouteBuilder.cs @@ -3,6 +3,7 @@ using System; using System.Collections.Generic; using System.Diagnostics; +using System.Threading; using System.Threading.Tasks; using Microsoft.Agents.AI.Workflows.Execution; using Microsoft.Shared.Diagnostics; @@ -10,12 +11,14 @@ System.Func< Microsoft.Agents.AI.Workflows.PortableValue, // message Microsoft.Agents.AI.Workflows.IWorkflowContext, // context + System.Threading.CancellationToken, // cancellation System.Threading.Tasks.ValueTask >; using MessageHandlerF = System.Func< object, // message Microsoft.Agents.AI.Workflows.IWorkflowContext, // context + System.Threading.CancellationToken, // cancellation System.Threading.Tasks.ValueTask >; @@ -73,32 +76,58 @@ internal RouteBuilder AddHandlerInternal(Type messageType, MessageHandlerF handl return this; } - internal RouteBuilder AddHandlerUntyped(Type type, Func handler, bool overwrite = false) + internal RouteBuilder AddHandlerUntyped(Type type, Func handler, bool overwrite = false) { Throw.IfNull(handler); return this.AddHandlerInternal(type, WrappedHandlerAsync, outputType: null, overwrite); - async ValueTask WrappedHandlerAsync(object msg, IWorkflowContext ctx) + async ValueTask WrappedHandlerAsync(object message, IWorkflowContext context, CancellationToken cancellationToken) { - await handler.Invoke(msg, ctx).ConfigureAwait(false); + await handler.Invoke(message, context, cancellationToken).ConfigureAwait(false); return 
CallResult.ReturnVoid(); } } - internal RouteBuilder AddHandlerUntyped(Type type, Func> handler, bool overwrite = false) + internal RouteBuilder AddHandlerUntyped(Type type, Func> handler, bool overwrite = false) { Throw.IfNull(handler); return this.AddHandlerInternal(type, WrappedHandlerAsync, outputType: typeof(TResult), overwrite); - async ValueTask WrappedHandlerAsync(object msg, IWorkflowContext ctx) + async ValueTask WrappedHandlerAsync(object message, IWorkflowContext context, CancellationToken cancellationToken) { - TResult result = await handler.Invoke(msg, ctx).ConfigureAwait(false); + TResult result = await handler.Invoke(message, context, cancellationToken).ConfigureAwait(false); return CallResult.ReturnResult(result); } } + /// + /// Registers a handler for messages of the specified input type in the workflow route. + /// + /// If a handler for the specified input type already exists and is + /// , the existing handler will not be replaced. Handlers are invoked asynchronously and are + /// expected to complete their processing before the workflow continues. + /// + /// A delegate that processes messages of type within the workflow context. The + /// delegate is invoked for each incoming message of the specified type. + /// to replace any existing handler for the specified input type; otherwise, to preserve the existing handler. + /// The current instance, enabling fluent configuration of additional handlers or route + /// options. + public RouteBuilder AddHandler(Action handler, bool overwrite = false) + { + Throw.IfNull(handler); + + return this.AddHandlerInternal(typeof(TInput), WrappedHandlerAsync, outputType: null, overwrite); + + async ValueTask WrappedHandlerAsync(object message, IWorkflowContext context, CancellationToken cancellationToken) + { + handler.Invoke((TInput)message, context, cancellationToken); + return CallResult.ReturnVoid(); + } + } + /// /// Registers a handler for messages of the specified input type in the workflow route. 
/// @@ -118,9 +147,35 @@ public RouteBuilder AddHandler(Action handler, return this.AddHandlerInternal(typeof(TInput), WrappedHandlerAsync, outputType: null, overwrite); - async ValueTask WrappedHandlerAsync(object msg, IWorkflowContext ctx) + async ValueTask WrappedHandlerAsync(object message, IWorkflowContext context, CancellationToken cancellationToken) + { + handler.Invoke((TInput)message, context); + return CallResult.ReturnVoid(); + } + } + + /// + /// Registers a handler for messages of the specified input type in the workflow route. + /// + /// If a handler for the specified input type already exists and is + /// , the existing handler will not be replaced. Handlers are invoked asynchronously and are + /// expected to complete their processing before the workflow continues. + /// + /// A delegate that processes messages of type within the workflow context. The + /// delegate is invoked for each incoming message of the specified type. + /// to replace any existing handler for the specified input type; otherwise, to preserve the existing handler. + /// The current instance, enabling fluent configuration of additional handlers or route + /// options. 
+ public RouteBuilder AddHandler(Func handler, bool overwrite = false) + { + Throw.IfNull(handler); + + return this.AddHandlerInternal(typeof(TInput), WrappedHandlerAsync, outputType: null, overwrite); + + async ValueTask WrappedHandlerAsync(object message, IWorkflowContext context, CancellationToken cancellationToken) { - handler.Invoke((TInput)msg, ctx); + await handler.Invoke((TInput)message, context, cancellationToken).ConfigureAwait(false); return CallResult.ReturnVoid(); } } @@ -144,13 +199,39 @@ public RouteBuilder AddHandler(Func return this.AddHandlerInternal(typeof(TInput), WrappedHandlerAsync, outputType: null, overwrite); - async ValueTask WrappedHandlerAsync(object msg, IWorkflowContext ctx) + async ValueTask WrappedHandlerAsync(object message, IWorkflowContext context, CancellationToken cancellationToken) { - await handler.Invoke((TInput)msg, ctx).ConfigureAwait(false); + await handler.Invoke((TInput)message, context).ConfigureAwait(false); return CallResult.ReturnVoid(); } } + /// + /// Registers a handler function for messages of the specified input type in the workflow route. + /// + /// If a handler for the given input type already exists, setting to + /// will replace the existing handler; otherwise, an exception may be thrown. The handler + /// receives the input message and workflow context, and returns a result asynchronously. + /// The type of input message the handler will process. + /// The type of result produced by the handler. + /// A function that processes messages of type within the workflow context and returns + /// a representing the asynchronous result. + /// to replace any existing handler for the input type; otherwise, to + /// preserve existing handlers. + /// The current instance, enabling fluent configuration of workflow routes. 
+ public RouteBuilder AddHandler(Func handler, bool overwrite = false) + { + Throw.IfNull(handler); + + return this.AddHandlerInternal(typeof(TInput), WrappedHandlerAsync, outputType: typeof(TResult), overwrite); + + async ValueTask WrappedHandlerAsync(object message, IWorkflowContext context, CancellationToken cancellationToken) + { + TResult result = handler.Invoke((TInput)message, context, cancellationToken); + return CallResult.ReturnResult(result); + } + } + /// /// Registers a handler function for messages of the specified input type in the workflow route. /// @@ -170,9 +251,35 @@ public RouteBuilder AddHandler(Func WrappedHandlerAsync(object msg, IWorkflowContext ctx) + async ValueTask WrappedHandlerAsync(object message, IWorkflowContext context, CancellationToken cancellationToken) + { + TResult result = handler.Invoke((TInput)message, context); + return CallResult.ReturnResult(result); + } + } + + /// + /// Registers a handler function for messages of the specified input type in the workflow route. + /// + /// If a handler for the given input type already exists, setting to + /// will replace the existing handler; otherwise, an exception may be thrown. The handler + /// receives the input message and workflow context, and returns a result asynchronously. + /// The type of input message the handler will process. + /// The type of result produced by the handler. + /// A function that processes messages of type within the workflow context and returns + /// a representing the asynchronous result. + /// to replace any existing handler for the input type; otherwise, to + /// preserve existing handlers. + /// The current instance, enabling fluent configuration of workflow routes. 
+ public RouteBuilder AddHandler(Func> handler, bool overwrite = false) + { + Throw.IfNull(handler); + + return this.AddHandlerInternal(typeof(TInput), WrappedHandlerAsync, outputType: typeof(TResult), overwrite); + + async ValueTask WrappedHandlerAsync(object message, IWorkflowContext context, CancellationToken cancellationToken) { - TResult result = handler.Invoke((TInput)msg, ctx); + TResult result = await handler.Invoke((TInput)message, context, cancellationToken).ConfigureAwait(false); return CallResult.ReturnResult(result); } } @@ -196,9 +303,9 @@ public RouteBuilder AddHandler(Func WrappedHandlerAsync(object msg, IWorkflowContext ctx) + async ValueTask WrappedHandlerAsync(object message, IWorkflowContext context, CancellationToken cancellationToken) { - TResult result = await handler.Invoke((TInput)msg, ctx).ConfigureAwait(false); + TResult result = await handler.Invoke((TInput)message, context).ConfigureAwait(false); return CallResult.ReturnResult(result); } } @@ -215,6 +322,30 @@ private RouteBuilder AddCatchAll(CatchAllF handler, bool overwrite = false) return this; } + /// + /// Register a handler function as a catch-all handler: It will be used if not type-matching handler is registered. + /// + /// If a catch-all handler for already exists, setting to + /// will replace the existing handler; otherwise, an exception may be thrown. The handler receives the input message + /// wrapped as and workflow context, and returns a result asynchronously. + /// A function that processes messages wrapped as within the + /// workflow context. The delegate is invoked for each incoming message not otherwise handled. + /// to replace any existing handler for the input type; otherwise, to + /// preserve existing handlers. + /// The current instance, enabling fluent configuration of workflow routes. 
+ public RouteBuilder AddCatchAll(Func handler, bool overwrite = false) + { + Throw.IfNull(handler); + + return this.AddCatchAll(WrappedHandlerAsync, overwrite); + + async ValueTask WrappedHandlerAsync(PortableValue message, IWorkflowContext context, CancellationToken cancellationToken) + { + await handler.Invoke(message, context, cancellationToken).ConfigureAwait(false); + return CallResult.ReturnVoid(); + } + } + /// /// Register a handler function as a catch-all handler: It will be used if not type-matching handler is registered. /// @@ -232,13 +363,37 @@ public RouteBuilder AddCatchAll(Func return this.AddCatchAll(WrappedHandlerAsync, overwrite); - async ValueTask WrappedHandlerAsync(PortableValue message, IWorkflowContext ctx) + async ValueTask WrappedHandlerAsync(PortableValue message, IWorkflowContext context, CancellationToken cancellationToken) { - await handler.Invoke(message, ctx).ConfigureAwait(false); + await handler.Invoke(message, context).ConfigureAwait(false); return CallResult.ReturnVoid(); } } + /// + /// Register a handler function as a catch-all handler: It will be used if no type-matching handler is registered. + /// + /// If a catch-all handler for already exists, setting to + /// will replace the existing handler; otherwise, an exception may be thrown. The handler receives the input message + /// wrapped as and workflow context, and returns a result asynchronously. + /// A function that processes messages wrapped as within the + /// workflow context and returns a representing the asynchronous result. + /// to replace any existing handler for the input type; otherwise, to + /// preserve existing handlers. + /// The current instance, enabling fluent configuration of workflow routes. 
+ public RouteBuilder AddCatchAll(Func> handler, bool overwrite = false) + { + Throw.IfNull(handler); + + return this.AddCatchAll(WrappedHandlerAsync, overwrite); + + async ValueTask WrappedHandlerAsync(PortableValue message, IWorkflowContext context, CancellationToken cancellationToken) + { + TResult result = await handler.Invoke(message, context, cancellationToken).ConfigureAwait(false); + return CallResult.ReturnResult(result); + } + } + /// /// Register a handler function as a catch-all handler: It will be used if not type-matching handler is registered. /// @@ -256,13 +411,37 @@ public RouteBuilder AddCatchAll(Func WrappedHandlerAsync(PortableValue message, IWorkflowContext ctx) + async ValueTask WrappedHandlerAsync(PortableValue message, IWorkflowContext context, CancellationToken cancellationToken) { - TResult result = await handler.Invoke(message, ctx).ConfigureAwait(false); + TResult result = await handler.Invoke(message, context).ConfigureAwait(false); return CallResult.ReturnResult(result); } } + /// + /// Register a handler function as a catch-all handler: It will be used if no type-matching handler is registered. + /// + /// If a catch-all handler for already exists, setting to + /// will replace the existing handler; otherwise, an exception may be thrown. The handler receives the input message + /// wrapped as and workflow context, and returns a result asynchronously. + /// A function that processes messages wrapped as within the + /// workflow context. The delegate is invoked for each incoming message not otherwise handled. + /// to replace any existing handler for the input type; otherwise, to + /// preserve existing handlers. + /// The current instance, enabling fluent configuration of workflow routes. 
+ public RouteBuilder AddCatchAll(Action handler, bool overwrite = false) + { + Throw.IfNull(handler); + + return this.AddCatchAll(WrappedHandlerAsync, overwrite); + + ValueTask WrappedHandlerAsync(PortableValue message, IWorkflowContext ctx, CancellationToken cancellationToken) + { + handler.Invoke(message, ctx, cancellationToken); + return new(CallResult.ReturnVoid()); + } + } + /// /// Register a handler function as a catch-all handler: It will be used if not type-matching handler is registered. /// @@ -280,13 +459,37 @@ public RouteBuilder AddCatchAll(Action handler, return this.AddCatchAll(WrappedHandlerAsync, overwrite); - ValueTask WrappedHandlerAsync(PortableValue message, IWorkflowContext ctx) + ValueTask WrappedHandlerAsync(PortableValue message, IWorkflowContext ctx, CancellationToken cancellationToken) { handler.Invoke(message, ctx); return new(CallResult.ReturnVoid()); } } + /// + /// Register a handler function as a catch-all handler: It will be used if no type-matching handler is registered. + /// + /// If a catch-all handler for already exists, setting to + /// will replace the existing handler; otherwise, an exception may be thrown. The handler receives the input message + /// wrapped as and workflow context, and returns a result asynchronously. + /// A function that processes messages wrapped as within the + /// workflow context and returns a representing the asynchronous result. + /// to replace any existing handler for the input type; otherwise, to + /// preserve existing handlers. + /// The current instance, enabling fluent configuration of workflow routes. 
+ public RouteBuilder AddCatchAll(Func handler, bool overwrite = false) + { + Throw.IfNull(handler); + + return this.AddCatchAll(WrappedHandlerAsync, overwrite); + + ValueTask WrappedHandlerAsync(PortableValue message, IWorkflowContext context, CancellationToken cancellationToken) + { + TResult result = handler.Invoke(message, context, cancellationToken); + return new(CallResult.ReturnResult(result)); + } + } + /// /// Register a handler function as a catch-all handler: It will be used if not type-matching handler is registered. /// @@ -304,9 +507,9 @@ public RouteBuilder AddCatchAll(Func WrappedHandlerAsync(PortableValue message, IWorkflowContext ctx) + ValueTask WrappedHandlerAsync(PortableValue message, IWorkflowContext context, CancellationToken cancellationToken) { - TResult result = handler.Invoke(message, ctx); + TResult result = handler.Invoke(message, context); return new(CallResult.ReturnResult(result)); } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Run.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Run.cs index dec02fbd46..3dfa4f271e 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Run.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Run.cs @@ -43,8 +43,8 @@ internal async ValueTask RunToNextHaltAsync(CancellationToken cancellation /// /// Gets the current execution status of the workflow run. /// - public ValueTask GetStatusAsync(CancellationToken cancellation = default) - => this._runHandle.GetStatusAsync(cancellation); + public ValueTask GetStatusAsync(CancellationToken cancellationToken = default) + => this._runHandle.GetStatusAsync(cancellationToken); /// /// Gets all events emitted by the workflow. @@ -113,7 +113,7 @@ public async ValueTask ResumeAsync(CancellationToken cancellationToken { foreach (object? 
message in messages) { - await this._runHandle.EnqueueMessageUntypedAsync(message, cancellation: cancellationToken).ConfigureAwait(false); + await this._runHandle.EnqueueMessageUntypedAsync(message, cancellationToken: cancellationToken).ConfigureAwait(false); } } else diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/AIAgentHostExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/AIAgentHostExecutor.cs index e50e0f803a..836399c5c1 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/AIAgentHostExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/AIAgentHostExecutor.cs @@ -30,7 +30,7 @@ protected internal override async ValueTask OnCheckpointingAsync(IWorkflowContex if (this._thread is not null) { JsonElement threadValue = this._thread.Serialize(); - threadTask = context.QueueStateUpdateAsync(ThreadStateKey, threadValue).AsTask(); + threadTask = context.QueueStateUpdateAsync(ThreadStateKey, threadValue, cancellationToken: cancellationToken).AsTask(); } Task baseTask = base.OnCheckpointingAsync(context, cancellationToken).AsTask(); @@ -40,7 +40,7 @@ protected internal override async ValueTask OnCheckpointingAsync(IWorkflowContex protected internal override async ValueTask OnCheckpointRestoredAsync(IWorkflowContext context, CancellationToken cancellationToken = default) { - JsonElement? threadValue = await context.ReadStateAsync(ThreadStateKey).ConfigureAwait(false); + JsonElement? threadValue = await context.ReadStateAsync(ThreadStateKey, cancellationToken: cancellationToken).ConfigureAwait(false); if (threadValue.HasValue) { this._thread = this._agent.DeserializeThread(threadValue.Value); @@ -67,7 +67,7 @@ protected override async ValueTask TakeTurnAsync(List messages, IWo if (emitEvents ?? 
this._emitEvents) { - await context.AddEventAsync(new AgentRunUpdateEvent(this.Id, update)).ConfigureAwait(false); + await context.AddEventAsync(new AgentRunUpdateEvent(this.Id, update), cancellationToken).ConfigureAwait(false); } // TODO: FunctionCall request handling, and user info request handling. @@ -100,7 +100,7 @@ async ValueTask PublishCurrentMessageAsync() currentStreamingMessage.Contents = updates; updates = []; - await context.SendMessageAsync(currentStreamingMessage).ConfigureAwait(false); + await context.SendMessageAsync(currentStreamingMessage, cancellationToken: cancellationToken).ConfigureAwait(false); } currentStreamingMessage = null; diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/RequestInfoExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/RequestInfoExecutor.cs index 6226b00db9..afb07507f9 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/RequestInfoExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/RequestInfoExecutor.cs @@ -3,6 +3,7 @@ using System; using System.Collections.Generic; using System.Diagnostics; +using System.Threading; using System.Threading.Tasks; using Microsoft.Agents.AI.Workflows.Execution; using Microsoft.Shared.Diagnostics; @@ -55,7 +56,7 @@ protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) internal void AttachRequestSink(IExternalRequestSink requestSink) => this.RequestSink = Throw.IfNull(requestSink); - public async ValueTask HandleCatchAllAsync(PortableValue message, IWorkflowContext context) + public async ValueTask HandleCatchAllAsync(PortableValue message, IWorkflowContext context, CancellationToken cancellationToken) { Throw.IfNull(message); @@ -70,13 +71,13 @@ protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) } else if (message.Is(out ExternalRequest? 
request)) { - return await this.HandleAsync(request, context).ConfigureAwait(false); + return await this.HandleAsync(request, context, cancellationToken).ConfigureAwait(false); } return null; } - public async ValueTask HandleAsync(ExternalRequest message, IWorkflowContext context) + public async ValueTask HandleAsync(ExternalRequest message, IWorkflowContext context, CancellationToken cancellationToken = default) { Debug.Assert(this._allowWrapped); Throw.IfNull(message); @@ -100,7 +101,7 @@ public async ValueTask HandleAsync(ExternalRequest message, IWo return request; } - public async ValueTask HandleAsync(object message, IWorkflowContext context) + public async ValueTask HandleAsync(object message, IWorkflowContext context, CancellationToken cancellationToken = default) { Throw.IfNull(message); Debug.Assert(this.Port.Request.IsInstanceOfType(message)); @@ -111,7 +112,7 @@ public async ValueTask HandleAsync(object message, IWorkflowCon return request; } - public async ValueTask HandleAsync(ExternalResponse message, IWorkflowContext context) + public async ValueTask HandleAsync(ExternalResponse message, IWorkflowContext context, CancellationToken cancellationToken = default) { Throw.IfNull(message); Throw.IfNull(message.Data); @@ -127,14 +128,14 @@ public async ValueTask HandleAsync(object message, IWorkflowCon if (this._allowWrapped && this._wrappedRequests.TryGetValue(message.RequestId, out ExternalRequest? 
originalRequest)) { - await context.SendMessageAsync(originalRequest.RewrapResponse(message)).ConfigureAwait(false); + await context.SendMessageAsync(originalRequest.RewrapResponse(message), cancellationToken: cancellationToken).ConfigureAwait(false); } else { - await context.SendMessageAsync(message).ConfigureAwait(false); + await context.SendMessageAsync(message, cancellationToken: cancellationToken).ConfigureAwait(false); } - await context.SendMessageAsync(data).ConfigureAwait(false); + await context.SendMessageAsync(data, cancellationToken: cancellationToken).ConfigureAwait(false); return message; } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/WorkflowHostExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/WorkflowHostExecutor.cs index 78f79148e7..4bcffafef9 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/WorkflowHostExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/WorkflowHostExecutor.cs @@ -44,22 +44,22 @@ protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) return routeBuilder.AddCatchAll(this.QueueExternalMessageAsync); } - private async ValueTask QueueExternalMessageAsync(PortableValue portableValue, IWorkflowContext context) + private async ValueTask QueueExternalMessageAsync(PortableValue portableValue, IWorkflowContext context, CancellationToken cancellationToken) { if (portableValue.Is(out ExternalResponse? 
response)) { response = this.CheckAndUnqualifyResponse(response); - await this.EnsureRunSendMessageAsync(response).ConfigureAwait(false); + await this.EnsureRunSendMessageAsync(response, cancellationToken: cancellationToken).ConfigureAwait(false); } else { InProcessRunner runner = await this.EnsureRunnerAsync().ConfigureAwait(false); - IEnumerable validInputTypes = await runner.RunContext.GetStartingExecutorInputTypesAsync().ConfigureAwait(false); + IEnumerable validInputTypes = await runner.RunContext.GetStartingExecutorInputTypesAsync(cancellationToken).ConfigureAwait(false); foreach (Type candidateType in validInputTypes) { if (portableValue.IsType(candidateType, out object? message)) { - await this.EnsureRunSendMessageAsync(message, candidateType).ConfigureAwait(false); + await this.EnsureRunSendMessageAsync(message, candidateType, cancellationToken: cancellationToken).ConfigureAwait(false); return; } } @@ -87,7 +87,7 @@ internal async ValueTask EnsureRunnerAsync() return this._activeRunner; } - internal async ValueTask EnsureRunSendMessageAsync(object? incomingMessage = null, Type? incomingMessageType = null, bool resume = false, CancellationToken cancellation = default) + internal async ValueTask EnsureRunSendMessageAsync(object? incomingMessage = null, Type? incomingMessageType = null, bool resume = false, CancellationToken cancellationToken = default) { Debug.Assert(this._joinContext != null, "Must attach to a join context before starting the run."); @@ -114,20 +114,20 @@ internal async ValueTask EnsureRunSendMessageAsync(object? 
incomin throw new InvalidOperationException("No checkpoints available to resume from."); } - runHandle = await activeRunner.ResumeStreamAsync(ExecutionMode.Subworkflow, lastCheckpoint!, cancellation) + runHandle = await activeRunner.ResumeStreamAsync(ExecutionMode.Subworkflow, lastCheckpoint!, cancellationToken) .ConfigureAwait(false); if (incomingMessage != null) { - await runHandle.EnqueueUntypedAndRunAsync(incomingMessage, cancellation).ConfigureAwait(false); + await runHandle.EnqueueUntypedAndRunAsync(incomingMessage, cancellationToken).ConfigureAwait(false); } } else if (incomingMessage != null) { - runHandle = await activeRunner.BeginStreamAsync(ExecutionMode.Subworkflow, cancellation) + runHandle = await activeRunner.BeginStreamAsync(ExecutionMode.Subworkflow, cancellationToken) .ConfigureAwait(false); - await runHandle.EnqueueUntypedAndRunAsync(incomingMessage, cancellation).ConfigureAwait(false); + await runHandle.EnqueueUntypedAndRunAsync(incomingMessage, cancellationToken).ConfigureAwait(false); } else { @@ -136,14 +136,14 @@ internal async ValueTask EnsureRunSendMessageAsync(object? 
incomin } else { - runHandle = await activeRunner.BeginStreamAsync(ExecutionMode.Subworkflow, cancellation).ConfigureAwait(false); + runHandle = await activeRunner.BeginStreamAsync(ExecutionMode.Subworkflow, cancellationToken).ConfigureAwait(false); - await runHandle.EnqueueMessageUntypedAsync(Throw.IfNull(incomingMessage), cancellation: cancellation).ConfigureAwait(false); + await runHandle.EnqueueMessageUntypedAsync(Throw.IfNull(incomingMessage), cancellationToken: cancellationToken).ConfigureAwait(false); } this._run = new(runHandle); - await this._joinContext.AttachSuperstepAsync(activeRunner, cancellation).ConfigureAwait(false); + await this._joinContext.AttachSuperstepAsync(activeRunner, cancellationToken).ConfigureAwait(false); activeRunner.OutgoingEvents.EventRaised += this.ForwardWorkflowEventAsync; return this._run; @@ -228,7 +228,7 @@ internal async ValueTask AttachSuperStepContextAsync(ISuperStepJoinContext joinC protected internal override async ValueTask OnCheckpointingAsync(IWorkflowContext context, CancellationToken cancellationToken = default) { - await context.QueueStateUpdateAsync(nameof(CheckpointManager), this._checkpointManager).ConfigureAwait(false); + await context.QueueStateUpdateAsync(nameof(CheckpointManager), this._checkpointManager, cancellationToken: cancellationToken).ConfigureAwait(false); await base.OnCheckpointingAsync(context, cancellationToken).ConfigureAwait(false); } @@ -237,7 +237,7 @@ protected internal override async ValueTask OnCheckpointRestoredAsync(IWorkflowC { await base.OnCheckpointRestoredAsync(context, cancellationToken).ConfigureAwait(false); - InMemoryCheckpointManager manager = await context.ReadStateAsync(nameof(InMemoryCheckpointManager)).ConfigureAwait(false) ?? new(); + InMemoryCheckpointManager manager = await context.ReadStateAsync(nameof(InMemoryCheckpointManager), cancellationToken: cancellationToken).ConfigureAwait(false) ?? 
new(); if (this._checkpointManager == manager) { // We are restoring in the context of the same run; not need to rebuild the entire execution stack. @@ -249,7 +249,7 @@ protected internal override async ValueTask OnCheckpointRestoredAsync(IWorkflowC await this.ResetAsync().ConfigureAwait(false); } - StreamingRun run = await this.EnsureRunSendMessageAsync(cancellation: cancellationToken).ConfigureAwait(false); + StreamingRun run = await this.EnsureRunSendMessageAsync(cancellationToken: cancellationToken).ConfigureAwait(false); } private async ValueTask ResetAsync() diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/StreamingRun.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/StreamingRun.cs index aa70307c1e..ad6727fc54 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/StreamingRun.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/StreamingRun.cs @@ -31,8 +31,8 @@ internal StreamingRun(AsyncRunHandle runHandle) /// /// Gets the current execution status of the workflow run. /// - public ValueTask GetStatusAsync(CancellationToken cancellation = default) - => this._runHandle.GetStatusAsync(cancellation); + public ValueTask GetStatusAsync(CancellationToken cancellationToken = default) + => this._runHandle.GetStatusAsync(cancellationToken); /// /// Asynchronously sends the specified response to the external system and signals completion of the current @@ -67,7 +67,7 @@ internal ValueTask TrySendMessageUntypedAsync(object message, Type? declar /// progresses. The stream completes when a is encountered. Events are /// delivered in the order they are raised. /// A that can be used to cancel the streaming operation. If cancellation is - /// requested, the stream will end and no further events will be yielded. + /// requested, the stream will end and no further events will be yielded, but this will not cancel the workflow execution. /// An asynchronous stream of objects representing significant workflow state changes. 
/// The stream ends when the workflow completes or when cancellation is requested. public IAsyncEnumerable WatchStreamAsync( diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowBuilder.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowBuilder.cs index 7a848de4e5..94e4f20594 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowBuilder.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowBuilder.cs @@ -7,6 +7,7 @@ using System.Text.Json; using System.Threading; using System.Threading.Tasks; +using Microsoft.Agents.AI.Workflows.Checkpointing; using Microsoft.Agents.AI.Workflows.Observability; using Microsoft.Shared.Diagnostics; @@ -414,23 +415,13 @@ private Workflow BuildInternal(Activity? activity = null) { activity?.SetTag(Tags.WorkflowDescription, workflow.Description); } - if (activity is not null) - { - var workflowJsonDefinitionData = new WorkflowJsonDefinitionData - { - StartExecutorId = this._startExecutorId, - Edges = this._edges.Values.SelectMany(e => e), - Ports = this._inputPorts.Values, - OutputExecutors = this._outputExecutors - }; - activity.SetTag( + activity?.SetTag( Tags.WorkflowDefinition, JsonSerializer.Serialize( - workflowJsonDefinitionData, - WorkflowJsonDefinitionJsonContext.Default.WorkflowJsonDefinitionData + workflow.ToWorkflowInfo(), + WorkflowsJsonUtilities.JsonContext.Default.WorkflowInfo ) ); - } return workflow; } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowJsonDefintion.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowJsonDefintion.cs deleted file mode 100644 index d4e177b338..0000000000 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowJsonDefintion.cs +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. 
- -using System.Collections.Generic; -using System.Text.Json.Serialization; - -namespace Microsoft.Agents.AI.Workflows; - -[JsonSourceGenerationOptions(UseStringEnumConverter = true)] -[JsonSerializable(typeof(WorkflowJsonDefinitionData))] -internal partial class WorkflowJsonDefinitionJsonContext : JsonSerializerContext -{ -} - -internal class WorkflowJsonDefinitionData -{ - public string StartExecutorId { get; set; } = string.Empty; - public IEnumerable Edges { get; set; } = []; - public IEnumerable Ports { get; set; } = []; - public IEnumerable OutputExecutors { get; set; } = []; -} diff --git a/dotnet/src/Microsoft.Agents.AI/AgentExtensions.cs b/dotnet/src/Microsoft.Agents.AI/AgentExtensions.cs index e58fbf4920..097b789a84 100644 --- a/dotnet/src/Microsoft.Agents.AI/AgentExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI/AgentExtensions.cs @@ -2,6 +2,7 @@ using System; using System.ComponentModel; +using System.Text.RegularExpressions; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.AI; @@ -12,7 +13,7 @@ namespace Microsoft.Agents.AI; /// /// Provides extensions for . /// -public static class AIAgentExtensions +public static partial class AIAgentExtensions { /// /// Creates a new using the specified agent as the foundation for the builder pipeline. @@ -77,9 +78,32 @@ async Task InvokeAgentAsync( } options ??= new(); - options.Name ??= agent.Name; + options.Name ??= SanitizeAgentName(agent.Name); options.Description ??= agent.Description; return AIFunctionFactory.Create(InvokeAgentAsync, options); } + + /// + /// Removes characters from AI agent name that shouldn't be used in an AI function name. + /// + /// The AI agent name to sanitize. + /// + /// The sanitized agent name with invalid characters replaced by underscores, or null if the input is null. + /// + private static string? SanitizeAgentName(string? agentName) + { + return agentName is null + ? 
agentName + : InvalidNameCharsRegex().Replace(agentName, "_"); + } + + /// Regex that flags any character other than ASCII digits or letters. +#if NET + [GeneratedRegex("[^0-9A-Za-z]+")] + private static partial Regex InvalidNameCharsRegex(); +#else + private static Regex InvalidNameCharsRegex() => s_invalidNameCharsRegex; + private static readonly Regex s_invalidNameCharsRegex = new("[^0-9A-Za-z]+", RegexOptions.Compiled); +#endif } diff --git a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentThread.cs b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentThread.cs index 4c93fe4295..baa36c0054 100644 --- a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentThread.cs +++ b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentThread.cs @@ -52,7 +52,7 @@ internal ChatClientAgentThread( } var state = serializedThreadState.Deserialize( - AgentAbstractionsJsonUtilities.DefaultOptions.GetTypeInfo(typeof(ThreadState))) as ThreadState; + AgentJsonUtilities.DefaultOptions.GetTypeInfo(typeof(ThreadState))) as ThreadState; this.AIContextProvider = aiContextProviderFactory?.Invoke(state?.AIContextProviderState ?? default, jsonSerializerOptions); @@ -170,7 +170,7 @@ public override JsonElement Serialize(JsonSerializerOptions? 
jsonSerializerOptio AIContextProviderState = aiContextProviderState }; - return JsonSerializer.SerializeToElement(state, AgentAbstractionsJsonUtilities.DefaultOptions.GetTypeInfo(typeof(ThreadState))); + return JsonSerializer.SerializeToElement(state, AgentJsonUtilities.DefaultOptions.GetTypeInfo(typeof(ThreadState))); } /// diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/AgentExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/AgentExtensionsTests.cs index f7ad1ebcdc..f2b2bcfd6a 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/AgentExtensionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/AgentExtensionsTests.cs @@ -277,6 +277,31 @@ public async Task CreateFromAgent_InvokeWithComplexResponseFromAgentAsync_Return Assert.Equal("Complex response", result.ToString()); } + [Theory] + [InlineData("MyAgent", "MyAgent")] + [InlineData("Agent123", "Agent123")] + [InlineData("Agent_With_Underscores", "Agent_With_Underscores")] + [InlineData("Agent_With_________@@@@_Underscores", "Agent_With_Underscores")] + [InlineData("123Agent", "123Agent")] + [InlineData("My-Agent", "My_Agent")] + [InlineData("My Agent", "My_Agent")] + [InlineData("Agent@123", "Agent_123")] + [InlineData("Agent/With\\Slashes", "Agent_With_Slashes")] + [InlineData("Agent.With.Dots", "Agent_With_Dots")] + public void CreateFromAgent_SanitizesAgentName(string agentName, string expectedFunctionName) + { + // Arrange + var mockAgent = new Mock(); + mockAgent.Setup(a => a.Name).Returns(agentName); + + // Act + var result = mockAgent.Object.AsAIFunction(); + + // Assert + Assert.NotNull(result); + Assert.Equal(expectedFunctionName, result.Name); + } + /// /// Test implementation of AIAgent for testing purposes. 
/// diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Agents/TestAgent.yaml b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Agents/TestAgent.yaml index a85f303577..eddcc7b0aa 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Agents/TestAgent.yaml +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Agents/TestAgent.yaml @@ -2,4 +2,4 @@ type: foundry_agent name: BasicAgent description: Basic agent for integration tests model: - id: ${FOUNDRY_MODEL_DEPLOYMENT_NAME} + id: ${FOUNDRY_MEDIA_DEPLOYMENT_NAME} diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/AzureAgentProviderTest.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/AzureAgentProviderTest.cs index 45b7735eb9..87b9160196 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/AzureAgentProviderTest.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/AzureAgentProviderTest.cs @@ -8,21 +8,17 @@ using Azure.Identity; using Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests.Framework; using Microsoft.Extensions.AI; -using Microsoft.Extensions.Configuration; -using Shared.IntegrationTests; using Xunit.Abstractions; namespace Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests; public sealed class AzureAgentProviderTest(ITestOutputHelper output) : IntegrationTest(output) { - private AzureAIConfiguration? 
_configuration; - [Fact] public async Task ConversationTestAsync() { // Arrange - AzureAgentProvider provider = new(this.Configuration.Endpoint, new AzureCliCredential()); + AzureAgentProvider provider = new(this.FoundryConfiguration.Endpoint, new AzureCliCredential()); // Act string conversationId = await provider.CreateConversationAsync(); // Assert @@ -52,7 +48,7 @@ public async Task ConversationTestAsync() public async Task GetAgentTestAsync() { // Arrange - AzureAgentProvider provider = new(this.Configuration.Endpoint, new AzureCliCredential()); + AzureAgentProvider provider = new(this.FoundryConfiguration.Endpoint, new AzureCliCredential()); string agentName = $"TestAgent-{DateTime.UtcNow:yyMMdd-HHmmss-fff}"; string agent1Id = await this.CreateAgentAsync(); @@ -74,22 +70,8 @@ public async Task GetAgentTestAsync() private async ValueTask CreateAgentAsync(string? name = null) { - PersistentAgentsClient client = new(this.Configuration.Endpoint, new AzureCliCredential()); - PersistentAgent agent = await client.Administration.CreateAgentAsync(this.Configuration.DeploymentName, name: name); + PersistentAgentsClient client = new(this.FoundryConfiguration.Endpoint, new AzureCliCredential()); + PersistentAgent agent = await client.Administration.CreateAgentAsync(this.FoundryConfiguration.DeploymentName, name: name); return agent.Id; } - - private AzureAIConfiguration Configuration - { - get - { - if (this._configuration is null) - { - this._configuration ??= InitializeConfig().GetSection("AzureAI").Get(); - Assert.NotNull(this._configuration); - } - - return this._configuration; - } - } } diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/DeclarativeCodeGenTest.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/DeclarativeCodeGenTest.cs index 0dd9f08940..7044f5ca70 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/DeclarativeCodeGenTest.cs +++ 
b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/DeclarativeCodeGenTest.cs @@ -12,10 +12,10 @@ namespace Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests; /// /// Tests execution of workflow created by . /// -[Collection("Global")] public sealed class DeclarativeCodeGenTest(ITestOutputHelper output) : WorkflowTest(output) { [Theory] + [InlineData("CheckSystem.yaml", "CheckSystem.json")] [InlineData("SendActivity.yaml", "SendActivity.json")] [InlineData("InvokeAgent.yaml", "InvokeAgent.json")] [InlineData("InvokeAgent.yaml", "InvokeAgent.json", true)] @@ -33,7 +33,7 @@ public Task ValidateCaseAsync(string workflowFileName, string testcaseFileName, public Task ValidateScenarioAsync(string workflowFileName, string testcaseFileName, bool externalConveration = false) => this.RunWorkflowAsync(Path.Combine(GetRepoFolder(), "workflow-samples", workflowFileName), testcaseFileName, externalConveration); - protected override async Task RunAndVerifyAsync(Testcase testcase, string workflowPath, DeclarativeWorkflowOptions workflowOptions) + protected override async Task RunAndVerifyAsync(Testcase testcase, string workflowPath, DeclarativeWorkflowOptions workflowOptions, TInput input) { const string WorkflowNamespace = "Test.WorkflowProviders"; const string WorkflowPrefix = "Test"; @@ -47,15 +47,19 @@ protected override async Task RunAndVerifyAsync(Testcase testcase, strin workflowProviderName: $"{WorkflowPrefix}WorkflowProvider", WorkflowNamespace, workflowOptions, - (TInput)GetInput(testcase)); + input); - WorkflowEvents workflowEvents = await harness.RunTestcaseAsync(testcase, (TInput)GetInput(testcase)).ConfigureAwait(false); + WorkflowEvents workflowEvents = await harness.RunTestcaseAsync(testcase, input).ConfigureAwait(false); + // Verify no action events are present Assert.Empty(workflowEvents.ActionInvokeEvents); Assert.Empty(workflowEvents.ActionCompleteEvents); + // Verify the associated conversations 
AssertWorkflow.Conversation(workflowOptions.ConversationId, workflowEvents.ConversationEvents, testcase); + // Verify executor events AssertWorkflow.EventCounts(workflowEvents.ExecutorInvokeEvents.Count - 2, testcase); AssertWorkflow.EventCounts(workflowEvents.ExecutorCompleteEvents.Count - 2, testcase); + // Verify action sequences AssertWorkflow.EventSequence(workflowEvents.ExecutorInvokeEvents.Select(e => e.ExecutorId), testcase); } finally diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/DeclarativeWorkflowTest.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/DeclarativeWorkflowTest.cs index 2937444c19..fad4e7b8f7 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/DeclarativeWorkflowTest.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/DeclarativeWorkflowTest.cs @@ -12,10 +12,10 @@ namespace Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests; /// /// Tests execution of workflow created by . 
/// -[Collection("Global")] public sealed class DeclarativeWorkflowTest(ITestOutputHelper output) : WorkflowTest(output) { [Theory] + [InlineData("CheckSystem.yaml", "CheckSystem.json")] [InlineData("SendActivity.yaml", "SendActivity.json")] [InlineData("InvokeAgent.yaml", "InvokeAgent.json")] [InlineData("InvokeAgent.yaml", "InvokeAgent.json", true)] @@ -33,23 +33,29 @@ public Task ValidateCaseAsync(string workflowFileName, string testcaseFileName, public Task ValidateScenarioAsync(string workflowFileName, string testcaseFileName, bool externalConveration = false) => this.RunWorkflowAsync(Path.Combine(GetRepoFolder(), "workflow-samples", workflowFileName), testcaseFileName, externalConveration); - protected override async Task RunAndVerifyAsync(Testcase testcase, string workflowPath, DeclarativeWorkflowOptions workflowOptions) + protected override async Task RunAndVerifyAsync(Testcase testcase, string workflowPath, DeclarativeWorkflowOptions workflowOptions, TInput input) { Workflow workflow = DeclarativeWorkflowBuilder.Build(workflowPath, workflowOptions); WorkflowHarness harness = new(workflow, runId: Path.GetFileNameWithoutExtension(workflowPath)); - WorkflowEvents workflowEvents = await harness.RunTestcaseAsync(testcase, (TInput)GetInput(testcase)).ConfigureAwait(false); + WorkflowEvents workflowEvents = await harness.RunTestcaseAsync(testcase, input).ConfigureAwait(false); + // Verify executor events are present Assert.NotEmpty(workflowEvents.ExecutorInvokeEvents); Assert.NotEmpty(workflowEvents.ExecutorCompleteEvents); + // Verify the associated conversations AssertWorkflow.Conversation(workflowOptions.ConversationId, workflowEvents.ConversationEvents, testcase); + // Verify the agent responses AssertWorkflow.Responses(workflowEvents.AgentResponseEvents, testcase); + // Verify the messages on the workflow conversation await AssertWorkflow.MessagesAsync( GetConversationId(workflowOptions.ConversationId, workflowEvents.ConversationEvents), testcase, 
workflowOptions.AgentProvider); + // Verify action events AssertWorkflow.EventCounts(workflowEvents.ActionInvokeEvents.Count, testcase); AssertWorkflow.EventCounts(workflowEvents.ActionCompleteEvents.Count, testcase, isCompletion: true); + // Verify action sequences AssertWorkflow.EventSequence(workflowEvents.ActionInvokeEvents.Select(e => e.ActionId), testcase); } } diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/IntegrationTest.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/IntegrationTest.cs index 60aa4f38df..deccff658d 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/IntegrationTest.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/IntegrationTest.cs @@ -1,10 +1,14 @@ // Copyright (c) Microsoft. All rights reserved. using System; +using System.Collections.Frozen; using System.Reflection; +using System.Threading.Tasks; +using Azure.Identity; using Microsoft.Agents.AI.Workflows.Declarative.PowerFx; using Microsoft.Bot.ObjectModel; using Microsoft.Extensions.Configuration; +using Shared.IntegrationTests; using Xunit.Abstractions; namespace Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests.Framework; @@ -14,6 +18,21 @@ namespace Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests.Framework; /// public abstract class IntegrationTest : IDisposable { + private IConfigurationRoot? _configuration; + private AzureAIConfiguration? 
_foundryConfiguration; + + protected IConfigurationRoot Configuration => this._configuration ??= InitializeConfig(); + + internal AzureAIConfiguration FoundryConfiguration + { + get + { + this._foundryConfiguration ??= this.Configuration.GetSection("AzureAI").Get(); + Assert.NotNull(this._foundryConfiguration); + return this._foundryConfiguration; + } + } + public TestOutputAdapter Output { get; } protected IntegrationTest(ITestOutputHelper output) @@ -47,7 +66,33 @@ protected static void SetProduct() internal static string FormatVariablePath(string variableName, string? scope = null) => $"{scope ?? WorkflowFormulaState.DefaultScopeName}.{variableName}"; - protected static IConfigurationRoot InitializeConfig() => + protected async ValueTask CreateOptionsAsync(bool externalConversation = false) + { + FrozenDictionary agentMap = await AgentFactory.GetAgentsAsync(this.FoundryConfiguration, this.Configuration); + + IConfiguration workflowConfig = + new ConfigurationBuilder() + .AddInMemoryCollection(agentMap) + .Build(); + + AzureAgentProvider agentProvider = new(this.FoundryConfiguration.Endpoint, new AzureCliCredential()); + + string? 
conversationId = null; + if (externalConversation) + { + conversationId = await agentProvider.CreateConversationAsync().ConfigureAwait(false); + } + + return + new DeclarativeWorkflowOptions(agentProvider) + { + Configuration = workflowConfig, + ConversationId = conversationId, + LoggerFactory = this.Output + }; + } + + private static IConfigurationRoot InitializeConfig() => new ConfigurationBuilder() .AddJsonFile("appsettings.Development.json", true) .AddEnvironmentVariables() diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/WorkflowHarness.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/WorkflowHarness.cs index 797e254800..8ad1def744 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/WorkflowHarness.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/WorkflowHarness.cs @@ -7,6 +7,7 @@ using System.Threading.Tasks; using Microsoft.Agents.AI.Workflows.Declarative.Events; using Shared.Code; +using Xunit.Sdk; namespace Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests.Framework; @@ -17,7 +18,7 @@ internal sealed class WorkflowHarness(Workflow workflow, string runId) public async Task RunTestcaseAsync(Testcase testcase, TInput input) where TInput : notnull { - WorkflowEvents workflowEvents = await this.RunAsync(input); + WorkflowEvents workflowEvents = await this.RunWorkflowAsync(input); int requestCount = (workflowEvents.InputEvents.Count + 1) / 2; int responseCount = 0; while (requestCount > responseCount) @@ -36,7 +37,7 @@ public async Task RunTestcaseAsync(Testcase testcase, TI return workflowEvents; } - private async Task RunAsync(TInput input) where TInput : notnull + public async Task RunWorkflowAsync(TInput input) where TInput : notnull { Console.WriteLine("RUNNING WORKFLOW..."); Checkpointed run = await InProcessExecution.StreamAsync(workflow, input, this._checkpointManager, runId); @@ 
-98,6 +99,14 @@ private static async IAsyncEnumerable MonitorAndDisposeWorkflowRu exitLoop = true; } break; + + case ExecutorFailedEvent failureEvent: + Console.WriteLine($"Executor failed [{failureEvent.ExecutorId}]: {failureEvent.Data?.Message ?? "Unknown"}"); + break; + + case WorkflowErrorEvent errorEvent: + throw errorEvent.Data as Exception ?? new XunitException("Unexpected failure..."); + case DeclarativeActionInvokedEvent actionInvokeEvent: Console.WriteLine($"ACTION: {actionInvokeEvent.ActionId} [{actionInvokeEvent.ActionType}]"); break; diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/WorkflowTest.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/WorkflowTest.cs index a3ad1a5983..ccef59c88e 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/WorkflowTest.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/WorkflowTest.cs @@ -1,17 +1,13 @@ // Copyright (c) Microsoft. All rights reserved. 
using System; -using System.Collections.Frozen; using System.Collections.Generic; using System.IO; using System.Linq; using System.Text.Json; using System.Text.Json.Serialization; using System.Threading.Tasks; -using Azure.Identity; using Microsoft.Extensions.AI; -using Microsoft.Extensions.Configuration; -using Shared.IntegrationTests; using Xunit.Abstractions; using Xunit.Sdk; @@ -25,7 +21,8 @@ public abstract class WorkflowTest(ITestOutputHelper output) : IntegrationTest(o protected abstract Task RunAndVerifyAsync( Testcase testcase, string workflowPath, - DeclarativeWorkflowOptions workflowOptions) where TInput : notnull; + DeclarativeWorkflowOptions workflowOptions, + TInput input) where TInput : notnull; protected Task RunWorkflowAsync( string workflowPath, @@ -36,15 +33,14 @@ protected Task RunWorkflowAsync( this.Output.WriteLine($"TESTCASE: {testcaseFileName}"); Testcase testcase = ReadTestcase(testcaseFileName); - IConfiguration configuration = InitializeConfig(); this.Output.WriteLine($" {testcase.Description}"); return testcase.Setup.Input.Type switch { - nameof(ChatMessage) => this.TestWorkflowAsync(testcase, workflowPath, configuration), - nameof(String) => this.TestWorkflowAsync(testcase, workflowPath, configuration), + nameof(ChatMessage) => this.TestWorkflowAsync(testcase, workflowPath), + nameof(String) => this.TestWorkflowAsync(testcase, workflowPath), _ => throw new NotSupportedException($"Input type '{testcase.Setup.Input.Type}' is not supported."), }; } @@ -52,38 +48,15 @@ protected Task RunWorkflowAsync( protected async Task TestWorkflowAsync( Testcase testcase, string workflowPath, - IConfiguration configuration, bool externalConversation = false) where TInput : notnull { this.Output.WriteLine($"INPUT: {testcase.Setup.Input.Value}"); - AzureAIConfiguration? 
foundryConfig = configuration.GetSection("AzureAI").Get(); - Assert.NotNull(foundryConfig); + DeclarativeWorkflowOptions workflowOptions = await this.CreateOptionsAsync(externalConversation).ConfigureAwait(false); - FrozenDictionary agentMap = await AgentFactory.GetAgentsAsync(foundryConfig, configuration); + TInput input = (TInput)GetInput(testcase); - IConfiguration workflowConfig = - new ConfigurationBuilder() - .AddInMemoryCollection(agentMap) - .Build(); - - AzureAgentProvider agentProvider = new(foundryConfig.Endpoint, new AzureCliCredential()); - - string? conversationId = null; - if (externalConversation) - { - conversationId = await agentProvider.CreateConversationAsync().ConfigureAwait(false); - } - - DeclarativeWorkflowOptions workflowOptions = - new(agentProvider) - { - Configuration = workflowConfig, - ConversationId = conversationId, - LoggerFactory = this.Output - }; - - await this.RunAndVerifyAsync(testcase, workflowPath, workflowOptions); + await this.RunAndVerifyAsync(testcase, workflowPath, workflowOptions, input); } protected static string? GetConversationId(string? 
conversationId, IReadOnlyList conversationEvents) @@ -101,14 +74,6 @@ protected async Task TestWorkflowAsync( return null; } - protected static object GetInput(Testcase testcase) where TInput : notnull => - testcase.Setup.Input.Type switch - { - nameof(ChatMessage) => new ChatMessage(ChatRole.User, testcase.Setup.Input.Value), - nameof(String) => testcase.Setup.Input.Value, - _ => throw new NotSupportedException($"Input type '{testcase.Setup.Input.Type}' is not supported."), - }; - protected static Testcase ReadTestcase(string testcaseFileName) { using Stream testcaseStream = File.Open(Path.Combine("Testcases", testcaseFileName), FileMode.Open); @@ -117,6 +82,14 @@ protected static Testcase ReadTestcase(string testcaseFileName) return testcase; } + private static object GetInput(Testcase testcase) where TInput : notnull => + testcase.Setup.Input.Type switch + { + nameof(ChatMessage) => new ChatMessage(ChatRole.User, testcase.Setup.Input.Value), + nameof(String) => testcase.Setup.Input.Value, + _ => throw new NotSupportedException($"Input type '{testcase.Setup.Input.Type}' is not supported."), + }; + internal static string GetRepoFolder() { DirectoryInfo? current = new(Directory.GetCurrentDirectory()); diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/MediaInputTest.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/MediaInputTest.cs new file mode 100644 index 0000000000..280df88b4b --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/MediaInputTest.cs @@ -0,0 +1,79 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System; +using System.IO; +using System.Net.Http; +using System.Threading.Tasks; +using Azure.AI.Agents.Persistent; +using Azure.Identity; +using Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests.Framework; +using Microsoft.Extensions.AI; +using Xunit.Abstractions; + +namespace Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests; + +/// +/// Tests execution of workflow created by . +/// +public sealed class MediaInputTest(ITestOutputHelper output) : IntegrationTest(output) +{ + private const string WorkflowFileName = "MediaInput.yaml"; + private const string ImageReference = "https://upload.wikimedia.org/wikipedia/commons/5/56/White_shark.jpg"; + + [Fact(Skip = "Service issue prevents this simple case")] + public async Task ValidateImageUrlAsync() + { + this.Output.WriteLine($"Image: {ImageReference}"); + await this.ValidateImageAsync(new UriContent(ImageReference, "image/jpeg")); + } + + [Fact] + public async Task ValidateImageDataAsync() + { + byte[] imageData = await DownloadImageAsync(); + string encodedData = Convert.ToBase64String(imageData); + string imageUrl = $"data:image/png;base64,{encodedData}"; + this.Output.WriteLine($"Image: {imageUrl.Substring(0, 112)}..."); + await this.ValidateImageAsync(new DataContent(imageUrl)); + } + + [Fact] + public async Task ValidateImageUploadAsync() + { + byte[] imageData = await DownloadImageAsync(); + PersistentAgentsClient client = new(this.FoundryConfiguration.Endpoint, new AzureCliCredential()); + using MemoryStream contentStream = new(imageData); + PersistentAgentFileInfo fileInfo = await client.Files.UploadFileAsync(contentStream, PersistentAgentFilePurpose.Agents, "image.jpg"); + try + { + this.Output.WriteLine($"Image: {fileInfo.Id}"); + await this.ValidateImageAsync(new HostedFileContent(fileInfo.Id)); + } + finally + { + await client.Files.DeleteFileAsync(fileInfo.Id); + } + } + + private static async Task DownloadImageAsync() + { + using HttpClient client = new(); + 
client.DefaultRequestHeaders.UserAgent.ParseAdd("Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/110.0"); + return await client.GetByteArrayAsync(new Uri(ImageReference)); + } + + private async Task ValidateImageAsync(AIContent imageContent) + { + ChatMessage inputMessage = new(ChatRole.User, [new TextContent("Here is my image:"), imageContent]); + + DeclarativeWorkflowOptions options = await this.CreateOptionsAsync(); + Workflow workflow = DeclarativeWorkflowBuilder.Build(Path.Combine(Environment.CurrentDirectory, "Workflows", WorkflowFileName), options); + + WorkflowHarness harness = new(workflow, runId: Path.GetFileNameWithoutExtension(WorkflowFileName)); + WorkflowEvents workflowEvents = await harness.RunWorkflowAsync(inputMessage).ConfigureAwait(false); + Assert.Single(workflowEvents.ConversationEvents); + this.Output.WriteLine("CONVERSATION: " + workflowEvents.ConversationEvents[0].ConversationId); + Assert.Single(workflowEvents.AgentResponseEvents); + this.Output.WriteLine("RESPONSE: " + workflowEvents.AgentResponseEvents[0].Response.Text); + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Testcases/CheckSystem.json b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Testcases/CheckSystem.json new file mode 100644 index 0000000000..2e7d4b6f8d --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Testcases/CheckSystem.json @@ -0,0 +1,24 @@ +{ + "description": "Send an activity message.", + "setup": { + "input": { + "type": "String", + "value": "Everything good?" 
+ } + }, + "validation": { + "conversation_count": 1, + "min_action_count": 2, + "max_action_count": -1, + "min_response_count": 0, + "actions": { + "start": [ + "check_system" + ], + "final": [ + "activity_passed", + "check_system_Post" + ] + } + } +} \ No newline at end of file diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Workflows/CheckSystem.yaml b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Workflows/CheckSystem.yaml new file mode 100644 index 0000000000..c3542fb057 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Workflows/CheckSystem.yaml @@ -0,0 +1,57 @@ +kind: Workflow +trigger: + + kind: OnConversationStart + id: workflow_test + actions: + + - kind: ConditionGroup + id: check_system + conditions: + + - condition: =IsBlank(System.Conversation) + id: conversation_check + actions: + - kind: EndDialog + id: conversation_bad + + - condition: =IsBlank(System.Conversation.Id) + id: conversation_id_check1 + actions: + - kind: EndDialog + id: conversation_id_bad1 + + - condition: =IsBlank(System.ConversationId) + id: conversation_id_check2 + actions: + - kind: EndDialog + id: conversation_id_bad2 + + - condition: =IsBlank(System.LastMessage) + id: message_check + actions: + - kind: EndDialog + id: message_bad + + - condition: =IsBlank(System.LastMessage.Id) + id: message_id_check1 + actions: + - kind: EndDialog + id: message_id_bad1 + + - condition: =IsBlank(System.LastMessageId) + id: message_id_check2 + actions: + - kind: EndDialog + id: message_id_bad2 + + - condition: =IsBlank(System.LastMessageText) + id: message_text_check + actions: + - kind: EndDialog + id: message_text_bad + + elseActions: + - kind: SendActivity + id: activity_passed + activity: PASSED! 
diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Workflows/MediaInput.yaml b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Workflows/MediaInput.yaml new file mode 100644 index 0000000000..c2a428f6d4 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Workflows/MediaInput.yaml @@ -0,0 +1,16 @@ +kind: Workflow +trigger: + + kind: OnConversationStart + id: workflow_test + actions: + + - kind: InvokeAzureAgent + id: invoke_vision + conversationId: =System.ConversationId + agent: + name: =Env.FOUNDRY_AGENT_TEST + input: + additionalInstructions: |- + Describe the image contained in the user request, if any; + otherwise, suggest that the user provide an image. diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.UnitTests/DeclarativeWorkflowTest.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.UnitTests/DeclarativeWorkflowTest.cs index ba77761906..7d7aee5418 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.UnitTests/DeclarativeWorkflowTest.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.UnitTests/DeclarativeWorkflowTest.cs @@ -13,6 +13,7 @@ using Microsoft.Extensions.AI; using Moq; using Xunit.Abstractions; +using Xunit.Sdk; namespace Microsoft.Agents.AI.Workflows.Declarative.UnitTests; @@ -21,7 +22,7 @@ namespace Microsoft.Agents.AI.Workflows.Declarative.UnitTests; /// public sealed class DeclarativeWorkflowTest(ITestOutputHelper output) : WorkflowTest(output) { - private List WorkflowEvents { get; set; } = []; + private List WorkflowEvents { get; } = []; private Dictionary WorkflowEventCounts { get; set; } = []; @@ -214,7 +215,7 @@ public void UnsupportedAction(Type type) AdaptiveDialog dialog = dialogBuilder.Build(); WorkflowFormulaState state = new(RecalcEngineFactory.Create()); - Mock mockAgentProvider = CreateMockProvider(); + Mock mockAgentProvider = CreateMockProvider("1"); DeclarativeWorkflowOptions 
options = new(mockAgentProvider.Object); WorkflowActionVisitor visitor = new(new DeclarativeWorkflowExecutor(WorkflowActionVisitor.Steps.Root("anything"), options, state, (message) => DeclarativeWorkflowBuilder.DefaultTransform(message)), state, options); WorkflowElementWalker walker = new(visitor); @@ -254,46 +255,57 @@ private Task RunWorkflowAsync(string workflowPath) => private async Task RunWorkflowAsync(string workflowPath, TInput workflowInput) where TInput : notnull { using StreamReader yamlReader = File.OpenText(Path.Combine("Workflows", workflowPath)); - Mock mockAgentProvider = CreateMockProvider(); + Mock mockAgentProvider = CreateMockProvider($"{workflowInput}"); DeclarativeWorkflowOptions workflowContext = new(mockAgentProvider.Object) { LoggerFactory = this.Output }; Workflow workflow = DeclarativeWorkflowBuilder.Build(yamlReader, workflowContext); await using StreamingRun run = await InProcessExecution.StreamAsync(workflow, workflowInput); - this.WorkflowEvents = run.WatchStreamAsync().ToEnumerable().ToList(); - foreach (WorkflowEvent workflowEvent in this.WorkflowEvents) + await foreach (WorkflowEvent workflowEvent in run.WatchStreamAsync()) { - if (workflowEvent is ExecutorInvokedEvent invokeEvent) - { - ActionExecutorResult? message = invokeEvent.Data as ActionExecutorResult; - this.Output.WriteLine($"EXEC: {invokeEvent.ExecutorId} << {message?.ExecutorId ?? "?"} [{message?.Result ?? 
"-"}]"); - } - else if (workflowEvent is DeclarativeActionInvokedEvent actionInvokeEvent) - { - this.Output.WriteLine($"ACTION ENTER: {actionInvokeEvent.ActionId}"); - } - else if (workflowEvent is DeclarativeActionCompletedEvent actionCompleteEvent) - { - this.Output.WriteLine($"ACTION EXIT: {actionCompleteEvent.ActionId}"); - } - else if (workflowEvent is MessageActivityEvent activityEvent) - { - this.Output.WriteLine($"ACTIVITY: {activityEvent.Message}"); - } - else if (workflowEvent is AgentRunResponseEvent messageEvent) + this.WorkflowEvents.Add(workflowEvent); + + switch (workflowEvent) { - this.Output.WriteLine($"MESSAGE: {messageEvent.Response.Messages[0].Text.Trim()}"); + case ExecutorInvokedEvent invokeEvent: + ActionExecutorResult? message = invokeEvent.Data as ActionExecutorResult; + this.Output.WriteLine($"EXEC: {invokeEvent.ExecutorId} << {message?.ExecutorId ?? "?"} [{message?.Result ?? "-"}]"); + break; + + case DeclarativeActionInvokedEvent actionInvokeEvent: + this.Output.WriteLine($"ACTION ENTER: {actionInvokeEvent.ActionId}"); + break; + + case DeclarativeActionCompletedEvent actionCompleteEvent: + this.Output.WriteLine($"ACTION EXIT: {actionCompleteEvent.ActionId}"); + break; + + case MessageActivityEvent activityEvent: + this.Output.WriteLine($"ACTIVITY: {activityEvent.Message}"); + break; + + case AgentRunResponseEvent messageEvent: + this.Output.WriteLine($"MESSAGE: {messageEvent.Response.Messages[0].Text.Trim()}"); + break; + + case ExecutorFailedEvent failureEvent: + Console.WriteLine($"Executor failed [{failureEvent.ExecutorId}]: {failureEvent.Data?.Message ?? "Unknown"}"); + break; + + case WorkflowErrorEvent errorEvent: + throw errorEvent.Data as Exception ?? 
new XunitException("Unexpected failure..."); } } + this.WorkflowEventCounts = this.WorkflowEvents.GroupBy(e => e.GetType()).ToDictionary(e => e.Key, e => e.Count()); } - private static Mock CreateMockProvider() + private static Mock CreateMockProvider(string input) { Mock mockAgentProvider = new(MockBehavior.Strict); mockAgentProvider.Setup(provider => provider.CreateConversationAsync(It.IsAny())).Returns(() => Task.FromResult(Guid.NewGuid().ToString("N"))); - mockAgentProvider.Setup(provider => provider.CreateMessageAsync(It.IsAny(), It.IsAny(), It.IsAny())).Returns(Task.FromResult(new ChatMessage(ChatRole.Assistant, "Hi!"))); + mockAgentProvider.Setup(provider => provider.CreateMessageAsync(It.IsAny(), It.IsAny(), It.IsAny())).Returns(Task.FromResult(new ChatMessage(ChatRole.Assistant, input))); return mockAgentProvider; } } diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.UnitTests/ObjectModel/WorkflowActionExecutorTest.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.UnitTests/ObjectModel/WorkflowActionExecutorTest.cs index 62f9231e68..9b667b55f7 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.UnitTests/ObjectModel/WorkflowActionExecutorTest.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.UnitTests/ObjectModel/WorkflowActionExecutorTest.cs @@ -2,10 +2,10 @@ using System; using System.Linq; +using System.Threading; using System.Threading.Tasks; using Microsoft.Agents.AI.Workflows.Declarative.Extensions; using Microsoft.Agents.AI.Workflows.Declarative.Interpreter; -using Microsoft.Agents.AI.Workflows.Declarative.Kit; using Microsoft.Agents.AI.Workflows.Declarative.PowerFx; using Microsoft.Bot.ObjectModel; using Microsoft.PowerFx.Types; @@ -70,7 +70,7 @@ protected static TAction AssignParent(DialogAction.Builder actionBuilde internal sealed class TestWorkflowExecutor() : Executor("test_workflow") { - public override async ValueTask HandleAsync(WorkflowFormulaState message, IWorkflowContext context) 
=> - await context.SendMessageAsync(new ActionExecutorResult(this.Id)).ConfigureAwait(false); + public override async ValueTask HandleAsync(WorkflowFormulaState message, IWorkflowContext context, CancellationToken cancellationToken) => + await context.SendResultMessageAsync(this.Id, cancellationToken).ConfigureAwait(false); } } diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/AgentWorkflowBuilderTests.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/AgentWorkflowBuilderTests.cs index 8a534269f0..e86df11d6f 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/AgentWorkflowBuilderTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/AgentWorkflowBuilderTests.cs @@ -6,6 +6,7 @@ using System.Runtime.CompilerServices; using System.Text; using System.Text.Json; +using System.Text.RegularExpressions; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.AI; @@ -159,7 +160,7 @@ public override async IAsyncEnumerable RunStreamingAsync private sealed class DoubleEchoAgentThread() : InMemoryAgentThread(); - [Fact(Skip = "issue #1109")] + [Fact] public async Task BuildConcurrent_AgentsRunInParallelAsync() { StrongBox> barrier = new(); @@ -182,10 +183,10 @@ public async Task BuildConcurrent_AgentsRunInParallelAsync() // TODO: https://github.com/microsoft/agent-framework/issues/784 // These asserts are flaky until we guarantee message delivery order. 
- //Assert.Single(Regex.Matches(updateText, "agent1")); - //Assert.Single(Regex.Matches(updateText, "agent2")); - //Assert.Equal(4, Regex.Matches(updateText, "abc").Count); - //Assert.Equal(2, result.Count); + Assert.Single(Regex.Matches(updateText, "agent1")); + Assert.Single(Regex.Matches(updateText, "agent2")); + Assert.Equal(4, Regex.Matches(updateText, "abc").Count); + Assert.Equal(2, result.Count); } } diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/ChatProtocolExecutorTests.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/ChatProtocolExecutorTests.cs new file mode 100644 index 0000000000..7fa7d42316 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/ChatProtocolExecutorTests.cs @@ -0,0 +1,266 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using Microsoft.Agents.AI.Workflows.Checkpointing; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI.Workflows.UnitTests; + +/// +/// Tests for to verify message routing behavior. +/// +public class ChatProtocolExecutorTests +{ + private sealed class TestChatProtocolExecutor : ChatProtocolExecutor + { + public List ReceivedMessages { get; } = []; + public int TurnCount { get; private set; } + + public TestChatProtocolExecutor(string id = "test-executor", ChatProtocolExecutorOptions? options = null) + : base(id, options) + { + } + + protected override async ValueTask TakeTurnAsync( + List messages, + IWorkflowContext context, + bool? 
emitEvents, + CancellationToken cancellationToken = default) + { + this.ReceivedMessages.AddRange(messages); + this.TurnCount++; + + // Send messages back to context so they can be collected + await context.SendMessageAsync(messages, cancellationToken: cancellationToken); + } + } + + private sealed class TestWorkflowContext : IWorkflowContext + { + public List SentMessages { get; } = []; + + public ValueTask AddEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken = default) => + default; + + public ValueTask YieldOutputAsync(object output, CancellationToken cancellationToken = default) => + default; + + public ValueTask RequestHaltAsync() => + default; + + public ValueTask QueueClearScopeAsync(string? scopeName = null, CancellationToken cancellationToken = default) => + default; + + public ValueTask QueueStateUpdateAsync(string key, T? value, string? scopeName = null, CancellationToken cancellationToken = default) => + default; + + public ValueTask ReadStateAsync(string key, string? scopeName = null, CancellationToken cancellationToken = default) => + default; + + public ValueTask> ReadStateKeysAsync(string? scopeName = null, CancellationToken cancellationToken = default) => + default; + + public ValueTask SendMessageAsync(object message, string? targetId = null, CancellationToken cancellationToken = default) + { + this.SentMessages.Add(message); + return default; + } + + public IReadOnlyDictionary? 
TraceContext => null; + } + + [Fact] + public async Task ChatProtocolExecutor_Handles_ListOfChatMessagesAsync() + { + // Arrange + var executor = new TestChatProtocolExecutor(); + var context = new TestWorkflowContext(); + + List messages = + [ + new ChatMessage(ChatRole.User, "Hello"), + new ChatMessage(ChatRole.User, "World") + ]; + + // Act - Send List via ExecuteAsync + await executor.ExecuteAsync(messages, new TypeId(typeof(List)), context); + await executor.TakeTurnAsync(new TurnToken(emitEvents: false), context); + + // Assert + executor.ReceivedMessages.Should().HaveCount(2); + executor.ReceivedMessages[0].Text.Should().Be("Hello"); + executor.ReceivedMessages[1].Text.Should().Be("World"); + executor.TurnCount.Should().Be(1); + } + + [Fact] + public async Task ChatProtocolExecutor_Handles_ArrayOfChatMessagesAsync() + { + // Arrange + var executor = new TestChatProtocolExecutor(); + var context = new TestWorkflowContext(); + + ChatMessage[] messages = + [ + new ChatMessage(ChatRole.System, "System message"), + new ChatMessage(ChatRole.User, "User query"), + new ChatMessage(ChatRole.Assistant, "Agent reply") + ]; + + // Act - Send as ChatMessage[] + await executor.ExecuteAsync(messages, new TypeId(typeof(ChatMessage[])), context); + await executor.TakeTurnAsync(new TurnToken(emitEvents: false), context); + + // Assert + executor.ReceivedMessages.Should().HaveCount(3); + executor.ReceivedMessages[0].Role.Should().Be(ChatRole.System); + executor.ReceivedMessages[1].Role.Should().Be(ChatRole.User); + executor.ReceivedMessages[2].Role.Should().Be(ChatRole.Assistant); + executor.TurnCount.Should().Be(1); + } + + [Fact] + public async Task ChatProtocolExecutor_Handles_SingleChatMessageAsync() + { + // Arrange + var executor = new TestChatProtocolExecutor(); + var context = new TestWorkflowContext(); + + var message = new ChatMessage(ChatRole.User, "Single message"); + + // Act - Send as single ChatMessage + await executor.ExecuteAsync(message, new 
TypeId(typeof(ChatMessage)), context); + await executor.TakeTurnAsync(new TurnToken(emitEvents: false), context); + + // Assert + executor.ReceivedMessages.Should().HaveCount(1); + executor.ReceivedMessages[0].Text.Should().Be("Single message"); + executor.TurnCount.Should().Be(1); + } + + [Fact] + public async Task ChatProtocolExecutor_AccumulatesAndClearsMessagesPerTurnAsync() + { + var executor = new TestChatProtocolExecutor(); + var context = new TestWorkflowContext(); + + // Send multiple message batches before taking a turn + await executor.ExecuteAsync(new ChatMessage(ChatRole.User, "Message 1"), new TypeId(typeof(ChatMessage)), context); + await executor.ExecuteAsync(new List + { + new(ChatRole.User, "Message 2"), + new(ChatRole.User, "Message 3") + }, new TypeId(typeof(List)), context); + await executor.ExecuteAsync(new ChatMessage[] { new(ChatRole.User, "Message 4") }, new TypeId(typeof(ChatMessage[])), context); + + await executor.TakeTurnAsync(new TurnToken(emitEvents: false), context); + + executor.ReceivedMessages.Should().HaveCount(4); + executor.ReceivedMessages.Select(m => m.Text).Should().Equal("Message 1", "Message 2", "Message 3", "Message 4"); + executor.TurnCount.Should().Be(1); + + executor.ReceivedMessages.Clear(); + + // Second turn should process new messages only + await executor.ExecuteAsync(new List + { + new(ChatRole.User, "Second batch") + }, new TypeId(typeof(List)), context); + await executor.TakeTurnAsync(new TurnToken(emitEvents: false), context); + + executor.ReceivedMessages.Should().HaveCount(1); + executor.ReceivedMessages[0].Text.Should().Be("Second batch"); + executor.TurnCount.Should().Be(2); + } + + [Fact] + public async Task ChatProtocolExecutor_WithStringRole_ConvertsStringToMessageAsync() + { + var executor = new TestChatProtocolExecutor( + options: new ChatProtocolExecutorOptions + { + StringMessageChatRole = ChatRole.User + }); + var context = new TestWorkflowContext(); + + await executor.ExecuteAsync("String 
message", new TypeId(typeof(string)), context); + await executor.TakeTurnAsync(new TurnToken(emitEvents: false), context); + + executor.ReceivedMessages.Should().HaveCount(1); + executor.ReceivedMessages[0].Role.Should().Be(ChatRole.User); + executor.ReceivedMessages[0].Text.Should().Be("String message"); + } + + [Fact] + public async Task ChatProtocolExecutor_EmptyCollection_HandledCorrectlyAsync() + { + var executor = new TestChatProtocolExecutor(); + var context = new TestWorkflowContext(); + + await executor.ExecuteAsync(new List(), new TypeId(typeof(List)), context); + await executor.ExecuteAsync(Array.Empty(), new TypeId(typeof(ChatMessage[])), context); + await executor.TakeTurnAsync(new TurnToken(emitEvents: false), context); + + executor.ReceivedMessages.Should().BeEmpty(); + executor.TurnCount.Should().Be(1); + } + + [Theory] + [InlineData(typeof(List))] + [InlineData(typeof(ChatMessage[]))] + public async Task ChatProtocolExecutor_RoutesCollectionTypesAsync(Type collectionType) + { + var executor = new TestChatProtocolExecutor(); + var context = new TestWorkflowContext(); + + var sourceMessages = new[] { new ChatMessage(ChatRole.User, "Test message") }; + object messagesToSend = collectionType == typeof(List) ? 
sourceMessages.ToList() : sourceMessages; + + await executor.ExecuteAsync(messagesToSend, new TypeId(collectionType), context); + await executor.TakeTurnAsync(new TurnToken(emitEvents: false), context); + + executor.ReceivedMessages.Should().HaveCount(1); + executor.ReceivedMessages[0].Text.Should().Be("Test message"); + } + + [Fact] + public async Task ChatProtocolExecutor_MultipleTurns_EachTurnProcessesSeparatelyAsync() + { + var executor = new TestChatProtocolExecutor(); + var context = new TestWorkflowContext(); + + await executor.ExecuteAsync(new List { new(ChatRole.User, "Turn 1") }, new TypeId(typeof(List)), context); + await executor.TakeTurnAsync(new TurnToken(emitEvents: false), context); + + executor.ReceivedMessages.Should().HaveCount(1); + + await executor.ExecuteAsync(new ChatMessage(ChatRole.User, "Turn 2"), new TypeId(typeof(ChatMessage)), context); + await executor.TakeTurnAsync(new TurnToken(emitEvents: false), context); + + executor.ReceivedMessages.Should().HaveCount(2); + executor.ReceivedMessages[0].Text.Should().Be("Turn 1"); + executor.ReceivedMessages[1].Text.Should().Be("Turn 2"); + executor.TurnCount.Should().Be(2); + } + + [Fact] + public async Task ChatProtocolExecutor_InitialWorkflowMessages_RoutedCorrectlyAsync() + { + var executor = new TestChatProtocolExecutor(); + var context = new TestWorkflowContext(); + + List initialMessages = [new ChatMessage(ChatRole.User, "Kick off the workflow")]; + + await executor.ExecuteAsync(initialMessages, new TypeId(typeof(List)), context); + await executor.TakeTurnAsync(new TurnToken(emitEvents: false), context); + + executor.ReceivedMessages.Should().NotBeEmpty(); + executor.ReceivedMessages.Should().HaveCount(1); + executor.ReceivedMessages[0].Text.Should().Be("Kick off the workflow"); + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/InProcessStateTests.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/InProcessStateTests.cs index debde5fc95..014c51b3c0 100644 --- 
a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/InProcessStateTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/InProcessStateTests.cs @@ -43,12 +43,12 @@ Func> Creat return async (turn, context, cancellation) => { - TState? state = await context.ReadStateAsync(stateKey.Key, stateKey.ScopeId.ScopeName) + TState? state = await context.ReadStateAsync(stateKey.Key, stateKey.ScopeId.ScopeName, cancellation) .ConfigureAwait(false); state = action(state); - await context.QueueStateUpdateAsync(stateKey.Key, state, stateKey.ScopeId.ScopeName); + await context.QueueStateUpdateAsync(stateKey.Key, state, stateKey.ScopeId.ScopeName, cancellation); return turn.Next; }; diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/JsonSerializationTests.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/JsonSerializationTests.cs index 7447f46128..cd0f910ddb 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/JsonSerializationTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/JsonSerializationTests.cs @@ -33,7 +33,7 @@ private static JsonSerializerOptions TestCustomSerializedJsonOptions private static EdgeId TakeEdgeId() => new(Interlocked.Increment(ref s_nextEdgeId)); - private static T RunJsonRoundtrip(T value, JsonSerializerOptions? externalOptions = null, Expression>? predicate = null) + internal static T RunJsonRoundtrip(T value, JsonSerializerOptions? externalOptions = null, Expression>? 
predicate = null) { JsonMarshaller marshaller = new(externalOptions); @@ -172,7 +172,7 @@ private static ValueTask> CreateTestWorkflowAsync() return builder.BuildAsync(); } - private static async ValueTask CreateTestWorkflowInfoAsync() + internal static async ValueTask CreateTestWorkflowInfoAsync() { Workflow testWorkflow = await CreateTestWorkflowAsync().ConfigureAwait(false); return testWorkflow.ToWorkflowInfo(); diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/PortableValueTests.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/PortableValueTests.cs new file mode 100644 index 0000000000..86ffed0ab4 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/PortableValueTests.cs @@ -0,0 +1,130 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Diagnostics.CodeAnalysis; +using System.Threading.Tasks; +using FluentAssertions; +using Microsoft.Agents.AI.Workflows.Checkpointing; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI.Workflows.UnitTests; + +public class PortableValueTests +{ + [SuppressMessage("Performance", "CA1812", Justification = "This is used as a Never/Bottom type.")] + private sealed class Never + { + private Never() { } + } + + [Theory] + [InlineData("string")] + [InlineData(42)] + [InlineData(true)] + [InlineData(3.14)] + public async Task Test_PortableValueRoundtripAsync(T value) + { + value.Should().NotBeNull(); + + PortableValue portableValue = new(value); + + portableValue.Is(out _).Should().BeFalse(); + portableValue.Is(out T? returnedValue).Should().BeTrue(); + returnedValue.Should().Be(value); + } + + [Fact] + public async Task Test_PortableValueRoundtripObjectAsync() + { + ChatMessage value = new(ChatRole.User, "Hello?"); + + PortableValue portableValue = new(value); + + portableValue.Is(out _).Should().BeFalse(); + portableValue.Is(out ChatMessage? 
returnedValue).Should().BeTrue(); + returnedValue.Should().Be(value); + } + + [Theory] + [InlineData("string")] + [InlineData(42)] + [InlineData(true)] + [InlineData(3.14)] + public async Task Test_DelayedSerializationRoundtripAsync(T value) + { + value.Should().NotBeNull(); + + TestDelayedDeserialization delayed = new(value); + PortableValue portableValue = new(delayed); + + portableValue.Is(out _).Should().BeFalse(); + portableValue.Is(out object? obj).Should().BeTrue(); + obj.Should().NotBeOfType(); + obj.Should().BeOfType() + .And.Subject.As() + .As().Should().Be(value); + + portableValue.Is(out T? returnedValue).Should().BeTrue(); + returnedValue.Should().Be(value); + } + + [Fact] + public async Task Test_DelayedSerializationRoundtripObjectAsync() + { + ChatMessage value = new(ChatRole.User, "Hello?"); + + TestDelayedDeserialization delayed = new(value); + PortableValue portableValue = new(delayed); + + portableValue.Is(out _).Should().BeFalse(); + portableValue.Is(out object? obj).Should().BeTrue(); + obj.Should().NotBeOfType(); + obj.Should().BeOfType() + .And.Subject.As() + .As().Should().Be(value); + + portableValue.Is(out ChatMessage? returnedValue).Should().BeTrue(); + returnedValue.Should().Be(value); + } + + private sealed class TestDelayedDeserialization : IDelayedDeserialization + { + [NotNull] + public T Value { get; } + + public TestDelayedDeserialization([DisallowNull] T value) + { + this.Value = value; + } + + public TValue Deserialize() + { + if (typeof(TValue) == typeof(object)) + { + return (TValue)(object)new PortableValue(this.Value); + } + + if (this.Value is TValue value) + { + return value; + } + + throw new InvalidOperationException(); + } + + public object? 
Deserialize(Type targetType) + { + if (targetType == typeof(object)) + { + return new PortableValue(this.Value); + } + + if (targetType.IsInstanceOfType(this.Value)) + { + return this.Value; + } + + return null; + } + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/ReflectionSmokeTest.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/ReflectionSmokeTest.cs index dd8470b11c..5027028387 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/ReflectionSmokeTest.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/ReflectionSmokeTest.cs @@ -1,6 +1,7 @@ // Copyright (c) Microsoft. All rights reserved. using System; +using System.Threading; using System.Threading.Tasks; using Microsoft.Agents.AI.Workflows.Execution; using Microsoft.Agents.AI.Workflows.Reflection; @@ -21,7 +22,7 @@ public bool InvokedHandler public class DefaultHandler() : BaseTestExecutor(nameof(DefaultHandler)), IMessageHandler { - public ValueTask HandleAsync(object message, IWorkflowContext context) + public ValueTask HandleAsync(object message, IWorkflowContext context, CancellationToken cancellationToken = default) { this.OnInvokedHandler(); return this.Handler(message, context); @@ -36,7 +37,7 @@ public Func Handler public class TypedHandler() : BaseTestExecutor>(nameof(TypedHandler)), IMessageHandler { - public ValueTask HandleAsync(TInput message, IWorkflowContext context) + public ValueTask HandleAsync(TInput message, IWorkflowContext context, CancellationToken cancellationToken = default) { this.OnInvokedHandler(); return this.Handler(message, context); @@ -51,7 +52,7 @@ public Func Handler public class TypedHandlerWithOutput() : BaseTestExecutor>(nameof(TypedHandlerWithOutput)), IMessageHandler { - public ValueTask HandleAsync(TInput message, IWorkflowContext context) + public ValueTask HandleAsync(TInput message, IWorkflowContext context, CancellationToken cancellationToken) { this.OnInvokedHandler(); return this.Handler(message, context); 
diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/01_Simple_Workflow_Sequential.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/01_Simple_Workflow_Sequential.cs index 2e89dabd9e..7ec83b4d03 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/01_Simple_Workflow_Sequential.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/01_Simple_Workflow_Sequential.cs @@ -3,6 +3,7 @@ using System; using System.IO; using System.Linq; +using System.Threading; using System.Threading.Tasks; using Microsoft.Agents.AI.Workflows.InProc; using Microsoft.Agents.AI.Workflows.Reflection; @@ -44,17 +45,17 @@ public static async ValueTask RunAsync(TextWriter writer, ExecutionMode executio internal sealed class UppercaseExecutor() : ReflectingExecutor("UppercaseExecutor"), IMessageHandler { - public async ValueTask HandleAsync(string message, IWorkflowContext context) => + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) => message.ToUpperInvariant(); } internal sealed class ReverseTextExecutor() : ReflectingExecutor("ReverseTextExecutor"), IMessageHandler { - public async ValueTask HandleAsync(string message, IWorkflowContext context) + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) { string result = string.Concat(message.Reverse()); - await context.YieldOutputAsync(result).ConfigureAwait(false); + await context.YieldOutputAsync(result, cancellationToken).ConfigureAwait(false); return result; } } diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/02_Simple_Workflow_Condition.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/02_Simple_Workflow_Condition.cs index dd02f93594..5b9a0712ba 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/02_Simple_Workflow_Condition.cs +++ 
b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/02_Simple_Workflow_Condition.cs @@ -3,6 +3,7 @@ using System; using System.IO; using System.Linq; +using System.Threading; using System.Threading.Tasks; using Microsoft.Agents.AI.Workflows.InProc; using Microsoft.Agents.AI.Workflows.Reflection; @@ -56,7 +57,7 @@ public static async ValueTask RunAsync(TextWriter writer, ExecutionMode internal sealed class DetectSpamExecutor(string id, params string[] spamKeywords) : ReflectingExecutor(id), IMessageHandler { - public async ValueTask HandleAsync(string message, IWorkflowContext context) => + public async ValueTask HandleAsync(string message, IWorkflowContext context, CancellationToken cancellationToken = default) => spamKeywords.Any(keyword => message.IndexOf(keyword, StringComparison.OrdinalIgnoreCase) >= 0); } @@ -64,7 +65,7 @@ internal sealed class RespondToMessageExecutor(string id) : ReflectingExecutor (this.LowerBound + this.UpperBound) / 2; private int _currGuess = -1; - public async ValueTask HandleAsync(NumberSignal message, IWorkflowContext context) + public async ValueTask HandleAsync(NumberSignal message, IWorkflowContext context, CancellationToken cancellationToken = default) { switch (message) { case NumberSignal.Matched: - await context.YieldOutputAsync($"Guessed the number: {this._currGuess}") + await context.YieldOutputAsync($"Guessed the number: {this._currGuess}", cancellationToken) .ConfigureAwait(false); break; @@ -106,7 +106,7 @@ public JudgeExecutor(string id, int targetNumber) : base(id) this._targetNumber = targetNumber; } - public async ValueTask HandleAsync(int message, IWorkflowContext context) + public async ValueTask HandleAsync(int message, IWorkflowContext context, CancellationToken cancellationToken = default) { this.Tries = this.Tries is int tries ? 
tries + 1 : 1; @@ -118,12 +118,12 @@ public async ValueTask HandleAsync(int message, IWorkflowContext c protected internal override ValueTask OnCheckpointingAsync(IWorkflowContext context, CancellationToken cancellationToken = default) { - return context.QueueStateUpdateAsync("TryCount", this.Tries); + return context.QueueStateUpdateAsync("TryCount", this.Tries, cancellationToken: cancellationToken); } protected internal override async ValueTask OnCheckpointRestoredAsync(IWorkflowContext context, CancellationToken cancellationToken = default) { - this.Tries = await context.ReadStateAsync("TryCount").ConfigureAwait(false) ?? 0; + this.Tries = await context.ReadStateAsync("TryCount", cancellationToken: cancellationToken).ConfigureAwait(false) ?? 0; } public ValueTask ResetAsync() diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/08_Subworkflow_Simple.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/08_Subworkflow_Simple.cs index d865a6275c..35b739fa44 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/08_Subworkflow_Simple.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/08_Subworkflow_Simple.cs @@ -56,7 +56,7 @@ public static async ValueTask> RunAsync(TextWriter wr return results; } - private static ValueTask ProcessTextAsync(TextProcessingRequest request, IWorkflowContext context, CancellationToken cancellation = default) + private static ValueTask ProcessTextAsync(TextProcessingRequest request, IWorkflowContext context, CancellationToken cancellationToken = default) { int wordCount = 0; int charCount = 0; @@ -67,7 +67,7 @@ private static ValueTask ProcessTextAsync(TextProcessingRequest request, IWorkfl charCount = request.Text.Length; } - return context.YieldOutputAsync(new TextProcessingResult(request.TaskId, request.Text, wordCount, charCount)); + return context.YieldOutputAsync(new TextProcessingResult(request.TaskId, request.Text, wordCount, charCount), cancellationToken); } 
private sealed class TextProcessingOrchestrator() : Executor("TextOrchestrator") diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SpecializedExecutorSmokeTests.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SpecializedExecutorSmokeTests.cs index 6b883ebd1e..9aea98f068 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SpecializedExecutorSmokeTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SpecializedExecutorSmokeTests.cs @@ -115,28 +115,28 @@ internal sealed class TestWorkflowContext : IWorkflowContext { public List> Updates { get; } = []; - public ValueTask AddEventAsync(WorkflowEvent workflowEvent) => + public ValueTask AddEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken = default) => default; - public ValueTask YieldOutputAsync(object output) => + public ValueTask YieldOutputAsync(object output, CancellationToken cancellationToken = default) => default; public ValueTask RequestHaltAsync() => default; - public ValueTask QueueClearScopeAsync(string? scopeName = null) => + public ValueTask QueueClearScopeAsync(string? scopeName = null, CancellationToken cancellationToken = default) => default; - public ValueTask QueueStateUpdateAsync(string key, T? value, string? scopeName = null) => + public ValueTask QueueStateUpdateAsync(string key, T? value, string? scopeName = null, CancellationToken cancellationToken = default) => default; - public ValueTask ReadStateAsync(string key, string? scopeName = null) => + public ValueTask ReadStateAsync(string key, string? scopeName = null, CancellationToken cancellationToken = default) => throw new NotImplementedException(); - public ValueTask> ReadStateKeysAsync(string? scopeName = null) => + public ValueTask> ReadStateKeysAsync(string? scopeName = null, CancellationToken cancellationToken = default) => throw new NotImplementedException(); - public ValueTask SendMessageAsync(object message, string? 
targetId = null) + public ValueTask SendMessageAsync(object message, string? targetId = null, CancellationToken cancellationToken = default) { if (message is List messages) { diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/StateManagerTests.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/StateManagerTests.cs index 4bb0746996..fc16fd6600 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/StateManagerTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/StateManagerTests.cs @@ -4,7 +4,9 @@ using System.Collections.Generic; using System.Threading.Tasks; using FluentAssertions; +using Microsoft.Agents.AI.Workflows.Checkpointing; using Microsoft.Agents.AI.Workflows.Execution; +using Microsoft.Extensions.AI; namespace Microsoft.Agents.AI.Workflows.UnitTests; @@ -451,4 +453,119 @@ private static async Task RunConflictingUpdatesTest_WriteVsClearAsync(string? sc await act.Should().NotThrowAsync("writes to private scopes should not be visible across executors"); } } + + private static void VerifyIs(PortableValue? candidatePV, TExpectedType value) + { + candidatePV.Should().NotBeNull(); + candidatePV.Is(out TExpectedType? candidateValue).Should().BeTrue(); + candidateValue.Should().Be(value); + } + + private static void VerifyIsNot(PortableValue? candidatePV) + { + candidatePV.Should().NotBeNull(); + candidatePV.Is(out TExpectedType? 
_).Should().BeFalse(); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task Test_LoadPortableValueStateAsync(bool publishStateUpdates) + { + ScopeId scope = new("executor1"); + const string StringValue = "string"; + const int IntValue = 42; + ScopeKey ScopeKey = new("executor1", "scope", "key"); + PortableValue PortableValueValue = new(StringValue); + + // Arrange + StateManager manager = new(); + await manager.WriteStateAsync(scope, nameof(StringValue), StringValue); + await manager.WriteStateAsync(scope, nameof(IntValue), IntValue); + await manager.WriteStateAsync(scope, nameof(ScopeKey), ScopeKey); + await manager.WriteStateAsync(scope, nameof(PortableValueValue), PortableValueValue); + + if (publishStateUpdates) + { + await manager.PublishUpdatesAsync(tracer: null); + } + + // Act & Assert - Read as the original types + PortableValue? stringAsPV = await manager.ReadStateAsync(scope, nameof(StringValue)); + VerifyIs(stringAsPV, StringValue); + VerifyIsNot(stringAsPV); + VerifyIsNot(stringAsPV); + VerifyIsNot(stringAsPV); + + PortableValue? intAsPV = await manager.ReadStateAsync(scope, nameof(IntValue)); + VerifyIsNot(intAsPV); + VerifyIs(intAsPV, IntValue); + VerifyIsNot(intAsPV); + VerifyIsNot(intAsPV); + + PortableValue? scopeKeyAsPV = await manager.ReadStateAsync(scope, nameof(ScopeKey)); + VerifyIsNot(scopeKeyAsPV); + VerifyIsNot(scopeKeyAsPV); + VerifyIs(scopeKeyAsPV, ScopeKey); + VerifyIsNot(scopeKeyAsPV); + + PortableValue? 
pvAsPV = await manager.ReadStateAsync(scope, nameof(PortableValueValue)); + VerifyIs(pvAsPV, StringValue); + VerifyIsNot(pvAsPV); + VerifyIsNot(pvAsPV); + + // Check that we don't double-wrap stored PortableValues on the out path + VerifyIsNot(pvAsPV); + } + + [Fact] + public async Task Test_LoadPortableValueState_AfterSerializationAsync() + { + ScopeId scope = new("executor1"); + const string StringValue = "string"; + const int IntValue = 42; + ScopeKey ScopeKey = new("executor1", "scope", "key"); + PortableValue PortableValueValue = new(StringValue); + + // Arrange + StateManager manager = new(); + await manager.WriteStateAsync(scope, nameof(StringValue), StringValue); + await manager.WriteStateAsync(scope, nameof(IntValue), IntValue); + await manager.WriteStateAsync(scope, nameof(ScopeKey), ScopeKey); + await manager.WriteStateAsync(scope, nameof(PortableValueValue), PortableValueValue); + + await manager.PublishUpdatesAsync(tracer: null); + + Dictionary exportedState = await manager.ExportStateAsync(); + Dictionary serializedState = JsonSerializationTests.RunJsonRoundtrip(exportedState); + Checkpoint testCheckpoint = new(0, await JsonSerializationTests.CreateTestWorkflowInfoAsync(), new([], [], []), serializedState, new()); + + manager = new(); + await manager.ImportStateAsync(testCheckpoint); + + // Act & Assert - Read as the original types + PortableValue? stringAsPV = await manager.ReadStateAsync(scope, nameof(StringValue)); + VerifyIs(stringAsPV, StringValue); + VerifyIsNot(stringAsPV); + VerifyIsNot(stringAsPV); + + PortableValue? intAsPV = await manager.ReadStateAsync(scope, nameof(IntValue)); + VerifyIsNot(intAsPV); + VerifyIs(intAsPV, IntValue); + VerifyIsNot(intAsPV); + + PortableValue? scopeKeyAsPV = await manager.ReadStateAsync(scope, nameof(ScopeKey)); + VerifyIsNot(scopeKeyAsPV); + VerifyIsNot(scopeKeyAsPV); + VerifyIs(scopeKeyAsPV, ScopeKey); + VerifyIsNot(scopeKeyAsPV); + + PortableValue? 
pvAsPV = await manager.ReadStateAsync(scope, nameof(PortableValueValue)); + VerifyIs(pvAsPV, StringValue); + VerifyIsNot(pvAsPV); + VerifyIsNot(pvAsPV); + + // Check that we don't double-wrap stored PortableValues on the out path + VerifyIsNot(pvAsPV); + } } diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestRunContext.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestRunContext.cs index c996bdb4e1..17935892a6 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestRunContext.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestRunContext.cs @@ -15,36 +15,36 @@ private sealed class BoundContext( TestRunContext runnerContext, IReadOnlyDictionary? traceContext) : IWorkflowContext { - public ValueTask AddEventAsync(WorkflowEvent workflowEvent) - => runnerContext.AddEventAsync(workflowEvent); + public ValueTask AddEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken = default) + => runnerContext.AddEventAsync(workflowEvent, cancellationToken); - public ValueTask YieldOutputAsync(object output) - => this.AddEventAsync(new WorkflowOutputEvent(output, executorId)); + public ValueTask YieldOutputAsync(object output, CancellationToken cancellationToken = default) + => this.AddEventAsync(new WorkflowOutputEvent(output, executorId), cancellationToken); public ValueTask RequestHaltAsync() => this.AddEventAsync(new RequestHaltEvent()); - public ValueTask QueueClearScopeAsync(string? scopeName = null) + public ValueTask QueueClearScopeAsync(string? scopeName = null, CancellationToken cancellationToken = default) => default; - public ValueTask QueueStateUpdateAsync(string key, T? value, string? scopeName = null) + public ValueTask QueueStateUpdateAsync(string key, T? value, string? scopeName = null, CancellationToken cancellationToken = default) => default; - public ValueTask ReadStateAsync(string key, string? scopeName = null) + public ValueTask ReadStateAsync(string key, string? 
scopeName = null, CancellationToken cancellationToken = default) => new(default(T?)); - public ValueTask> ReadStateKeysAsync(string? scopeName = null) + public ValueTask> ReadStateKeysAsync(string? scopeName = null, CancellationToken cancellationToken = default) => new([]); - public ValueTask SendMessageAsync(object message, string? targetId = null) - => runnerContext.SendMessageAsync(executorId, message, targetId); + public ValueTask SendMessageAsync(object message, string? targetId = null, CancellationToken cancellationToken = default) + => runnerContext.SendMessageAsync(executorId, message, targetId, cancellationToken); public IReadOnlyDictionary? TraceContext => traceContext; } public List Events { get; } = []; - public ValueTask AddEventAsync(WorkflowEvent workflowEvent) + public ValueTask AddEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken) { this.Events.Add(workflowEvent); return default; @@ -61,7 +61,7 @@ public ValueTask PostAsync(ExternalRequest request) } internal Dictionary> QueuedMessages { get; } = []; - public ValueTask SendMessageAsync(string sourceId, object message, string? targetId = null) + public ValueTask SendMessageAsync(string sourceId, object message, string? targetId = null, CancellationToken cancellationToken = default) { if (!this.QueuedMessages.TryGetValue(sourceId, out List? deliveryQueue)) { @@ -72,7 +72,7 @@ public ValueTask SendMessageAsync(string sourceId, object message, string? targe return default; } - ValueTask IRunnerContext.AdvanceAsync() => + ValueTask IRunnerContext.AdvanceAsync(CancellationToken cancellationToken) => throw new NotImplementedException(); public Dictionary Executors { get; set; } = []; @@ -80,10 +80,10 @@ ValueTask IRunnerContext.AdvanceAsync() => public bool WithCheckpointing => throw new NotSupportedException(); - ValueTask IRunnerContext.EnsureExecutorAsync(string executorId, IStepTracer? tracer) => + ValueTask IRunnerContext.EnsureExecutorAsync(string executorId, IStepTracer? 
tracer, CancellationToken cancellationToken) => new(this.Executors[executorId]); - public ValueTask> GetStartingExecutorInputTypesAsync(CancellationToken cancellation = default) + public ValueTask> GetStartingExecutorInputTypesAsync(CancellationToken cancellationToken = default) { if (this.Executors.TryGetValue(this.StartingExecutorId, out Executor? executor)) { @@ -93,11 +93,11 @@ public ValueTask> GetStartingExecutorInputTypesAsync(Cancellat throw new InvalidOperationException($"No executor with ID '{this.StartingExecutorId}' is registered in this context."); } - public ValueTask ForwardWorkflowEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellation = default) - => this.AddEventAsync(workflowEvent); + public ValueTask ForwardWorkflowEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken = default) + => this.AddEventAsync(workflowEvent, cancellationToken); - public ValueTask SendMessageAsync(string senderId, [System.Diagnostics.CodeAnalysis.DisallowNull] TMessage message, CancellationToken cancellation = default) - => this.SendMessageAsync(senderId, message, cancellation); + public ValueTask SendMessageAsync(string senderId, [System.Diagnostics.CodeAnalysis.DisallowNull] TMessage message, CancellationToken cancellationToken = default) + => this.SendMessageAsync(senderId, message, cancellationToken); - ValueTask ISuperStepJoinContext.AttachSuperstepAsync(ISuperStepRunner superStepRunner, CancellationToken cancellation) => default; + ValueTask ISuperStepJoinContext.AttachSuperstepAsync(ISuperStepRunner superStepRunner, CancellationToken cancellationToken) => default; } diff --git a/python/CHANGELOG.md b/python/CHANGELOG.md index 82952a70b1..1fa1c52b47 100644 --- a/python/CHANGELOG.md +++ b/python/CHANGELOG.md @@ -7,18 +7,27 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [1.0.0b251007] - 2025-10-07 + ### Added +- Added method to expose agent as MCP server 
([#1248](https://github.com/microsoft/agent-framework/pull/1248)) +- Add PDF file support to OpenAI content parser with filename mapping ([#1121](https://github.com/microsoft/agent-framework/pull/1121)) +- Sample on integration of Azure OpenAI Responses Client with a local MCP server ([#1215](https://github.com/microsoft/agent-framework/pull/1215)) +- Added approval_mode and allowed_tools to local MCP ([#1203](https://github.com/microsoft/agent-framework/pull/1203)) - Introducing AI Function approval ([#1131](https://github.com/microsoft/agent-framework/pull/1131)) - Add name and description to workflows ([#1183](https://github.com/microsoft/agent-framework/pull/1183)) - Add Ollama example using OpenAIChatClient ([#1100](https://github.com/microsoft/agent-framework/pull/1100)) - Add DevUI improvements with color scheme, linking, agent details, and token usage data ([#1091](https://github.com/microsoft/agent-framework/pull/1091)) - Add semantic-kernel to agent-framework migration code samples ([#1045](https://github.com/microsoft/agent-framework/pull/1045)) -- Add metapackage metadata stub to restore flit builds ([#1043](https://github.com/microsoft/agent-framework/pull/1043)) ### Changed +- [BREAKING] Parameter naming and other fixes ([#1255](https://github.com/microsoft/agent-framework/pull/1255)) +- [BREAKING] Introduce add_agent functionality and added output_response to AgentExecutor; agent streaming behavior to follow workflow invocation ([#1184](https://github.com/microsoft/agent-framework/pull/1184)) +- OpenAI Clients accepting api_key callback ([#1139](https://github.com/microsoft/agent-framework/pull/1139)) +- Updated docstrings ([#1225](https://github.com/microsoft/agent-framework/pull/1225)) +- Standardize docstrings: Use Keyword Args for Settings classes and add environment variable examples ([#1202](https://github.com/microsoft/agent-framework/pull/1202)) - Update References to Agent2Agent protocol to use correct terminology 
([#1162](https://github.com/microsoft/agent-framework/pull/1162)) - Update getting started samples to reflect AF and update unit test ([#1093](https://github.com/microsoft/agent-framework/pull/1093)) -- Update README with links to video content and initial code samples as quickstart ([#1049](https://github.com/microsoft/agent-framework/pull/1049)) - Update Lab Installation instructions to install from source ([#1051](https://github.com/microsoft/agent-framework/pull/1051)) - Update python DEV_SETUP to add brew-based uv installation ([#1173](https://github.com/microsoft/agent-framework/pull/1173)) - Update docstrings of all files and add example code in public interfaces ([#1107](https://github.com/microsoft/agent-framework/pull/1107)) @@ -29,8 +38,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Python: Foundry Agent Completeness ([#954](https://github.com/microsoft/agent-framework/pull/954)) ### Fixed -- Fix MCP tool calls to flatten nested JSON arguments (handle $ref schemas) ([#990](https://github.com/microsoft/agent-framework/pull/990)) -- Fix PyPI version strings to comply with PEP 440 ([#1040](https://github.com/microsoft/agent-framework/pull/1040)) +- Ollama + azureai openapi samples fix ([#1244](https://github.com/microsoft/agent-framework/pull/1244)) - Fix multimodal input sample: Document required environment variables and configuration options ([#1088](https://github.com/microsoft/agent-framework/pull/1088)) - Fix Azure AI Getting Started samples: Improve documentation and code readability ([#1089](https://github.com/microsoft/agent-framework/pull/1089)) - Fix a2a import ([#1058](https://github.com/microsoft/agent-framework/pull/1058)) @@ -53,5 +61,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 For more information, see the [announcement blog post](https://devblogs.microsoft.com/foundry/introducing-microsoft-agent-framework-the-open-source-engine-for-agentic-ai-apps/). 
-[Unreleased]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251001...HEAD +[Unreleased]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251007...HEAD +[1.0.0b251007]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251001...python-1.0.0b251007 [1.0.0b251001]: https://github.com/microsoft/agent-framework/releases/tag/python-1.0.0b251001 diff --git a/python/packages/a2a/agent_framework_a2a/_agent.py b/python/packages/a2a/agent_framework_a2a/_agent.py index 7b313c8f54..e353a7c7fa 100644 --- a/python/packages/a2a/agent_framework_a2a/_agent.py +++ b/python/packages/a2a/agent_framework_a2a/_agent.py @@ -9,6 +9,7 @@ import httpx from a2a.client import Client, ClientConfig, ClientFactory, minimal_agent_card +from a2a.client.auth.interceptor import AuthInterceptor from a2a.types import ( AgentCard, Artifact, @@ -78,6 +79,7 @@ def __init__( url: str | None = None, client: Client | None = None, http_client: httpx.AsyncClient | None = None, + auth_interceptor: AuthInterceptor | None = None, **kwargs: Any, ) -> None: """Initialize the A2AAgent. @@ -90,6 +92,7 @@ def __init__( url: The URL for the A2A server. client: The A2A client for the agent. http_client: Optional httpx.AsyncClient to use. + auth_interceptor: Optional authentication interceptor for secured endpoints. kwargs: any additional properties, passed to BaseAgent. 
""" super().__init__(id=id, name=name, description=description, **kwargs) @@ -123,7 +126,8 @@ def __init__( supported_transports=[TransportProtocol.jsonrpc], ) factory = ClientFactory(config) - self.client = factory.create(agent_card) + interceptors = [auth_interceptor] if auth_interceptor is not None else None + self.client = factory.create(agent_card, interceptors=interceptors) # type: ignore async def __aenter__(self) -> "A2AAgent": """Async context manager entry.""" diff --git a/python/packages/a2a/pyproject.toml b/python/packages/a2a/pyproject.toml index 9ebf2cf311..92b9b0c7a6 100644 --- a/python/packages/a2a/pyproject.toml +++ b/python/packages/a2a/pyproject.toml @@ -4,7 +4,7 @@ description = "A2A integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251001" +version = "1.0.0b251007" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/a2a/tests/test_a2a_agent.py b/python/packages/a2a/tests/test_a2a_agent.py index 34a5384ba7..b8fe97be60 100644 --- a/python/packages/a2a/tests/test_a2a_agent.py +++ b/python/packages/a2a/tests/test_a2a_agent.py @@ -497,3 +497,21 @@ def test_a2a_parts_to_contents_with_hosted_file_uri() -> None: assert isinstance(contents[0], UriContent) assert contents[0].uri == "hosted://storage/document.pdf" assert contents[0].media_type == "" # Converted None to empty string + + +def test_auth_interceptor_parameter() -> None: + """Test that auth_interceptor parameter is accepted without errors.""" + # Create a mock auth interceptor + mock_auth_interceptor = MagicMock() + + # Test that A2AAgent can be created with auth_interceptor parameter + # Using url parameter for simplicity + agent = A2AAgent( + name="test-agent", + url="https://test-agent.example.com", + 
auth_interceptor=mock_auth_interceptor, + ) + + # Verify the agent was created successfully + assert agent.name == "test-agent" + assert agent.client is not None diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py index 6b27a4224a..39ef30a0b5 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py @@ -42,6 +42,7 @@ from agent_framework.exceptions import ServiceInitializationError, ServiceResponseException from agent_framework.observability import use_observability from azure.ai.agents.models import ( + Agent, AgentsNamedToolChoice, AgentsNamedToolChoiceType, AgentsToolChoiceOptionMode, @@ -55,6 +56,7 @@ CodeInterpreterToolDefinition, FileSearchTool, FunctionName, + FunctionToolDefinition, ListSortOrder, McpTool, MessageDeltaChunk, @@ -251,6 +253,7 @@ def __init__( self.thread_id = thread_id self._should_delete_agent = False # Track whether we should delete the agent self._should_close_client = should_close_client # Track whether we should close client connection + self._agent_definition: Agent | None = None # Cached definition for existing agent async def setup_azure_ai_observability(self, enable_sensitive_data: bool | None = None) -> None: """Use this method to setup tracing in your Azure AI Project. @@ -351,24 +354,31 @@ async def _get_agent_id_or_create(self, run_options: dict[str, Any] | None = Non Returns: str: The agent_id to use """ + run_options = run_options or {} # If no agent_id is provided, create a temporary agent if self.agent_id is None: - if not self.model_id: - raise ServiceInitializationError("Model deployment name is required for agent creation.") + if "model" not in run_options or not run_options["model"]: + raise ServiceInitializationError( + "Model deployment name is required for agent creation, " + "can also be passed to the get_response methods." 
+ ) agent_name: str = self.agent_name or "UnnamedAgent" - args: dict[str, Any] = {"model": self.model_id, "name": agent_name} - if run_options: - if "tools" in run_options: - args["tools"] = run_options["tools"] - if "tool_resources" in run_options: - args["tool_resources"] = run_options["tool_resources"] - if "instructions" in run_options: - args["instructions"] = run_options["instructions"] - if "response_format" in run_options: - args["response_format"] = run_options["response_format"] + args: dict[str, Any] = { + "model": run_options["model"], + "name": agent_name, + } + if "tools" in run_options: + args["tools"] = run_options["tools"] + if "tool_resources" in run_options: + args["tool_resources"] = run_options["tool_resources"] + if "instructions" in run_options: + args["instructions"] = run_options["instructions"] + if "response_format" in run_options: + args["response_format"] = run_options["response_format"] created_agent = await self.project_client.agents.create_agent(**args) self.agent_id = str(created_agent.id) + self._agent_definition = created_agent self._should_delete_agent = True return self.agent_id @@ -663,6 +673,26 @@ async def _cleanup_agent_if_needed(self) -> None: self.agent_id = None self._should_delete_agent = False + async def _load_agent_definition_if_needed(self) -> Agent | None: + """Load and cache agent details if not already loaded.""" + if self._agent_definition is None and self.agent_id is not None: + self._agent_definition = await self.project_client.agents.get_agent(self.agent_id) + return self._agent_definition + + def _prepare_tool_choice(self, chat_options: ChatOptions) -> None: + """Prepare the tools and tool choice for the chat options. + + Args: + chat_options: The chat options to prepare. 
+ """ + chat_tool_mode = chat_options.tool_choice + if chat_tool_mode is None or chat_tool_mode == ToolMode.NONE or chat_tool_mode == "none": + chat_options.tools = None + chat_options.tool_choice = ToolMode.NONE.mode + return + + chat_options.tool_choice = chat_tool_mode.mode if isinstance(chat_tool_mode, ToolMode) else chat_tool_mode + async def _create_run_options( self, messages: MutableSequence[ChatMessage], @@ -671,18 +701,33 @@ async def _create_run_options( ) -> tuple[dict[str, Any], list[FunctionResultContent | FunctionApprovalResponseContent] | None]: run_options: dict[str, Any] = {**kwargs} + agent_definition = await self._load_agent_definition_if_needed() + if chat_options is not None: run_options["max_completion_tokens"] = chat_options.max_tokens - run_options["model"] = chat_options.model_id + if chat_options.model_id is not None: + run_options["model"] = chat_options.model_id + else: + run_options["model"] = self.model_id run_options["top_p"] = chat_options.top_p run_options["temperature"] = chat_options.temperature run_options["parallel_tool_calls"] = chat_options.allow_multiple_tool_calls + tool_definitions: list[ToolDefinition | dict[str, Any]] = [] + + # Add tools from existing agent + if agent_definition is not None: + # Don't include function tools, since they will be passed through chat_options.tools + agent_tools = [tool for tool in agent_definition.tools if not isinstance(tool, FunctionToolDefinition)] + if agent_tools: + tool_definitions.extend(agent_tools) + if agent_definition.tool_resources: + run_options["tool_resources"] = agent_definition.tool_resources + if chat_options.tool_choice is not None: if chat_options.tool_choice != "none" and chat_options.tools: - tool_definitions = await self._prep_tools(chat_options.tools, run_options) - if tool_definitions: - run_options["tools"] = tool_definitions + # Add run tools + tool_definitions.extend(await self._prep_tools(chat_options.tools, run_options)) # Handle MCP tool resources for approval 
mode mcp_tools = [tool for tool in chat_options.tools if isinstance(tool, HostedMCPTool)] @@ -731,6 +776,9 @@ async def _create_run_options( function=FunctionName(name=chat_options.tool_choice.required_function_name), ) + if tool_definitions: + run_options["tools"] = tool_definitions + if chat_options.response_format is not None: run_options["response_format"] = ResponseFormatJsonSchemaType( json_schema=ResponseFormatJsonSchema( @@ -739,7 +787,7 @@ async def _create_run_options( ) ) - instructions: list[str] = [chat_options.instructions] if chat_options and chat_options.instructions else [] + instructions: list[str] = [] required_action_results: list[FunctionResultContent | FunctionApprovalResponseContent] | None = None additional_messages: list[ThreadMessageOptions] | None = None @@ -781,6 +829,14 @@ async def _create_run_options( if additional_messages is not None: run_options["additional_messages"] = additional_messages + # Add instruction from existing agent at the beginning + if ( + agent_definition is not None + and agent_definition.instructions + and agent_definition.instructions not in instructions + ): + instructions.insert(0, agent_definition.instructions) + if len(instructions) > 0: run_options["instructions"] = "".join(instructions) @@ -806,8 +862,9 @@ async def _prep_tools( config_args["market"] = market if set_lang := additional_props.get("set_lang"): config_args["set_lang"] = set_lang - # Bing Grounding + # Bing Grounding (support both connection_id and connection_name) connection_id = additional_props.get("connection_id") or os.getenv("BING_CONNECTION_ID") + connection_name = additional_props.get("connection_name") or os.getenv("BING_CONNECTION_NAME") # Custom Bing Search custom_connection_name = additional_props.get("custom_connection_name") or os.getenv( "BING_CUSTOM_CONNECTION_NAME" @@ -816,8 +873,26 @@ async def _prep_tools( "BING_CUSTOM_INSTANCE_NAME" ) bing_search: BingGroundingTool | BingCustomSearchTool | None = None - if connection_id and 
not custom_connection_name and not custom_configuration_name: - bing_search = BingGroundingTool(connection_id=connection_id, **config_args) + if ( + (connection_id or connection_name) + and not custom_connection_name + and not custom_configuration_name + ): + if connection_id: + conn_id = connection_id + elif connection_name: + try: + bing_connection = await self.project_client.connections.get(name=connection_name) + except HttpResponseError as err: + raise ServiceInitializationError( + f"Bing connection '{connection_name}' not found in the Azure AI Project.", + err, + ) from err + else: + conn_id = bing_connection.id + else: + raise ServiceInitializationError("Neither connection_id nor connection_name provided.") + bing_search = BingGroundingTool(connection_id=conn_id, **config_args) if custom_connection_name and custom_configuration_name: try: bing_custom_connection = await self.project_client.connections.get( @@ -836,10 +911,11 @@ async def _prep_tools( ) if not bing_search: raise ServiceInitializationError( - "Bing search tool requires either a 'connection_id' for Bing Grounding " + "Bing search tool requires either 'connection_id' or 'connection_name' for Bing Grounding " "or both 'custom_connection_name' and 'custom_instance_name' for Custom Bing Search. 
" - "These can be provided via the tool's additional_properties or environment variables: " - "'BING_CONNECTION_ID', 'BING_CUSTOM_CONNECTION_NAME', 'BING_CUSTOM_INSTANCE_NAME'" + "These can be provided via additional_properties or environment variables: " + "'BING_CONNECTION_ID', 'BING_CONNECTION_NAME', 'BING_CUSTOM_CONNECTION_NAME', " + "'BING_CUSTOM_INSTANCE_NAME'" ) tool_definitions.extend(bing_search.definitions) case HostedCodeInterpreterTool(): @@ -898,6 +974,10 @@ async def _prep_tools( filter=additional_props.get("filter", ""), ) tool_definitions.extend(ai_search.definitions) + # Add tool resources for Azure AI Search + if run_options is not None: + run_options.setdefault("tool_resources", {}) + run_options["tool_resources"].update(ai_search.resources) case ToolDefinition(): tool_definitions.append(tool) case dict(): diff --git a/python/packages/azure-ai/pyproject.toml b/python/packages/azure-ai/pyproject.toml index aa0a7d6f0e..ce2bd9573f 100644 --- a/python/packages/azure-ai/pyproject.toml +++ b/python/packages/azure-ai/pyproject.toml @@ -4,7 +4,7 @@ description = "Azure AI Foundry integration for Microsoft Agent Framework." 
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251001" +version = "1.0.0b251007" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py index 793334fc0e..42abdf6779 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py @@ -46,7 +46,7 @@ ) from azure.ai.projects.models import ConnectionType from azure.core.credentials_async import AsyncTokenCredential -from azure.core.exceptions import HttpResponseError +from azure.core.exceptions import HttpResponseError, ResourceNotFoundError from azure.identity.aio import AzureCliCredential from pydantic import BaseModel, Field, ValidationError from pytest import MonkeyPatch @@ -81,11 +81,12 @@ def create_test_azure_ai_chat_client( client.project_client = mock_ai_project_client client.credential = None client.agent_id = agent_id - client.agent_name = None + client.agent_name = agent_name client.model_id = azure_ai_settings.model_deployment_name client.thread_id = thread_id - client._should_delete_agent = should_delete_agent - client._should_close_client = False + client._should_delete_agent = should_delete_agent # type: ignore + client._should_close_client = False # type: ignore + client._agent_definition = None # type: ignore client.additional_properties = {} client.middleware = None @@ -285,7 +286,7 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_create_new( azure_ai_settings = AzureAISettings(model_deployment_name=azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"]) chat_client = create_test_azure_ai_chat_client(mock_ai_project_client, azure_ai_settings=azure_ai_settings) - agent_id = await 
chat_client._get_agent_id_or_create() # type: ignore + agent_id = await chat_client._get_agent_id_or_create(run_options={"model": azure_ai_settings.model_deployment_name}) # type: ignore assert agent_id == "test-agent-id" assert chat_client._should_delete_agent # type: ignore @@ -297,6 +298,9 @@ async def test_azure_ai_chat_client_tool_results_without_thread_error_via_public """Test that tool results without thread ID raise error through public API.""" chat_client = create_test_azure_ai_chat_client(mock_ai_project_client, agent_id="test-agent") + # Mock get_agent + mock_ai_project_client.agents.get_agent = AsyncMock(return_value=None) + # Create messages with tool results but no thread/conversation ID messages = [ ChatMessage(role=Role.USER, text="Hello"), @@ -315,6 +319,9 @@ async def test_azure_ai_chat_client_thread_management_through_public_api(mock_ai """Test thread creation and management through public API.""" chat_client = create_test_azure_ai_chat_client(mock_ai_project_client, agent_id="test-agent") + # Mock get_agent to avoid the async error + mock_ai_project_client.agents.get_agent = AsyncMock(return_value=None) + mock_thread = MagicMock() mock_thread.id = "new-thread-456" mock_ai_project_client.agents.threads.create = AsyncMock(return_value=mock_thread) @@ -451,6 +458,9 @@ async def test_azure_ai_chat_client_create_run_options_with_image_content(mock_a chat_client = create_test_azure_ai_chat_client(mock_ai_project_client, agent_id="test-agent") + # Mock get_agent + mock_ai_project_client.agents.get_agent = AsyncMock(return_value=None) + image_content = UriContent(uri="https://example.com/image.jpg", media_type="image/jpeg") messages = [ChatMessage(role=Role.USER, contents=[image_content])] @@ -544,6 +554,19 @@ async def test_azure_ai_chat_client_create_run_options_with_messages(mock_ai_pro assert len(run_options["additional_messages"]) == 1 # Only user message +async def test_azure_ai_chat_client_instructions_sent_once(mock_ai_project_client: MagicMock) 
-> None: + """Ensure instructions are only sent once for AzureAIAgentClient.""" + chat_client = create_test_azure_ai_chat_client(mock_ai_project_client) + + instructions = "You are a helpful assistant." + chat_options = ChatOptions(instructions=instructions) + messages = chat_client.prepare_messages([ChatMessage(role=Role.USER, text="Hello")], chat_options) + + run_options, _ = await chat_client._create_run_options(messages, chat_options) # type: ignore + + assert run_options.get("instructions") == instructions + + async def test_azure_ai_chat_client_inner_get_response(mock_ai_project_client: MagicMock) -> None: """Test _inner_get_response method.""" chat_client = create_test_azure_ai_chat_client(mock_ai_project_client, agent_id="test-agent") @@ -577,6 +600,7 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_with_run_options( "tools": [{"type": "function", "function": {"name": "test_tool"}}], "instructions": "Test instructions", "response_format": {"type": "json_object"}, + "model": azure_ai_settings.model_deployment_name, } agent_id = await chat_client._get_agent_id_or_create(run_options) # type: ignore @@ -810,7 +834,7 @@ async def test_azure_ai_chat_client_prep_tools_web_search_bing_grounding(mock_ai web_search_tool = HostedWebSearchTool( additional_properties={ - "connection_id": "test-connection-id", + "connection_name": "test-connection-name", "count": 5, "freshness": "Day", "market": "en-US", @@ -818,6 +842,11 @@ async def test_azure_ai_chat_client_prep_tools_web_search_bing_grounding(mock_ai } ) + # Mock connection get + mock_connection = MagicMock() + mock_connection.id = "test-connection-id" + mock_ai_project_client.connections.get = AsyncMock(return_value=mock_connection) + # Mock BingGroundingTool with patch("agent_framework_azure_ai._chat_client.BingGroundingTool") as mock_bing_grounding: mock_bing_tool = MagicMock() @@ -833,6 +862,35 @@ async def test_azure_ai_chat_client_prep_tools_web_search_bing_grounding(mock_ai ) +async def 
test_azure_ai_chat_client_prep_tools_web_search_bing_grounding_with_connection_id( + mock_ai_project_client: MagicMock, +) -> None: + """Test _prep_tools with HostedWebSearchTool using Bing Grounding with connection_id (no HTTP call).""" + + chat_client = create_test_azure_ai_chat_client(mock_ai_project_client, agent_id="test-agent") + + web_search_tool = HostedWebSearchTool( + additional_properties={ + "connection_id": "direct-connection-id", + "count": 3, + } + ) + + # Mock BingGroundingTool + with patch("agent_framework_azure_ai._chat_client.BingGroundingTool") as mock_bing_grounding: + mock_bing_tool = MagicMock() + mock_bing_tool.definitions = [{"type": "bing_grounding"}] + mock_bing_grounding.return_value = mock_bing_tool + + result = await chat_client._prep_tools([web_search_tool]) # type: ignore + + assert len(result) == 1 + assert result[0] == {"type": "bing_grounding"} + # Verify that connection_id was used directly (no HTTP call to connections.get) + mock_ai_project_client.connections.get.assert_not_called() + mock_bing_grounding.assert_called_once_with(connection_id="direct-connection-id", count=3) + + async def test_azure_ai_chat_client_prep_tools_web_search_custom_bing(mock_ai_project_client: MagicMock) -> None: """Test _prep_tools with HostedWebSearchTool using Custom Bing Search.""" @@ -888,15 +946,23 @@ async def test_azure_ai_chat_client_prep_tools_web_search_custom_bing_connection await chat_client._prep_tools([web_search_tool]) # type: ignore -async def test_azure_ai_chat_client_prep_tools_web_search_missing_config(mock_ai_project_client: MagicMock) -> None: - """Test _prep_tools with HostedWebSearchTool missing required configuration.""" +async def test_azure_ai_chat_client_prep_tools_web_search_bing_grounding_connection_error( + mock_ai_project_client: MagicMock, +) -> None: + """Test _prep_tools with HostedWebSearchTool when Bing Grounding connection is not found.""" chat_client = create_test_azure_ai_chat_client(mock_ai_project_client, 
agent_id="test-agent") - # Web search tool with no connection configuration - web_search_tool = HostedWebSearchTool() + web_search_tool = HostedWebSearchTool( + additional_properties={ + "connection_name": "nonexistent-bing-connection", + } + ) - with pytest.raises(ServiceInitializationError, match="Bing search tool requires either a 'connection_id'"): + # Mock connection get to raise HttpResponseError + mock_ai_project_client.connections.get = AsyncMock(side_effect=HttpResponseError("Connection not found")) + + with pytest.raises(ServiceInitializationError, match="Bing connection 'nonexistent-bing-connection' not found"): await chat_client._prep_tools([web_search_tool]) # type: ignore @@ -1277,7 +1343,7 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_with_agent_name( # Ensure agent_name is None to test the default chat_client.agent_name = None # type: ignore - agent_id = await chat_client._get_agent_id_or_create() # type: ignore + agent_id = await chat_client._get_agent_id_or_create(run_options={"model": azure_ai_settings.model_deployment_name}) # type: ignore assert agent_id == "test-agent-id" # Verify create_agent was called with default "UnnamedAgent" @@ -1294,7 +1360,7 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_with_response_format( chat_client = create_test_azure_ai_chat_client(mock_ai_project_client, azure_ai_settings=azure_ai_settings) # Test with response_format in run_options - run_options = {"response_format": {"type": "json_object"}} + run_options = {"response_format": {"type": "json_object"}, "model": azure_ai_settings.model_deployment_name} agent_id = await chat_client._get_agent_id_or_create(run_options) # type: ignore @@ -1313,7 +1379,10 @@ async def test_azure_ai_chat_client_get_agent_id_or_create_with_tool_resources( chat_client = create_test_azure_ai_chat_client(mock_ai_project_client, azure_ai_settings=azure_ai_settings) # Test with tool_resources in run_options - run_options = {"tool_resources": 
{"vector_store_ids": ["vs-123"]}} + run_options = { + "tool_resources": {"vector_store_ids": ["vs-123"]}, + "model": azure_ai_settings.model_deployment_name, + } agent_id = await chat_client._get_agent_id_or_create(run_options) # type: ignore @@ -1370,6 +1439,28 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_outputs( assert final_thread_id == "test-thread" +async def test_azure_ai_chat_client_setup_azure_ai_observability_resource_not_found( + mock_ai_project_client: MagicMock, +) -> None: + """Test setup_azure_ai_observability when Application Insights connection string is not found.""" + chat_client = create_test_azure_ai_chat_client(mock_ai_project_client, agent_id="test-agent") + + # Mock telemetry.get_application_insights_connection_string to raise ResourceNotFoundError + mock_ai_project_client.telemetry.get_application_insights_connection_string = AsyncMock( + side_effect=ResourceNotFoundError("No Application Insights found") + ) + + # Mock logger.warning to capture the warning message + with patch("agent_framework_azure_ai._chat_client.logger") as mock_logger: + await chat_client.setup_azure_ai_observability() + + # Verify warning was logged + mock_logger.warning.assert_called_once_with( + "No Application Insights connection string found for the Azure AI Project, " + "please call setup_observability() manually." + ) + + def get_weather( location: Annotated[str, Field(description="The location to get the weather for.")], ) -> str: diff --git a/python/packages/copilotstudio/pyproject.toml b/python/packages/copilotstudio/pyproject.toml index 89b1b3491e..2ab391489b 100644 --- a/python/packages/copilotstudio/pyproject.toml +++ b/python/packages/copilotstudio/pyproject.toml @@ -4,7 +4,7 @@ description = "Copilot Studio integration for Microsoft Agent Framework." 
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251001" +version = "1.0.0b251007" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/core/agent_framework/_agents.py b/python/packages/core/agent_framework/_agents.py index 55e0ae42e3..8177bca4ce 100644 --- a/python/packages/core/agent_framework/_agents.py +++ b/python/packages/core/agent_framework/_agents.py @@ -486,7 +486,7 @@ class ChatAgent(BaseAgent): from agent_framework.clients import OpenAIChatClient # Create a basic chat agent - client = OpenAIChatClient(model="gpt-4") + client = OpenAIChatClient(model_id="gpt-4") agent = ChatAgent(chat_client=client, name="assistant", description="A helpful assistant") # Run the agent with a simple message @@ -514,6 +514,26 @@ def get_weather(location: str) -> str: # Use streaming responses async for update in agent.run_stream("What's the weather in Paris?"): print(update.text, end="") + + With additional provider specific options: + + .. code-block:: python + + agent = ChatAgent( + chat_client=client, + name="reasoning-agent", + instructions="You are a reasoning assistant.", + model_id="gpt-5", + temperature=0.7, + max_tokens=500, + additional_chat_options={ + "reasoning": {"effort": "high", "summary": "concise"} + }, # OpenAI Responses specific. 
+ ) + + # Use streaming responses + async for update in agent.run_stream("How do you prove the pythagorean theorem?"): + print(update.text, end="") """ AGENT_SYSTEM_NAME: ClassVar[str] = "microsoft.agent_framework" @@ -534,7 +554,7 @@ def __init__( logit_bias: dict[str | int, float] | None = None, max_tokens: int | None = None, metadata: dict[str, Any] | None = None, - model: str | None = None, + model_id: str | None = None, presence_penalty: float | None = None, response_format: type[BaseModel] | None = None, seed: int | None = None, @@ -549,7 +569,7 @@ def __init__( | None = None, top_p: float | None = None, user: str | None = None, - request_kwargs: dict[str, Any] | None = None, + additional_chat_options: dict[str, Any] | None = None, **kwargs: Any, ) -> None: """Initialize a ChatAgent instance. @@ -578,7 +598,7 @@ def __init__( logit_bias: The logit bias to use. max_tokens: The maximum number of tokens to generate. metadata: Additional metadata to include in the request. - model: The model to use for the agent. + model_id: The model_id to use for the agent. presence_penalty: The presence penalty to use. response_format: The format of the response. seed: The random seed to use. @@ -589,8 +609,9 @@ def __init__( tools: The tools to use for the request. top_p: The nucleus sampling probability to use. user: The user to associate with the request. - request_kwargs: A dictionary of other values that will be passed through + additional_chat_options: A dictionary of other values that will be passed through to the chat_client ``get_response`` and ``get_streaming_response`` methods. + This can be used to pass provider specific parameters. kwargs: Any additional keyword arguments. Will be stored as ``additional_properties``. 
Raises: @@ -626,7 +647,7 @@ def __init__( self._local_mcp_tools = [tool for tool in normalized_tools if isinstance(tool, MCPTool)] agent_tools = [tool for tool in normalized_tools if not isinstance(tool, MCPTool)] self.chat_options = ChatOptions( - model_id=model, + model_id=model_id, conversation_id=conversation_id, frequency_penalty=frequency_penalty, instructions=instructions, @@ -643,7 +664,7 @@ def __init__( tools=agent_tools, top_p=top_p, user=user, - additional_properties=request_kwargs or {}, # type: ignore + additional_properties=additional_chat_options or {}, # type: ignore ) self._async_exit_stack = AsyncExitStack() self._update_agent_name() @@ -701,7 +722,7 @@ async def run( logit_bias: dict[str | int, float] | None = None, max_tokens: int | None = None, metadata: dict[str, Any] | None = None, - model: str | None = None, + model_id: str | None = None, presence_penalty: float | None = None, response_format: type[BaseModel] | None = None, seed: int | None = None, @@ -716,7 +737,7 @@ async def run( | None = None, top_p: float | None = None, user: str | None = None, - additional_properties: dict[str, Any] | None = None, + additional_chat_options: dict[str, Any] | None = None, **kwargs: Any, ) -> AgentRunResponse: """Run the agent with the given messages and options. @@ -736,7 +757,7 @@ async def run( logit_bias: The logit bias to use. max_tokens: The maximum number of tokens to generate. metadata: Additional metadata to include in the request. - model: The model to use for the agent. + model_id: The model_id to use for the agent. presence_penalty: The presence penalty to use. response_format: The format of the response. seed: The random seed to use. @@ -747,7 +768,8 @@ async def run( tools: The tools to use for the request. top_p: The nucleus sampling probability to use. user: The user to associate with the request. - additional_properties: Additional properties to include in the request. 
+ additional_chat_options: Additional properties to include in the request. + Use this field for provider-specific parameters. kwargs: Additional keyword arguments for the agent. Will only be passed to functions that are called. @@ -778,30 +800,27 @@ async def run( if not mcp_server.is_connected: await self._async_exit_stack.enter_async_context(mcp_server) final_tools.extend(mcp_server.functions) - response = await self.chat_client.get_response( - messages=thread_messages, - chat_options=run_chat_options - & ChatOptions( - model_id=model, - conversation_id=thread.service_thread_id, - frequency_penalty=frequency_penalty, - logit_bias=logit_bias, - max_tokens=max_tokens, - metadata=metadata, - presence_penalty=presence_penalty, - response_format=response_format, - seed=seed, - stop=stop, - store=store, - temperature=temperature, - tool_choice=tool_choice, - tools=final_tools, - top_p=top_p, - user=user, - additional_properties=additional_properties or {}, - ), - **kwargs, + + co = run_chat_options & ChatOptions( + model_id=model_id, + conversation_id=thread.service_thread_id, + frequency_penalty=frequency_penalty, + logit_bias=logit_bias, + max_tokens=max_tokens, + metadata=metadata, + presence_penalty=presence_penalty, + response_format=response_format, + seed=seed, + stop=stop, + store=store, + temperature=temperature, + tool_choice=tool_choice, + tools=final_tools, + top_p=top_p, + user=user, + **(additional_chat_options or {}), ) + response = await self.chat_client.get_response(messages=thread_messages, chat_options=co, **kwargs) await self._update_thread_with_type_and_conversation_id(thread, response.conversation_id) @@ -832,7 +851,7 @@ async def run_stream( logit_bias: dict[str | int, float] | None = None, max_tokens: int | None = None, metadata: dict[str, Any] | None = None, - model: str | None = None, + model_id: str | None = None, presence_penalty: float | None = None, response_format: type[BaseModel] | None = None, seed: int | None = None, @@ -847,7 +866,7 
@@ async def run_stream( | None = None, top_p: float | None = None, user: str | None = None, - additional_properties: dict[str, Any] | None = None, + additional_chat_options: dict[str, Any] | None = None, **kwargs: Any, ) -> AsyncIterable[AgentRunResponseUpdate]: """Stream the agent with the given messages and options. @@ -867,7 +886,7 @@ async def run_stream( logit_bias: The logit bias to use. max_tokens: The maximum number of tokens to generate. metadata: Additional metadata to include in the request. - model: The model to use for the agent. + model_id: The model_id to use for the agent. presence_penalty: The presence penalty to use. response_format: The format of the response. seed: The random seed to use. @@ -878,7 +897,8 @@ async def run_stream( tools: The tools to use for the request. top_p: The nucleus sampling probability to use. user: The user to associate with the request. - additional_properties: Additional properties to include in the request. + additional_chat_options: Additional properties to include in the request. + Use this field for provider-specific parameters. kwargs: Any additional keyword arguments. Will only be passed to functions that are called. 
@@ -890,8 +910,6 @@ async def run_stream( thread=thread, input_messages=input_messages ) agent_name = self._get_agent_name() - response_updates: list[ChatResponseUpdate] = [] - # Resolve final tool list (runtime provided tools + local MCP server tools) final_tools: list[ToolProtocol | MutableMapping[str, Any] | Callable[..., Any]] = [] normalized_tools: list[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] = ( # type: ignore[reportUnknownVariableType] @@ -911,29 +929,29 @@ async def run_stream( await self._async_exit_stack.enter_async_context(mcp_server) final_tools.extend(mcp_server.functions) + co = run_chat_options & ChatOptions( + conversation_id=thread.service_thread_id, + frequency_penalty=frequency_penalty, + logit_bias=logit_bias, + max_tokens=max_tokens, + metadata=metadata, + model_id=model_id, + presence_penalty=presence_penalty, + response_format=response_format, + seed=seed, + stop=stop, + store=store, + temperature=temperature, + tool_choice=tool_choice, + tools=final_tools, + top_p=top_p, + user=user, + **(additional_chat_options or {}), + ) + + response_updates: list[ChatResponseUpdate] = [] async for update in self.chat_client.get_streaming_response( - messages=thread_messages, - chat_options=run_chat_options - & ChatOptions( - conversation_id=thread.service_thread_id, - frequency_penalty=frequency_penalty, - logit_bias=logit_bias, - max_tokens=max_tokens, - metadata=metadata, - model_id=model, - presence_penalty=presence_penalty, - response_format=response_format, - seed=seed, - stop=stop, - store=store, - temperature=temperature, - tool_choice=tool_choice, - tools=final_tools, - top_p=top_p, - user=user, - additional_properties=additional_properties or {}, - ), - **kwargs, + messages=thread_messages, chat_options=co, **kwargs ): response_updates.append(update) @@ -951,7 +969,7 @@ async def run_stream( raw_representation=update, ) - response = ChatResponse.from_chat_response_updates(response_updates) + response = 
ChatResponse.from_chat_response_updates(response_updates, output_format_type=co.response_format) await self._update_thread_with_type_and_conversation_id(thread, response.conversation_id) await self._notify_thread_of_new_messages(thread, input_messages, response.messages) diff --git a/python/packages/core/agent_framework/_clients.py b/python/packages/core/agent_framework/_clients.py index efa22335ad..4089aea9e3 100644 --- a/python/packages/core/agent_framework/_clients.py +++ b/python/packages/core/agent_framework/_clients.py @@ -101,7 +101,7 @@ async def get_response( logit_bias: dict[str | int, float] | None = None, max_tokens: int | None = None, metadata: dict[str, Any] | None = None, - model: str | None = None, + model_id: str | None = None, presence_penalty: float | None = None, response_format: type[BaseModel] | None = None, seed: int | None = None, @@ -129,7 +129,7 @@ async def get_response( logit_bias: The logit bias to use. max_tokens: The maximum number of tokens to generate. metadata: Additional metadata to include in the request. - model: The model to use for the agent. + model_id: The model_id to use for the agent. presence_penalty: The presence penalty to use. response_format: The format of the response. seed: The random seed to use. @@ -160,7 +160,7 @@ def get_streaming_response( logit_bias: dict[str | int, float] | None = None, max_tokens: int | None = None, metadata: dict[str, Any] | None = None, - model: str | None = None, + model_id: str | None = None, presence_penalty: float | None = None, response_format: type[BaseModel] | None = None, seed: int | None = None, @@ -188,7 +188,7 @@ def get_streaming_response( logit_bias: The logit bias to use. max_tokens: The maximum number of tokens to generate. metadata: Additional metadata to include in the request. - model: The model to use for the agent. + model_id: The model_id to use for the agent. presence_penalty: The presence penalty to use. response_format: The format of the response. 
seed: The random seed to use. @@ -240,7 +240,7 @@ def prepare_messages(messages: str | ChatMessage | list[str] | list[ChatMessage] def merge_chat_options( *, base_chat_options: ChatOptions | Any | None, - model: str | None = None, + model_id: str | None = None, frequency_penalty: float | None = None, logit_bias: dict[str | int, float] | None = None, max_tokens: int | None = None, @@ -265,7 +265,7 @@ def merge_chat_options( Keyword Args: base_chat_options: Optional base ChatOptions to merge with direct parameters. - model: The model to use for the agent. + model_id: The model_id to use for the agent. frequency_penalty: The frequency penalty to use. logit_bias: The logit bias to use. max_tokens: The maximum number of tokens to generate. @@ -292,40 +292,11 @@ def merge_chat_options( if base_chat_options is not None and not isinstance(base_chat_options, ChatOptions): raise TypeError("chat_options must be an instance of ChatOptions") - if base_chat_options is not None: - # Combine tools from both sources - base_tools = base_chat_options.tools or [] - combined_tools = [*base_tools, *(tools or [])] if tools else base_tools - - # Create new chat_options, using direct parameters when provided, otherwise fall back to base - return ChatOptions( - model_id=model if model is not None else base_chat_options.model_id, - frequency_penalty=( - frequency_penalty if frequency_penalty is not None else base_chat_options.frequency_penalty - ), - logit_bias=logit_bias if logit_bias is not None else base_chat_options.logit_bias, - max_tokens=max_tokens if max_tokens is not None else base_chat_options.max_tokens, - metadata=metadata if metadata is not None else base_chat_options.metadata, - presence_penalty=(presence_penalty if presence_penalty is not None else base_chat_options.presence_penalty), - response_format=(response_format if response_format is not None else base_chat_options.response_format), - seed=seed if seed is not None else base_chat_options.seed, - stop=stop if stop is not 
None else base_chat_options.stop, - store=store if store is not None else base_chat_options.store, - temperature=temperature if temperature is not None else base_chat_options.temperature, - top_p=top_p if top_p is not None else base_chat_options.top_p, - tool_choice=( - tool_choice if (tool_choice is not None and tool_choice != "auto") else base_chat_options.tool_choice # type: ignore[arg-type] - ), - tools=combined_tools or None, - user=user if user is not None else base_chat_options.user, - additional_properties=( - additional_properties if additional_properties is not None else base_chat_options.additional_properties - ), - conversation_id=base_chat_options.conversation_id, - ) - # No base options, create from direct parameters only - return ChatOptions( - model_id=model, + if base_chat_options is None: + base_chat_options = ChatOptions() + + return base_chat_options & ChatOptions( + model_id=model_id, frequency_penalty=frequency_penalty, logit_bias=logit_bias, max_tokens=max_tokens, @@ -340,7 +311,7 @@ def merge_chat_options( tool_choice=tool_choice, tools=tools, user=user, - additional_properties=additional_properties or {}, + additional_properties=additional_properties, ) @@ -560,7 +531,7 @@ async def get_response( logit_bias: dict[str | int, float] | None = None, max_tokens: int | None = None, metadata: dict[str, Any] | None = None, - model: str | None = None, + model_id: str | None = None, presence_penalty: float | None = None, response_format: type[BaseModel] | None = None, seed: int | None = None, @@ -592,7 +563,7 @@ async def get_response( logit_bias: The logit bias to use. max_tokens: The maximum number of tokens to generate. metadata: Additional metadata to include in the request. - model: The model to use for the agent. + model_id: The model_id to use for the agent. presence_penalty: The presence penalty to use. response_format: The format of the response. seed: The random seed to use. 
@@ -604,17 +575,18 @@ async def get_response( top_p: The nucleus sampling probability to use. user: The user to associate with the request. additional_properties: Additional properties to include in the request. + Can be used for provider-specific parameters. kwargs: Any additional keyword arguments. May include ``chat_options`` which provides base values that can be overridden by direct parameters. Returns: - A chat response from the model. + A chat response from the model. """ # Normalize tools and merge with base chat_options normalized_tools = await self._normalize_tools(tools) chat_options = merge_chat_options( base_chat_options=kwargs.pop("chat_options", None), - model=model, + model_id=model_id, frequency_penalty=frequency_penalty, logit_bias=logit_bias, max_tokens=max_tokens, @@ -654,7 +626,7 @@ async def get_streaming_response( logit_bias: dict[str | int, float] | None = None, max_tokens: int | None = None, metadata: dict[str, Any] | None = None, - model: str | None = None, + model_id: str | None = None, presence_penalty: float | None = None, response_format: type[BaseModel] | None = None, seed: int | None = None, @@ -686,7 +658,7 @@ async def get_streaming_response( logit_bias: The logit bias to use. max_tokens: The maximum number of tokens to generate. metadata: Additional metadata to include in the request. - model: The model to use for the agent. + model_id: The model_id to use for the agent. presence_penalty: The presence penalty to use. response_format: The format of the response. seed: The random seed to use. @@ -698,7 +670,7 @@ async def get_streaming_response( top_p: The nucleus sampling probability to use. user: The user to associate with the request. additional_properties: Additional properties to include in the request. + Can be used for provider-specific parameters. kwargs: Any additional keyword arguments. May include ``chat_options`` which provides base values that can be overridden by direct parameters. 
@@ -708,7 +681,7 @@ async def get_streaming_response( normalized_tools = await self._normalize_tools(tools) chat_options = merge_chat_options( base_chat_options=kwargs.pop("chat_options", None), - model=model, + model_id=model_id, frequency_penalty=frequency_penalty, logit_bias=logit_bias, max_tokens=max_tokens, @@ -787,7 +760,7 @@ def create_agent( logit_bias: dict[str | int, float] | None = None, max_tokens: int | None = None, metadata: dict[str, Any] | None = None, - model: str | None = None, + model_id: str | None = None, presence_penalty: float | None = None, response_format: type[BaseModel] | None = None, seed: int | None = None, @@ -802,7 +775,7 @@ def create_agent( | None = None, top_p: float | None = None, user: str | None = None, - request_kwargs: dict[str, Any] | None = None, + additional_chat_options: dict[str, Any] | None = None, **kwargs: Any, ) -> "ChatAgent": """Create a ChatAgent with this client. @@ -824,7 +797,7 @@ def create_agent( logit_bias: The logit bias to use. max_tokens: The maximum number of tokens to generate. metadata: Additional metadata to include in the request. - model: The model to use for the agent. + model_id: The model_id to use for the agent. presence_penalty: The presence penalty to use. response_format: The format of the response. seed: The random seed to use. @@ -835,8 +808,9 @@ def create_agent( tools: The tools to use for the request. top_p: The nucleus sampling probability to use. user: The user to associate with the request. - request_kwargs: A dictionary of other values that will be passed through + additional_chat_options: A dictionary of other values that will be passed through to the chat_client ``get_response`` and ``get_streaming_response`` methods. + This can be used to pass provider specific parameters. kwargs: Any additional keyword arguments. Will be stored as ``additional_properties``. 
Returns: @@ -848,7 +822,7 @@ def create_agent( from agent_framework.clients import OpenAIChatClient # Create a client - client = OpenAIChatClient(model="gpt-4") + client = OpenAIChatClient(model_id="gpt-4") # Create an agent using the convenience method agent = client.create_agent( @@ -873,7 +847,7 @@ def create_agent( logit_bias=logit_bias, max_tokens=max_tokens, metadata=metadata, - model=model, + model_id=model_id, presence_penalty=presence_penalty, response_format=response_format, seed=seed, @@ -884,6 +858,6 @@ def create_agent( tools=tools, top_p=top_p, user=user, - request_kwargs=request_kwargs, + additional_chat_options=additional_chat_options, **kwargs, ) diff --git a/python/packages/core/agent_framework/_middleware.py b/python/packages/core/agent_framework/_middleware.py index 3648f89b13..82b0f998fb 100644 --- a/python/packages/core/agent_framework/_middleware.py +++ b/python/packages/core/agent_framework/_middleware.py @@ -221,7 +221,7 @@ class TokenCounterMiddleware(ChatMiddleware): async def process(self, context: ChatContext, next): print(f"Chat client: {context.chat_client.__class__.__name__}") print(f"Messages: {len(context.messages)}") - print(f"Model: {context.chat_options.model}") + print(f"Model: {context.chat_options.model_id}") # Store metadata context.metadata["input_tokens"] = self.count_tokens(context.messages) diff --git a/python/packages/core/agent_framework/_serialization.py b/python/packages/core/agent_framework/_serialization.py index 02535329ec..f3cb42185a 100644 --- a/python/packages/core/agent_framework/_serialization.py +++ b/python/packages/core/agent_framework/_serialization.py @@ -163,7 +163,7 @@ def to_dict(self, *, exclude: set[str] | None = None, exclude_none: bool = True) combined_exclude.update(self.INJECTABLE) # Get all instance attributes - result: dict[str, Any] = {"type": self._get_type_identifier()} + result: dict[str, Any] = {} if "type" in combined_exclude else {"type": self._get_type_identifier()} for key, value in 
self.__dict__.items(): if key not in combined_exclude and not key.startswith("_"): if exclude_none and value is None: @@ -212,17 +212,18 @@ def to_dict(self, *, exclude: set[str] | None = None, exclude_none: bool = True) return result - def to_json(self, *, exclude: set[str] | None = None, exclude_none: bool = True) -> str: + def to_json(self, *, exclude: set[str] | None = None, exclude_none: bool = True, **kwargs: Any) -> str: """Convert the instance to a JSON string. Keyword Args: exclude: The set of field names to exclude from serialization. exclude_none: Whether to exclude None values from the output. Defaults to True. + **kwargs: passed through to the json.dumps method. Returns: JSON string representation of the instance. """ - return json.dumps(self.to_dict(exclude=exclude, exclude_none=exclude_none)) + return json.dumps(self.to_dict(exclude=exclude, exclude_none=exclude_none), **kwargs) @classmethod def from_dict( diff --git a/python/packages/core/agent_framework/_threads.py b/python/packages/core/agent_framework/_threads.py index ff8cffc39f..f7603a7c3c 100644 --- a/python/packages/core/agent_framework/_threads.py +++ b/python/packages/core/agent_framework/_threads.py @@ -1,11 +1,10 @@ # Copyright (c) Microsoft. All rights reserved. -from collections.abc import Sequence +from collections.abc import MutableMapping, Sequence from typing import Any, Protocol, TypeVar -from pydantic import BaseModel, ConfigDict, model_validator - from ._memory import AggregateContextProvider +from ._serialization import SerializationMixin from ._types import ChatMessage from .exceptions import AgentThreadException @@ -73,7 +72,9 @@ async def add_messages(self, messages: Sequence[ChatMessage]) -> None: ... 
@classmethod - async def deserialize(cls, serialized_store_state: Any, **kwargs: Any) -> "ChatMessageStoreProtocol": + async def deserialize( + cls, serialized_store_state: MutableMapping[str, Any], **kwargs: Any + ) -> "ChatMessageStoreProtocol": """Creates a new instance of the store from previously serialized state. This method, together with ``serialize()`` can be used to save and load messages from a persistent store @@ -90,7 +91,7 @@ async def deserialize(cls, serialized_store_state: Any, **kwargs: Any) -> "ChatM """ ... - async def update_from_state(self, serialized_store_state: Any, **kwargs: Any) -> None: + async def update_from_state(self, serialized_store_state: MutableMapping[str, Any], **kwargs: Any) -> None: """Update the current ChatMessageStore instance from serialized state data. Args: @@ -101,7 +102,7 @@ async def update_from_state(self, serialized_store_state: Any, **kwargs: Any) -> """ ... - async def serialize(self, **kwargs: Any) -> Any: + async def serialize(self, **kwargs: Any) -> dict[str, Any]: """Serializes the current object's state. This method, together with ``deserialize()`` can be used to save and load messages from a persistent store @@ -116,40 +117,66 @@ async def serialize(self, **kwargs: Any) -> Any: ... -class ChatMessageStoreState(BaseModel): +class ChatMessageStoreState(SerializationMixin): """State model for serializing and deserializing chat message store data. Attributes: messages: List of chat messages stored in the message store. """ - messages: list[ChatMessage] - - model_config = ConfigDict(arbitrary_types_allowed=True) - + def __init__( + self, + messages: Sequence[ChatMessage] | Sequence[MutableMapping[str, Any]] | None = None, + **kwargs: Any, + ) -> None: + """Create the store state. -class AgentThreadState(BaseModel): - """State model for serializing and deserializing thread information. + Args: + messages: a list of messages or a list of the dict representation of messages. 
- Attributes: - service_thread_id: Optional ID of the thread managed by the agent service. - chat_message_store_state: Optional serialized state of the chat message store. - """ + Keyword Args: + **kwargs: not used for this, but might be used by subclasses. - service_thread_id: str | None = None - chat_message_store_state: ChatMessageStoreState | None = None + """ + if not messages: + messages = [] + if not isinstance(messages, list): + raise TypeError("Messages should be a list") + new_messages: list[ChatMessage] = [] + for msg in messages: + if isinstance(msg, ChatMessage): + new_messages.append(msg) + else: + new_messages.append(ChatMessage.from_dict(msg)) + self.messages: list[ChatMessage] = new_messages + + +class AgentThreadState(SerializationMixin): + """State model for serializing and deserializing thread information.""" - model_config = ConfigDict(arbitrary_types_allowed=True) + def __init__( + self, + *, + service_thread_id: str | None = None, + chat_message_store_state: ChatMessageStoreState | MutableMapping[str, Any] | None = None, + ) -> None: + """Create an AgentThread state. - @model_validator(mode="before") - def validate_only_one(cls, values: dict[str, Any]) -> dict[str, Any]: - if ( - isinstance(values, dict) - and values.get("service_thread_id") is not None - and values.get("chat_message_store_state") is not None - ): - raise AgentThreadException("Only one of service_thread_id or chat_message_store_state may be set.") - return values + Keyword Args: + service_thread_id: Optional ID of the thread managed by the agent service. + chat_message_store_state: Optional serialized state of the chat message store. 
+ """ + if service_thread_id is not None and chat_message_store_state is not None: + raise AgentThreadException("A thread cannot have both a service_thread_id and a chat_message_store.") + self.service_thread_id = service_thread_id + self.chat_message_store_state: ChatMessageStoreState | None = None + if chat_message_store_state is not None: + if isinstance(chat_message_store_state, dict): + self.chat_message_store_state = ChatMessageStoreState.from_dict(chat_message_store_state) + elif isinstance(chat_message_store_state, ChatMessageStoreState): + self.chat_message_store_state = chat_message_store_state + else: + raise TypeError("Could not parse ChatMessageStoreState.") TChatMessageStore = TypeVar("TChatMessageStore", bound="ChatMessageStore") @@ -213,7 +240,7 @@ async def list_messages(self) -> list[ChatMessage]: @classmethod async def deserialize( - cls: type[TChatMessageStore], serialized_store_state: Any, **kwargs: Any + cls: type[TChatMessageStore], serialized_store_state: MutableMapping[str, Any], **kwargs: Any ) -> TChatMessageStore: """Create a new ChatMessageStore instance from serialized state data. @@ -226,12 +253,12 @@ async def deserialize( Returns: A new ChatMessageStore instance populated with messages from the serialized state. """ - state = ChatMessageStoreState.model_validate(serialized_store_state, **kwargs) + state = ChatMessageStoreState.from_dict(serialized_store_state, **kwargs) if state.messages: return cls(messages=state.messages) return cls() - async def update_from_state(self, serialized_store_state: Any, **kwargs: Any) -> None: + async def update_from_state(self, serialized_store_state: MutableMapping[str, Any], **kwargs: Any) -> None: """Update the current ChatMessageStore instance from serialized state data. 
Args: @@ -242,11 +269,11 @@ async def update_from_state(self, serialized_store_state: Any, **kwargs: Any) -> """ if not serialized_store_state: return - state = ChatMessageStoreState.model_validate(serialized_store_state, **kwargs) + state = ChatMessageStoreState.from_dict(serialized_store_state, **kwargs) if state.messages: self.messages = state.messages - async def serialize(self, **kwargs: Any) -> Any: + async def serialize(self, **kwargs: Any) -> dict[str, Any]: """Serialize the current store state for persistence. Keyword Args: @@ -256,7 +283,7 @@ async def serialize(self, **kwargs: Any) -> Any: Serialized state data that can be used with deserialize_state. """ state = ChatMessageStoreState(messages=self.messages) - return state.model_dump(**kwargs) + return state.to_dict() TAgentThread = TypeVar("TAgentThread", bound="AgentThread") @@ -403,12 +430,12 @@ async def serialize(self, **kwargs: Any) -> dict[str, Any]: state = AgentThreadState( service_thread_id=self._service_thread_id, chat_message_store_state=chat_message_store_state ) - return state.model_dump() + return state.to_dict(exclude_none=False) @classmethod async def deserialize( cls: type[TAgentThread], - serialized_thread_state: dict[str, Any], + serialized_thread_state: MutableMapping[str, Any], *, message_store: ChatMessageStoreProtocol | None = None, **kwargs: Any, @@ -426,7 +453,7 @@ async def deserialize( Returns: A new AgentThread instance with properties set from the serialized state. 
""" - state = AgentThreadState.model_validate(serialized_thread_state) + state = AgentThreadState.from_dict(serialized_thread_state) if state.service_thread_id is not None: return cls(service_thread_id=state.service_thread_id) @@ -437,19 +464,19 @@ async def deserialize( if message_store is not None: try: - await message_store.update_from_state(state.chat_message_store_state, **kwargs) + await message_store.add_messages(state.chat_message_store_state.messages, **kwargs) except Exception as ex: raise AgentThreadException("Failed to deserialize the provided message store.") from ex return cls(message_store=message_store) try: - message_store = await ChatMessageStore.deserialize(state.chat_message_store_state, **kwargs) + message_store = ChatMessageStore(messages=state.chat_message_store_state.messages, **kwargs) except Exception as ex: raise AgentThreadException("Failed to deserialize the message store.") from ex return cls(message_store=message_store) async def update_from_thread_state( self, - serialized_thread_state: dict[str, Any], + serialized_thread_state: MutableMapping[str, Any], **kwargs: Any, ) -> None: """Deserializes the state from a dictionary into the thread properties. @@ -460,7 +487,7 @@ async def update_from_thread_state( Keyword Args: **kwargs: Additional arguments for deserialization. """ - state = AgentThreadState.model_validate(serialized_thread_state) + state = AgentThreadState.from_dict(serialized_thread_state) if state.service_thread_id is not None: self.service_thread_id = state.service_thread_id @@ -470,8 +497,8 @@ async def update_from_thread_state( if state.chat_message_store_state is None: return if self.message_store is not None: - await self.message_store.update_from_state(state.chat_message_store_state, **kwargs) + await self.message_store.add_messages(state.chat_message_store_state.messages, **kwargs) # If we don't have a chat message store yet, create an in-memory one. return # Create the message store from the default. 
- self.message_store = await ChatMessageStore.deserialize(state.chat_message_store_state, **kwargs) # type: ignore + self.message_store = ChatMessageStore(messages=state.chat_message_store_state.messages, **kwargs) diff --git a/python/packages/core/agent_framework/_types.py b/python/packages/core/agent_framework/_types.py index e1c40943cf..3294f78a5d 100644 --- a/python/packages/core/agent_framework/_types.py +++ b/python/packages/core/agent_framework/_types.py @@ -13,14 +13,14 @@ Sequence, ) from copy import deepcopy -from typing import Any, ClassVar, Literal, TypeVar, overload +from typing import Any, ClassVar, Literal, TypeVar, cast, overload from pydantic import BaseModel, ValidationError from ._logging import get_logger from ._serialization import SerializationMixin from ._tools import ToolProtocol, ai_function -from .exceptions import AdditionItemMismatch +from .exceptions import AdditionItemMismatch, ContentError if sys.version_info >= (3, 11): from typing import Self # pragma: no cover @@ -96,7 +96,10 @@ def _parse_content(content_data: MutableMapping[str, Any]) -> "Contents": content_data: Content data (dict) Returns: - Content object or raises ValidationError if parsing fails + Content object + + Raises: + ContentError if parsing fails """ content_type = str(content_data.get("type")) match content_type: @@ -125,7 +128,7 @@ def _parse_content(content_data: MutableMapping[str, Any]) -> "Contents": case "text_reasoning": return TextReasoningContent.from_dict(content_data) case _: - raise ValidationError([f"Unknown content type '{content_type}'"], model=Contents) # type: ignore + raise ContentError(f"Unknown content type '{content_type}'") def _parse_content_list(contents_data: Sequence[Any]) -> list["Contents"]: @@ -143,8 +146,8 @@ def _parse_content_list(contents_data: Sequence[Any]) -> list["Contents"]: try: content = _parse_content(content_data) contents.append(content) - except ValidationError as ve: - logger.warning(f"Skipping unknown content type or 
invalid content: {ve}") + except ContentError as exc: + logger.warning(f"Skipping unknown content type or invalid content: {exc}") else: # If it's already a content object, keep it as is contents.append(content_data) @@ -2098,8 +2101,8 @@ def _process_update( try: cont = _parse_content(content) message.contents.append(cont) - except ValidationError as ve: - logger.warning(f"Skipping unknown content type or invalid content: {ve}") + except ContentError as exc: + logger.warning(f"Skipping unknown content type or invalid content: {exc}") else: message.contents.append(content) # Incorporate the update's properties into the response. @@ -2816,12 +2819,12 @@ def __init__( kwargs: will be combined with additional_properties if provided. """ - contents = [] if contents is None else _parse_content_list(contents) + parsed_contents: list[Contents] = [] if contents is None else _parse_content_list(contents) if text is not None: if isinstance(text, str): text = TextContent(text=text) - contents.append(text) + parsed_contents.append(text) # Convert role from dict if needed (for SerializationMixin support) if isinstance(role, MutableMapping): @@ -2829,7 +2832,7 @@ def __init__( elif isinstance(role, str): role = Role(value=role) - self.contents = contents + self.contents = parsed_contents self.role = role self.author_name = author_name self.response_id = response_id @@ -3011,11 +3014,11 @@ def __init__( top_p: float | None = None, user: str | None = None, additional_properties: MutableMapping[str, Any] | None = None, + **kwargs: Any, ): """Initialize ChatOptions. Keyword Args: - additional_properties: Provider-specific additional properties. model_id: The AI model ID to use. allow_multiple_tool_calls: Whether to allow multiple tool calls. conversation_id: The conversation ID. @@ -3034,6 +3037,8 @@ def __init__( tools: List of available tools. top_p: The top-p value (must be between 0.0 and 1.0). user: The user ID. 
+ additional_properties: Provider-specific additional properties, can also be passed as kwargs. + **kwargs: Additional properties to include in additional_properties. """ # Validate numeric constraints and convert types as needed if frequency_penalty is not None: @@ -3055,7 +3060,12 @@ def __init__( if max_tokens is not None and max_tokens <= 0: raise ValueError("max_tokens must be greater than 0") - self.additional_properties = additional_properties or {} + if additional_properties is None: + additional_properties = {} + if kwargs: + additional_properties.update(kwargs) + + self.additional_properties = cast(dict[str, Any], additional_properties) self.model_id = model_id self.allow_multiple_tool_calls = allow_multiple_tool_calls self.conversation_id = conversation_id @@ -3128,48 +3138,11 @@ def _validate_tool_mode( case "none": return ToolMode.NONE case _: - raise ValidationError(f"Invalid tool choice: {tool_choice}") + raise ContentError(f"Invalid tool choice: {tool_choice}") if isinstance(tool_choice, (dict, Mapping)): return ToolMode.from_dict(tool_choice) # type: ignore return tool_choice - def to_provider_settings(self, *, by_alias: bool = True, exclude: set[str] | None = None) -> dict[str, Any]: - """Convert the ChatOptions to a dictionary suitable for provider requests. - - Keyword Args: - by_alias: Use alias names for fields if True. - exclude: Additional keys to exclude from the output. - - Returns: - Dictionary of settings for provider. 
- """ - default_exclude = {"additional_properties", "type"} # 'type' is for serialization, not API calls - # No tool choice if no tools are defined - if self.tools is None or len(self.tools) == 0: - default_exclude.add("tool_choice") - # No metadata and logit bias if they are empty - # Prevents 400 error - if not self.logit_bias: - default_exclude.add("logit_bias") - if not self.metadata: - default_exclude.add("metadata") - - merged_exclude = default_exclude if exclude is None else default_exclude | set(exclude) - - settings = self.to_dict(exclude_none=True, exclude=merged_exclude) - if by_alias and self.model_id is not None: - settings["model"] = settings.pop("model_id", None) - - # Serialize tool_choice to its string representation for provider settings - if "tool_choice" in settings and isinstance(self.tool_choice, ToolMode): - settings["tool_choice"] = self.tool_choice.serialize_model() - - settings = {k: v for k, v in settings.items() if v is not None} - settings.update(self.additional_properties) - for key in merged_exclude: - settings.pop(key, None) - return settings - def __and__(self, other: object) -> "ChatOptions": """Combines two ChatOptions instances. 
@@ -3189,14 +3162,13 @@ def __and__(self, other: object) -> "ChatOptions": combined.tools = list(self.tools) if self.tools else None combined.logit_bias = dict(self.logit_bias) if self.logit_bias else None combined.metadata = dict(self.metadata) if self.metadata else None - combined.additional_properties = dict(self.additional_properties) combined.response_format = response_format # Apply scalar and mapping updates from the other options updated_data = other.to_dict(exclude_none=True, exclude={"tools"}) logit_bias = updated_data.pop("logit_bias", {}) metadata = updated_data.pop("metadata", {}) - additional_properties = updated_data.pop("additional_properties", {}) + additional_properties: dict[str, Any] = updated_data.pop("additional_properties", {}) for key, value in updated_data.items(): setattr(combined, key, value) @@ -3205,10 +3177,18 @@ def __and__(self, other: object) -> "ChatOptions": # Preserve response_format from other if it exists, otherwise keep self's if other.response_format is not None: combined.response_format = other.response_format - combined.instructions = "\n".join([combined.instructions or "", other.instructions or ""]) - combined.logit_bias = {**(combined.logit_bias or {}), **logit_bias} - combined.metadata = {**(combined.metadata or {}), **metadata} - combined.additional_properties = {**(combined.additional_properties or {}), **additional_properties} + if other.instructions: + combined.instructions = "\n".join([combined.instructions or "", other.instructions or ""]) + + combined.logit_bias = ( + {**(combined.logit_bias or {}), **logit_bias} if logit_bias or combined.logit_bias else None + ) + combined.metadata = {**(combined.metadata or {}), **metadata} if metadata or combined.metadata else None + if combined.additional_properties and additional_properties: + combined.additional_properties.update(additional_properties) + else: + if additional_properties: + combined.additional_properties = additional_properties if other_tools: if 
combined.tools is None: combined.tools = list(other_tools) diff --git a/python/packages/core/agent_framework/_workflows/__init__.py b/python/packages/core/agent_framework/_workflows/__init__.py index f9e292465a..5af4049128 100644 --- a/python/packages/core/agent_framework/_workflows/__init__.py +++ b/python/packages/core/agent_framework/_workflows/__init__.py @@ -48,9 +48,6 @@ ) from ._executor import ( Executor, - RequestInfoExecutor, - RequestInfoMessage, - RequestResponse, handler, ) from ._function_executor import FunctionExecutor, executor @@ -76,6 +73,11 @@ MagenticStartMessage, StandardMagenticManager, ) +from ._request_info_executor import ( + RequestInfoExecutor, + RequestInfoMessage, + RequestResponse, +) from ._runner import Runner from ._runner_context import ( InProcRunnerContext, diff --git a/python/packages/core/agent_framework/_workflows/__init__.pyi b/python/packages/core/agent_framework/_workflows/__init__.pyi index 5b0dcc799d..db8f87eb4e 100644 --- a/python/packages/core/agent_framework/_workflows/__init__.pyi +++ b/python/packages/core/agent_framework/_workflows/__init__.pyi @@ -46,9 +46,6 @@ from ._events import ( ) from ._executor import ( Executor, - RequestInfoExecutor, - RequestInfoMessage, - RequestResponse, handler, ) from ._function_executor import FunctionExecutor, executor @@ -74,6 +71,11 @@ from ._magentic import ( MagenticStartMessage, StandardMagenticManager, ) +from ._request_info_executor import ( + RequestInfoExecutor, + RequestInfoMessage, + RequestResponse, +) from ._runner import Runner from ._runner_context import ( InProcRunnerContext, diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index 4d40eb6168..b92c845a4d 100644 --- a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ b/python/packages/core/agent_framework/_workflows/_agent_executor.py @@ -104,9 +104,6 @@ async def _run_agent_and_emit(self, ctx: 
WorkflowContext[AgentExecutorResponse, self._cache, thread=self._agent_thread, ): - if not update.text: - # Skip empty updates (no textual or structural content) - continue updates.append(update) await ctx.add_event(AgentRunUpdateEvent(self.id, update)) diff --git a/python/packages/core/agent_framework/_workflows/_events.py b/python/packages/core/agent_framework/_workflows/_events.py index 90a2f1f174..58e699e2b4 100644 --- a/python/packages/core/agent_framework/_workflows/_events.py +++ b/python/packages/core/agent_framework/_workflows/_events.py @@ -11,7 +11,7 @@ from agent_framework import AgentRunResponse, AgentRunResponseUpdate if TYPE_CHECKING: - from ._executor import RequestInfoMessage + from ._request_info_executor import RequestInfoMessage class WorkflowEventSource(str, Enum): diff --git a/python/packages/core/agent_framework/_workflows/_executor.py b/python/packages/core/agent_framework/_workflows/_executor.py index 80dd30e402..1f822e870a 100644 --- a/python/packages/core/agent_framework/_workflows/_executor.py +++ b/python/packages/core/agent_framework/_workflows/_executor.py @@ -2,61 +2,29 @@ import contextlib import functools -import importlib import inspect -import json import logging -import uuid -from collections.abc import Awaitable, Callable, Iterable, Mapping, Sequence -from dataclasses import asdict, dataclass, field, fields, is_dataclass -from textwrap import shorten -from typing import Any, ClassVar, Generic, TypeVar, cast +from collections.abc import Awaitable, Callable +from typing import Any, TypeVar from ..observability import create_processing_span -from ._checkpoint import WorkflowCheckpoint from ._events import ( ExecutorCompletedEvent, ExecutorFailedEvent, ExecutorInvokedEvent, - RequestInfoEvent, WorkflowErrorDetails, _framework_event_origin, # type: ignore[reportPrivateUsage] ) from ._model_utils import DictConvertible -from ._runner_context import Message, RunnerContext, _decode_checkpoint_value # type: ignore +from ._runner_context 
import Message, RunnerContext # type: ignore from ._shared_state import SharedState from ._typing_utils import is_instance_of from ._workflow_context import WorkflowContext, validate_function_signature logger = logging.getLogger(__name__) -# region Executor - - -@dataclass -class PendingRequestDetails: - """Lightweight information about a pending request captured in a checkpoint.""" - - request_id: str - prompt: str | None = None - draft: str | None = None - iteration: int | None = None - source_executor_id: str | None = None - original_request: "RequestInfoMessage | dict[str, Any] | None" = None - - -@dataclass -class WorkflowCheckpointSummary: - """Human-readable summary of a workflow checkpoint.""" - - checkpoint_id: str - iteration_count: int - targets: list[str] - executor_states: list[str] - status: str - draft_preview: str | None - pending_requests: list[PendingRequestDetails] +# region Executor class Executor(DictConvertible): """Base class for all workflow executors that process messages and perform computations. @@ -542,802 +510,3 @@ async def wrapper(self: ExecutorT, message: Any, ctx: ContextT) -> Any: # endregion: Handler Decorator - - -# region Request/Response Types -@dataclass -class RequestInfoMessage: - """Base class for all request messages in workflows. - - Any message that should be routed to the RequestInfoExecutor for external - handling must inherit from this class. This ensures type safety and makes - the request/response pattern explicit. - """ - - request_id: str = field(default_factory=lambda: str(uuid.uuid4())) - """Unique identifier for correlating requests and responses.""" - - source_executor_id: str | None = None - """ID of the executor expecting a response to this request. 
- May differ from the executor that sent the request if intercepted and forwarded.""" - - -TRequest = TypeVar("TRequest", bound="RequestInfoMessage") -TResponse = TypeVar("TResponse") - - -@dataclass -class RequestResponse(Generic[TRequest, TResponse]): - """Response type for request/response correlation in workflows. - - This type is used by RequestInfoExecutor to create correlated responses - that include the original request context for proper message routing. - """ - - data: TResponse - """The response data returned from handling the request.""" - - original_request: TRequest - """The original request that this response corresponds to.""" - - request_id: str - """The ID of the original request.""" - - -# endregion: Request/Response Types - - -# region Request Info Executor -class RequestInfoExecutor(Executor): - """Built-in executor that handles request/response patterns in workflows. - - This executor acts as a gateway for external information requests. When it receives - a request message, it saves the request details and emits a RequestInfoEvent. When - a response is provided externally, it emits the response as a message. - """ - - _PENDING_SHARED_STATE_KEY: ClassVar[str] = "_af_pending_request_info" - - def __init__(self, id: str): - """Initialize the RequestInfoExecutor with a unique ID. - - Args: - id: Unique ID for this RequestInfoExecutor. 
- """ - super().__init__(id=id) - self._request_events: dict[str, RequestInfoEvent] = {} - - @handler - async def run(self, message: RequestInfoMessage, ctx: WorkflowContext) -> None: - """Run the RequestInfoExecutor with the given message.""" - # Use source_executor_id from message if available, otherwise fall back to context - source_executor_id = message.source_executor_id or ctx.get_source_executor_id() - - event = RequestInfoEvent( - request_id=message.request_id, - source_executor_id=source_executor_id, - request_type=type(message), - request_data=message, - ) - self._request_events[message.request_id] = event - await self._record_pending_request_snapshot(message, source_executor_id, ctx) - await ctx.add_event(event) - - async def handle_response( - self, - response_data: Any, - request_id: str, - ctx: WorkflowContext[RequestResponse[RequestInfoMessage, Any]], - ) -> None: - """Handle a response to a request. - - Args: - request_id: The ID of the request to which this response corresponds. - response_data: The data returned in the response. - ctx: The workflow context for sending the response. 
- """ - event = self._request_events.get(request_id) - if event is None: - event = await self._rehydrate_request_event(request_id, ctx) - if event is None: - raise ValueError(f"No request found with ID: {request_id}") - - self._request_events.pop(request_id, None) - - # Create a correlated response that includes both the response data and original request - if not isinstance(event.data, RequestInfoMessage): - raise TypeError(f"Expected RequestInfoMessage, got {type(event.data)}") - correlated_response = RequestResponse(data=response_data, original_request=event.data, request_id=request_id) - await ctx.send_message(correlated_response, target_id=event.source_executor_id) - - await self._clear_pending_request_snapshot(request_id, ctx) - - async def _record_pending_request_snapshot( - self, - request: RequestInfoMessage, - source_executor_id: str, - ctx: WorkflowContext[Any], - ) -> None: - snapshot = self._build_request_snapshot(request, source_executor_id) - - pending = await self._load_pending_request_state(ctx) - pending[request.request_id] = snapshot - await self._persist_pending_request_state(pending, ctx) - await self._write_executor_state(ctx, pending) - - async def _clear_pending_request_snapshot(self, request_id: str, ctx: WorkflowContext[Any]) -> None: - pending = await self._load_pending_request_state(ctx) - if request_id in pending: - pending.pop(request_id, None) - await self._persist_pending_request_state(pending, ctx) - await self._write_executor_state(ctx, pending) - - async def _load_pending_request_state(self, ctx: WorkflowContext[Any]) -> dict[str, Any]: - try: - existing = await ctx.get_shared_state(self._PENDING_SHARED_STATE_KEY) - except KeyError: - return {} - except Exception as exc: # pragma: no cover - transport specific - logger.warning(f"RequestInfoExecutor {self.id} failed to read pending request state: {exc}") - return {} - - if not isinstance(existing, dict): - if existing not in (None, {}): - logger.warning( - f"RequestInfoExecutor 
{self.id} encountered non-dict pending state " - f"({type(existing).__name__}); resetting." - ) - return {} - - return dict(existing) # type: ignore[arg-type] - - async def _persist_pending_request_state(self, pending: dict[str, Any], ctx: WorkflowContext[Any]) -> None: - await self._safe_set_shared_state(ctx, pending) - await self._safe_set_runner_state(ctx, pending) - - async def _safe_set_shared_state(self, ctx: WorkflowContext[Any], pending: dict[str, Any]) -> None: - try: - await ctx.set_shared_state(self._PENDING_SHARED_STATE_KEY, pending) - except Exception as exc: # pragma: no cover - transport specific - logger.warning(f"RequestInfoExecutor {self.id} failed to update shared pending state: {exc}") - - async def _safe_set_runner_state(self, ctx: WorkflowContext[Any], pending: dict[str, Any]) -> None: - try: - await ctx.set_state({"pending_requests": pending}) - except Exception as exc: # pragma: no cover - transport specific - logger.warning(f"RequestInfoExecutor {self.id} failed to update runner state with pending requests: {exc}") - - def snapshot_state(self) -> dict[str, Any]: - """Serialize pending requests so checkpoint restoration can resume seamlessly.""" - - def _encode_event(event: RequestInfoEvent) -> dict[str, Any]: - request_data = event.data - payload: dict[str, Any] - data_cls = request_data.__class__ if request_data is not None else type(None) - - payload = self._encode_request_payload(request_data, data_cls) - - return { - "source_executor_id": event.source_executor_id, - "request_type": f"{event.request_type.__module__}:{event.request_type.__qualname__}", - "request_data": payload, - } - - return { - "request_events": {rid: _encode_event(event) for rid, event in self._request_events.items()}, - } - - def _encode_request_payload(self, request_data: RequestInfoMessage | None, data_cls: type[Any]) -> dict[str, Any]: - if request_data is None or isinstance(request_data, (str, int, float, bool)): - return { - "kind": "raw", - "type": 
f"{data_cls.__module__}:{data_cls.__qualname__}", - "value": request_data, - } - - if is_dataclass(request_data) and not isinstance(request_data, type): - dataclass_instance = cast(Any, request_data) - safe_value = self._make_json_safe(asdict(dataclass_instance)) - return { - "kind": "dataclass", - "type": f"{data_cls.__module__}:{data_cls.__qualname__}", - "value": safe_value, - } - - to_dict_fn = getattr(request_data, "to_dict", None) - if callable(to_dict_fn): - try: - dumped = to_dict_fn() - except TypeError: - dumped = to_dict_fn() - safe_value = self._make_json_safe(dumped) - return { - "kind": "dict", - "type": f"{data_cls.__module__}:{data_cls.__qualname__}", - "value": safe_value, - } - - to_json_fn = getattr(request_data, "to_json", None) - if callable(to_json_fn): - try: - dumped = to_json_fn() - except TypeError: - dumped = to_json_fn() - converted = dumped - if isinstance(dumped, (str, bytes, bytearray)): - decoded: str | bytes | bytearray - if isinstance(dumped, (bytes, bytearray)): - try: - decoded = dumped.decode() - except Exception: - decoded = dumped - else: - decoded = dumped - try: - converted = json.loads(decoded) - except Exception: - converted = decoded - safe_value = self._make_json_safe(converted) - return { - "kind": "dict" if isinstance(converted, dict) else "json", - "type": f"{data_cls.__module__}:{data_cls.__qualname__}", - "value": safe_value, - } - - details = self._serialise_request_details(request_data) - if details is not None: - safe_value = self._make_json_safe(details) - return { - "kind": "raw", - "type": f"{data_cls.__module__}:{data_cls.__qualname__}", - "value": safe_value, - } - - safe_value = self._make_json_safe(request_data) - return { - "kind": "raw", - "type": f"{data_cls.__module__}:{data_cls.__qualname__}", - "value": safe_value, - } - - def restore_state(self, state: dict[str, Any]) -> None: - """Restore pending request bookkeeping from checkpoint state.""" - self._request_events.clear() - stored_events = 
state.get("request_events", {}) - - for request_id, payload in stored_events.items(): - request_type_qual = payload.get("request_type", "") - try: - request_type = self._import_qualname(request_type_qual) - except Exception as exc: # pragma: no cover - defensive fallback - logger.debug( - "RequestInfoExecutor %s failed to import %s during restore: %s", - self.id, - request_type_qual, - exc, - ) - request_type = RequestInfoMessage - request_data_meta = payload.get("request_data", {}) - request_data = self._decode_request_data(request_data_meta) - event = RequestInfoEvent( - request_id=request_id, - source_executor_id=payload.get("source_executor_id", ""), - request_type=request_type, - request_data=request_data, - ) - self._request_events[request_id] = event - - @staticmethod - def _import_qualname(qualname: str) -> type[Any]: - module_name, _, type_name = qualname.partition(":") - if not module_name or not type_name: - raise ValueError(f"Invalid qualified name: {qualname}") - module = importlib.import_module(module_name) - attr: Any = module - for part in type_name.split("."): - attr = getattr(attr, part) - if not isinstance(attr, type): - raise TypeError(f"Resolved object is not a type: {qualname}") - return attr - - def _decode_request_data(self, metadata: dict[str, Any]) -> RequestInfoMessage: - kind = metadata.get("kind") - type_name = metadata.get("type", "") - value: Any = metadata.get("value", {}) - if type_name: - try: - imported = self._import_qualname(type_name) - except Exception as exc: # pragma: no cover - defensive fallback - logger.debug( - "RequestInfoExecutor %s failed to import %s during decode: %s", - self.id, - type_name, - exc, - ) - imported = RequestInfoMessage - else: - imported = RequestInfoMessage - target_cls: type[RequestInfoMessage] - if isinstance(imported, type) and issubclass(imported, RequestInfoMessage): - target_cls = imported - else: - target_cls = RequestInfoMessage - - if kind == "dataclass" and isinstance(value, dict): - with 
contextlib.suppress(TypeError): - return target_cls(**value) # type: ignore[arg-type] - - # Backwards-compat handling for checkpoints that used to store pydantic as "dict" - if kind in {"dict", "pydantic", "json"} and isinstance(value, dict): - from_dict = getattr(target_cls, "from_dict", None) - if callable(from_dict): - with contextlib.suppress(Exception): - return cast(RequestInfoMessage, from_dict(value)) - - if kind == "json" and isinstance(value, str): - from_json = getattr(target_cls, "from_json", None) - if callable(from_json): - with contextlib.suppress(Exception): - return cast(RequestInfoMessage, from_json(value)) - with contextlib.suppress(Exception): - parsed = json.loads(value) - if isinstance(parsed, dict): - return self._decode_request_data({"kind": "dict", "type": type_name, "value": parsed}) - - if isinstance(value, dict): - with contextlib.suppress(TypeError): - return target_cls(**value) # type: ignore[arg-type] - instance = object.__new__(target_cls) - instance.__dict__.update(value) # type: ignore[arg-type] - return instance - - with contextlib.suppress(Exception): - return target_cls() - return RequestInfoMessage() - - async def _write_executor_state(self, ctx: WorkflowContext[Any], pending: dict[str, Any]) -> None: - state = self.snapshot_state() - state["pending_requests"] = pending - try: - await ctx.set_state(state) - except Exception as exc: # pragma: no cover - transport specific - logger.warning(f"RequestInfoExecutor {self.id} failed to persist executor state: {exc}") - - def _build_request_snapshot( - self, - request: RequestInfoMessage, - source_executor_id: str, - ) -> dict[str, Any]: - snapshot: dict[str, Any] = { - "request_id": request.request_id, - "source_executor_id": source_executor_id, - "request_type": f"{type(request).__module__}:{type(request).__name__}", - "summary": repr(request), - } - - details = self._serialise_request_details(request) - if details: - snapshot["details"] = details - for key in ("prompt", "draft", 
"iteration"): - if key in details and key not in snapshot: - snapshot[key] = details[key] - - return snapshot - - def _serialise_request_details(self, request: RequestInfoMessage) -> dict[str, Any] | None: - if is_dataclass(request): - data = self._make_json_safe(asdict(request)) - if isinstance(data, dict): - return cast(dict[str, Any], data) - return None - - to_dict = getattr(request, "to_dict", None) - if callable(to_dict): - try: - dump = self._make_json_safe(to_dict()) - except TypeError: - dump = self._make_json_safe(to_dict()) - if isinstance(dump, dict): - return cast(dict[str, Any], dump) - return None - - to_json = getattr(request, "to_json", None) - if callable(to_json): - try: - raw = to_json() - except TypeError: - raw = to_json() - converted = raw - if isinstance(raw, (str, bytes, bytearray)): - decoded: str | bytes | bytearray - if isinstance(raw, (bytes, bytearray)): - try: - decoded = raw.decode() - except Exception: - decoded = raw - else: - decoded = raw - try: - converted = json.loads(decoded) - except Exception: - converted = decoded - dump = self._make_json_safe(converted) - if isinstance(dump, dict): - return cast(dict[str, Any], dump) - return None - - attrs = getattr(request, "__dict__", None) - if isinstance(attrs, dict): - cleaned = self._make_json_safe(attrs) - if isinstance(cleaned, dict): - return cast(dict[str, Any], cleaned) - - return None - - def _make_json_safe(self, value: Any) -> Any: - if value is None or isinstance(value, (str, int, float, bool)): - return value - if isinstance(value, Mapping): - safe_dict: dict[str, Any] = {} - for key, val in value.items(): # type: ignore[attr-defined] - safe_dict[str(key)] = self._make_json_safe(val) # type: ignore[arg-type] - return safe_dict - if isinstance(value, Sequence) and not isinstance(value, (str, bytes, bytearray)): - return [self._make_json_safe(item) for item in value] # type: ignore[misc] - return repr(value) - - async def has_pending_request(self, request_id: str, ctx: 
WorkflowContext[Any]) -> bool: - if request_id in self._request_events: - return True - snapshot = await self._get_pending_request_snapshot(request_id, ctx) - return snapshot is not None - - async def _rehydrate_request_event( - self, - request_id: str, - ctx: WorkflowContext[Any], - ) -> RequestInfoEvent | None: - snapshot = await self._get_pending_request_snapshot(request_id, ctx) - if snapshot is None: - return None - - source_executor_id = snapshot.get("source_executor_id") - if not isinstance(source_executor_id, str) or not source_executor_id: - return None - - request = self._construct_request_from_snapshot(snapshot) - if request is None: - return None - - event = RequestInfoEvent( - request_id=request_id, - source_executor_id=source_executor_id, - request_type=type(request), - request_data=request, - ) - self._request_events[request_id] = event - return event - - async def _get_pending_request_snapshot(self, request_id: str, ctx: WorkflowContext[Any]) -> dict[str, Any] | None: - pending = await self._collect_pending_request_snapshots(ctx) - snapshot = pending.get(request_id) - if snapshot is None: - return None - return snapshot - - async def _collect_pending_request_snapshots(self, ctx: WorkflowContext[Any]) -> dict[str, dict[str, Any]]: - combined: dict[str, dict[str, Any]] = {} - - try: - shared_pending = await ctx.get_shared_state(self._PENDING_SHARED_STATE_KEY) - except KeyError: - shared_pending = None - except Exception as exc: # pragma: no cover - transport specific - logger.warning(f"RequestInfoExecutor {self.id} failed to read shared pending state during rehydrate: {exc}") - shared_pending = None - - if isinstance(shared_pending, dict): - for key, value in shared_pending.items(): # type: ignore[attr-defined] - if isinstance(key, str) and isinstance(value, dict): - combined[key] = cast(dict[str, Any], value) - - try: - state = await ctx.get_state() - except Exception as exc: # pragma: no cover - transport specific - 
logger.warning(f"RequestInfoExecutor {self.id} failed to read runner state during rehydrate: {exc}") - state = None - - if isinstance(state, dict): - state_pending = state.get("pending_requests") - if isinstance(state_pending, dict): - for key, value in state_pending.items(): # type: ignore[attr-defined] - if isinstance(key, str) and isinstance(value, dict) and key not in combined: - combined[key] = cast(dict[str, Any], value) - - return combined - - def _construct_request_from_snapshot(self, snapshot: dict[str, Any]) -> RequestInfoMessage | None: - details_raw = snapshot.get("details") - details: dict[str, Any] = cast(dict[str, Any], details_raw) if isinstance(details_raw, dict) else {} - - request_cls: type[RequestInfoMessage] = RequestInfoMessage - request_type_str = snapshot.get("request_type") - if isinstance(request_type_str, str) and ":" in request_type_str: - module_name, class_name = request_type_str.split(":", 1) - try: - module = importlib.import_module(module_name) - candidate = getattr(module, class_name) - if isinstance(candidate, type) and issubclass(candidate, RequestInfoMessage): - request_cls = candidate - except Exception as exc: - logger.warning(f"RequestInfoExecutor {self.id} could not import {module_name}.{class_name}: {exc}") - request_cls = RequestInfoMessage - - request: RequestInfoMessage | None = self._instantiate_request(request_cls, details) - - if request is None and request_cls is not RequestInfoMessage: - request = self._instantiate_request(RequestInfoMessage, details) - - if request is None: - logger.warning( - f"RequestInfoExecutor {self.id} could not reconstruct request " - f"{request_type_str or RequestInfoMessage.__name__} from snapshot keys {sorted(details.keys())}" - ) - return None - - for key, value in details.items(): - if key == "request_id": - continue - try: - setattr(request, key, value) - except Exception as exc: - logger.debug( - f"RequestInfoExecutor {self.id} could not set attribute {key} on 
{type(request).__name__}: {exc}" - ) - continue - - snapshot_request_id = snapshot.get("request_id") - if isinstance(snapshot_request_id, str) and snapshot_request_id: - try: - request.request_id = snapshot_request_id - except Exception as exc: - logger.debug( - f"RequestInfoExecutor {self.id} could not apply snapshot " - f"request_id to {type(request).__name__}: {exc}" - ) - - return request - - def _instantiate_request( - self, - request_cls: type[RequestInfoMessage], - details: dict[str, Any], - ) -> RequestInfoMessage | None: - try: - from_dict = getattr(request_cls, "from_dict", None) - if callable(from_dict): - return cast(RequestInfoMessage, from_dict(details)) - except (TypeError, ValueError) as exc: - logger.debug(f"RequestInfoExecutor {self.id} failed to hydrate {request_cls.__name__} via from_dict: {exc}") - except Exception as exc: - logger.warning( - f"RequestInfoExecutor {self.id} encountered unexpected error during " - f"{request_cls.__name__}.from_dict: {exc}" - ) - - if is_dataclass(request_cls): - try: - field_names = {f.name for f in fields(request_cls)} - ctor_kwargs = {name: details[name] for name in field_names if name in details} - return request_cls(**ctor_kwargs) - except (TypeError, ValueError) as exc: - logger.debug( - f"RequestInfoExecutor {self.id} could not instantiate dataclass " - f"{request_cls.__name__} with snapshot data: {exc}" - ) - except Exception as exc: - logger.warning( - f"RequestInfoExecutor {self.id} encountered unexpected error " - f"constructing dataclass {request_cls.__name__}: {exc}" - ) - - try: - instance = request_cls() - except Exception as exc: - logger.warning( - f"RequestInfoExecutor {self.id} could not instantiate {request_cls.__name__} without arguments: {exc}" - ) - return None - - for key, value in details.items(): - if key == "request_id": - continue - try: - setattr(instance, key, value) - except Exception as exc: - logger.debug( - f"RequestInfoExecutor {self.id} could not set attribute {key} on " - 
f"{request_cls.__name__} during instantiation: {exc}" - ) - continue - - return instance - - @staticmethod - def pending_requests_from_checkpoint( - checkpoint: WorkflowCheckpoint, - *, - request_executor_ids: Iterable[str] | None = None, - ) -> list[PendingRequestDetails]: - executor_filter: set[str] | None = None - if request_executor_ids is not None: - executor_filter = {str(value) for value in request_executor_ids} - - pending: dict[str, PendingRequestDetails] = {} - - shared_map = checkpoint.shared_state.get(RequestInfoExecutor._PENDING_SHARED_STATE_KEY) - if isinstance(shared_map, Mapping): - for request_id, snapshot in shared_map.items(): # type: ignore[attr-defined] - RequestInfoExecutor._merge_snapshot(pending, str(request_id), snapshot) # type: ignore[arg-type] - - for state in checkpoint.executor_states.values(): - if not isinstance(state, Mapping): - continue - inner = state.get("pending_requests") - if isinstance(inner, Mapping): - for request_id, snapshot in inner.items(): # type: ignore[attr-defined] - RequestInfoExecutor._merge_snapshot(pending, str(request_id), snapshot) # type: ignore[arg-type] - - for source_id, message_list in checkpoint.messages.items(): - if executor_filter is not None and source_id not in executor_filter: - continue - if not isinstance(message_list, list): - continue - for message in message_list: - if not isinstance(message, Mapping): - continue - payload = _decode_checkpoint_value(message.get("data")) - RequestInfoExecutor._merge_message_payload(pending, payload, message) - - return list(pending.values()) - - @staticmethod - def checkpoint_summary( - checkpoint: WorkflowCheckpoint, - *, - request_executor_ids: Iterable[str] | None = None, - preview_width: int = 70, - ) -> WorkflowCheckpointSummary: - targets = sorted(checkpoint.messages.keys()) - executor_states = sorted(checkpoint.executor_states.keys()) - pending = RequestInfoExecutor.pending_requests_from_checkpoint( - checkpoint, 
request_executor_ids=request_executor_ids - ) - - draft_preview: str | None = None - for entry in pending: - if entry.draft: - draft_preview = shorten(entry.draft, width=preview_width, placeholder="…") - break - - status = "idle" - if pending: - status = "awaiting human response" - elif not checkpoint.messages and "finalise" in executor_states: - status = "completed" - elif checkpoint.messages: - status = "awaiting next superstep" - elif request_executor_ids is not None and any(tid in targets for tid in request_executor_ids): - status = "awaiting request delivery" - - return WorkflowCheckpointSummary( - checkpoint_id=checkpoint.checkpoint_id, - iteration_count=checkpoint.iteration_count, - targets=targets, - executor_states=executor_states, - status=status, - draft_preview=draft_preview, - pending_requests=pending, - ) - - @staticmethod - def _merge_snapshot( - pending: dict[str, PendingRequestDetails], - request_id: str, - snapshot: Any, - ) -> None: - if not request_id or not isinstance(snapshot, Mapping): - return - - details = pending.setdefault(request_id, PendingRequestDetails(request_id=request_id)) - - RequestInfoExecutor._apply_update( - details, - prompt=snapshot.get("prompt"), # type: ignore[attr-defined] - draft=snapshot.get("draft"), # type: ignore[attr-defined] - iteration=snapshot.get("iteration"), # type: ignore[attr-defined] - source_executor_id=snapshot.get("source_executor_id"), # type: ignore[attr-defined] - ) - - extra = snapshot.get("details") # type: ignore[attr-defined] - if isinstance(extra, Mapping): - RequestInfoExecutor._apply_update( - details, - prompt=extra.get("prompt"), # type: ignore[attr-defined] - draft=extra.get("draft"), # type: ignore[attr-defined] - iteration=extra.get("iteration"), # type: ignore[attr-defined] - ) - - @staticmethod - def _merge_message_payload( - pending: dict[str, PendingRequestDetails], - payload: Any, - raw_message: Mapping[str, Any], - ) -> None: - if isinstance(payload, RequestResponse): - request_id = 
payload.request_id or RequestInfoExecutor._get_field(payload.original_request, "request_id") # type: ignore[arg-type] - if not request_id: - return - details = pending.setdefault(request_id, PendingRequestDetails(request_id=request_id)) - RequestInfoExecutor._apply_update( - details, - prompt=RequestInfoExecutor._get_field(payload.original_request, "prompt"), # type: ignore[arg-type] - draft=RequestInfoExecutor._get_field(payload.original_request, "draft"), # type: ignore[arg-type] - iteration=RequestInfoExecutor._get_field(payload.original_request, "iteration"), # type: ignore[arg-type] - source_executor_id=raw_message.get("source_id"), - original_request=payload.original_request, # type: ignore[arg-type] - ) - elif isinstance(payload, RequestInfoMessage): - request_id = getattr(payload, "request_id", None) - if not request_id: - return - details = pending.setdefault(request_id, PendingRequestDetails(request_id=request_id)) - RequestInfoExecutor._apply_update( - details, - prompt=getattr(payload, "prompt", None), - draft=getattr(payload, "draft", None), - iteration=getattr(payload, "iteration", None), - source_executor_id=raw_message.get("source_id"), - original_request=payload, - ) - - @staticmethod - def _apply_update( - details: PendingRequestDetails, - *, - prompt: Any = None, - draft: Any = None, - iteration: Any = None, - source_executor_id: Any = None, - original_request: Any = None, - ) -> None: - if prompt and not details.prompt: - details.prompt = str(prompt) - if draft and not details.draft: - details.draft = str(draft) - if iteration is not None and details.iteration is None: - coerced = RequestInfoExecutor._coerce_int(iteration) - if coerced is not None: - details.iteration = coerced - if source_executor_id and not details.source_executor_id: - details.source_executor_id = str(source_executor_id) - if original_request is not None and details.original_request is None: - details.original_request = original_request - - @staticmethod - def _get_field(obj: 
Any, key: str) -> Any: - if obj is None: - return None - if isinstance(obj, Mapping): - return obj.get(key) # type: ignore[attr-defined,return-value] - return getattr(obj, key, None) - - @staticmethod - def _coerce_int(value: Any) -> int | None: - try: - return int(value) - except (TypeError, ValueError): - return None - - -# endregion: Request Info Executor diff --git a/python/packages/core/agent_framework/_workflows/_magentic.py b/python/packages/core/agent_framework/_workflows/_magentic.py index 0f61f1ca7e..3ee1c10690 100644 --- a/python/packages/core/agent_framework/_workflows/_magentic.py +++ b/python/packages/core/agent_framework/_workflows/_magentic.py @@ -27,8 +27,9 @@ from ._checkpoint import CheckpointStorage, WorkflowCheckpoint from ._events import WorkflowEvent -from ._executor import Executor, RequestInfoMessage, RequestResponse, handler +from ._executor import Executor, handler from ._model_utils import DictConvertible, encode_value +from ._request_info_executor import RequestInfoMessage, RequestResponse from ._workflow import Workflow, WorkflowBuilder, WorkflowRunResult from ._workflow_context import WorkflowContext @@ -971,7 +972,6 @@ def __init__( self._require_plan_signoff = require_plan_signoff self._plan_review_round = 0 self._max_plan_review_rounds = max_plan_review_rounds - self._inner_loop_lock = asyncio.Lock() # Registry of agent executors for internal coordination (e.g., resets) self._agent_executors = {} # Terminal state marker to stop further processing after completion/limits @@ -1103,8 +1103,6 @@ async def handle_start_message( task=message.task, participant_descriptions=self._participants, ) - # Record the original user task in orchestrator context (no broadcast) - self._context.chat_history.append(message.task) self._state_restored = True # Non-streaming callback for the orchestrator receipt of the task if self._message_callback: @@ -1316,10 +1314,10 @@ async def _run_inner_loop( """Run the inner orchestration loop. 
Coordination phase. Serialized with a lock.""" if self._context is None or self._task_ledger is None: raise RuntimeError("Context or task ledger not initialized") - async with self._inner_loop_lock: - await self._run_inner_loop_locked(context) - async def _run_inner_loop_locked( + await self._run_inner_loop_helper(context) + + async def _run_inner_loop_helper( self, context: WorkflowContext[MagenticResponseMessage | MagenticRequestMessage, ChatMessage], ) -> None: @@ -1939,7 +1937,7 @@ async def _on_agent_delta(agent_id: str, update: AgentRunResponseUpdate, is_fina workflow_builder = WorkflowBuilder().set_start_executor(orchestrator_executor) if self._enable_plan_review: - from ._executor import RequestInfoExecutor + from ._request_info_executor import RequestInfoExecutor request_info = RequestInfoExecutor(id="magentic_plan_review") workflow_builder = ( diff --git a/python/packages/core/agent_framework/_workflows/_request_info_executor.py b/python/packages/core/agent_framework/_workflows/_request_info_executor.py new file mode 100644 index 0000000000..ef6eb3fc22 --- /dev/null +++ b/python/packages/core/agent_framework/_workflows/_request_info_executor.py @@ -0,0 +1,841 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import contextlib +import importlib +import json +import logging +import uuid +from collections.abc import Iterable, Mapping, Sequence +from dataclasses import asdict, dataclass, field, fields, is_dataclass +from textwrap import shorten +from typing import Any, ClassVar, Generic, TypeVar, cast + +from ._checkpoint import WorkflowCheckpoint +from ._events import ( + RequestInfoEvent, # type: ignore[reportPrivateUsage] +) +from ._executor import Executor, handler +from ._runner_context import _decode_checkpoint_value # type: ignore +from ._workflow_context import WorkflowContext + +logger = logging.getLogger(__name__) + + +@dataclass +class PendingRequestDetails: + """Lightweight information about a pending request captured in a checkpoint.""" + + request_id: str + prompt: str | None = None + draft: str | None = None + iteration: int | None = None + source_executor_id: str | None = None + original_request: "RequestInfoMessage | dict[str, Any] | None" = None + + +@dataclass +class WorkflowCheckpointSummary: + """Human-readable summary of a workflow checkpoint.""" + + checkpoint_id: str + iteration_count: int + targets: list[str] + executor_states: list[str] + status: str + draft_preview: str | None + pending_requests: list[PendingRequestDetails] + + +@dataclass +class RequestInfoMessage: + """Base class for all request messages in workflows. + + Any message that should be routed to the RequestInfoExecutor for external + handling must inherit from this class. This ensures type safety and makes + the request/response pattern explicit. + """ + + request_id: str = field(default_factory=lambda: str(uuid.uuid4())) + """Unique identifier for correlating requests and responses.""" + + source_executor_id: str | None = None + """ID of the executor expecting a response to this request. 
+ May differ from the executor that sent the request if intercepted and forwarded.""" + + +TRequest = TypeVar("TRequest", bound="RequestInfoMessage") +TResponse = TypeVar("TResponse") + + +@dataclass +class RequestResponse(Generic[TRequest, TResponse]): + """Response type for request/response correlation in workflows. + + This type is used by RequestInfoExecutor to create correlated responses + that include the original request context for proper message routing. + """ + + data: TResponse + """The response data returned from handling the request.""" + + original_request: TRequest + """The original request that this response corresponds to.""" + + request_id: str + """The ID of the original request.""" + + +# endregion: Request/Response Types + + +# region Request Info Executor +class RequestInfoExecutor(Executor): + """Built-in executor that handles request/response patterns in workflows. + + This executor acts as a gateway for external information requests. When it receives + a request message, it saves the request details and emits a RequestInfoEvent. When + a response is provided externally, it emits the response as a message. + """ + + _PENDING_SHARED_STATE_KEY: ClassVar[str] = "_af_pending_request_info" + + def __init__(self, id: str): + """Initialize the RequestInfoExecutor with a unique ID. + + Args: + id: Unique ID for this RequestInfoExecutor. 
+ """ + super().__init__(id=id) + self._request_events: dict[str, RequestInfoEvent] = {} + + @handler + async def run(self, message: RequestInfoMessage, ctx: WorkflowContext) -> None: + """Run the RequestInfoExecutor with the given message.""" + # Use source_executor_id from message if available, otherwise fall back to context + source_executor_id = message.source_executor_id or ctx.get_source_executor_id() + + event = RequestInfoEvent( + request_id=message.request_id, + source_executor_id=source_executor_id, + request_type=type(message), + request_data=message, + ) + self._request_events[message.request_id] = event + await self._record_pending_request_snapshot(message, source_executor_id, ctx) + await ctx.add_event(event) + + async def handle_response( + self, + response_data: Any, + request_id: str, + ctx: WorkflowContext[RequestResponse[RequestInfoMessage, Any]], + ) -> None: + """Handle a response to a request. + + Args: + request_id: The ID of the request to which this response corresponds. + response_data: The data returned in the response. + ctx: The workflow context for sending the response. 
+ """ + event = self._request_events.get(request_id) + if event is None: + event = await self._rehydrate_request_event(request_id, ctx) + if event is None: + raise ValueError(f"No request found with ID: {request_id}") + + self._request_events.pop(request_id, None) + + # Create a correlated response that includes both the response data and original request + if not isinstance(event.data, RequestInfoMessage): + raise TypeError(f"Expected RequestInfoMessage, got {type(event.data)}") + correlated_response = RequestResponse(data=response_data, original_request=event.data, request_id=request_id) + await ctx.send_message(correlated_response, target_id=event.source_executor_id) + + await self._clear_pending_request_snapshot(request_id, ctx) + + async def _record_pending_request_snapshot( + self, + request: RequestInfoMessage, + source_executor_id: str, + ctx: WorkflowContext[Any], + ) -> None: + snapshot = self._build_request_snapshot(request, source_executor_id) + + pending = await self._load_pending_request_state(ctx) + pending[request.request_id] = snapshot + await self._persist_pending_request_state(pending, ctx) + await self._write_executor_state(ctx, pending) + + async def _clear_pending_request_snapshot(self, request_id: str, ctx: WorkflowContext[Any]) -> None: + pending = await self._load_pending_request_state(ctx) + if request_id in pending: + pending.pop(request_id, None) + await self._persist_pending_request_state(pending, ctx) + await self._write_executor_state(ctx, pending) + + async def _load_pending_request_state(self, ctx: WorkflowContext[Any]) -> dict[str, Any]: + try: + existing = await ctx.get_shared_state(self._PENDING_SHARED_STATE_KEY) + except KeyError: + return {} + except Exception as exc: # pragma: no cover - transport specific + logger.warning(f"RequestInfoExecutor {self.id} failed to read pending request state: {exc}") + return {} + + if not isinstance(existing, dict): + if existing not in (None, {}): + logger.warning( + f"RequestInfoExecutor 
{self.id} encountered non-dict pending state " + f"({type(existing).__name__}); resetting." + ) + return {} + + return dict(existing) # type: ignore[arg-type] + + async def _persist_pending_request_state(self, pending: dict[str, Any], ctx: WorkflowContext[Any]) -> None: + await self._safe_set_shared_state(ctx, pending) + await self._safe_set_runner_state(ctx, pending) + + async def _safe_set_shared_state(self, ctx: WorkflowContext[Any], pending: dict[str, Any]) -> None: + try: + await ctx.set_shared_state(self._PENDING_SHARED_STATE_KEY, pending) + except Exception as exc: # pragma: no cover - transport specific + logger.warning(f"RequestInfoExecutor {self.id} failed to update shared pending state: {exc}") + + async def _safe_set_runner_state(self, ctx: WorkflowContext[Any], pending: dict[str, Any]) -> None: + try: + await ctx.set_state({"pending_requests": pending}) + except Exception as exc: # pragma: no cover - transport specific + logger.warning(f"RequestInfoExecutor {self.id} failed to update runner state with pending requests: {exc}") + + def snapshot_state(self) -> dict[str, Any]: + """Serialize pending requests so checkpoint restoration can resume seamlessly.""" + + def _encode_event(event: RequestInfoEvent) -> dict[str, Any]: + request_data = event.data + payload: dict[str, Any] + data_cls = request_data.__class__ if request_data is not None else type(None) + + payload = self._encode_request_payload(request_data, data_cls) + + return { + "source_executor_id": event.source_executor_id, + "request_type": f"{event.request_type.__module__}:{event.request_type.__qualname__}", + "request_data": payload, + } + + return { + "request_events": {rid: _encode_event(event) for rid, event in self._request_events.items()}, + } + + def _encode_request_payload(self, request_data: RequestInfoMessage | None, data_cls: type[Any]) -> dict[str, Any]: + if request_data is None or isinstance(request_data, (str, int, float, bool)): + return { + "kind": "raw", + "type": 
f"{data_cls.__module__}:{data_cls.__qualname__}", + "value": request_data, + } + + if is_dataclass(request_data) and not isinstance(request_data, type): + dataclass_instance = cast(Any, request_data) + safe_value = self._make_json_safe(asdict(dataclass_instance)) + return { + "kind": "dataclass", + "type": f"{data_cls.__module__}:{data_cls.__qualname__}", + "value": safe_value, + } + + to_dict_fn = getattr(request_data, "to_dict", None) + if callable(to_dict_fn): + try: + dumped = to_dict_fn() + except TypeError: + dumped = to_dict_fn() + safe_value = self._make_json_safe(dumped) + return { + "kind": "dict", + "type": f"{data_cls.__module__}:{data_cls.__qualname__}", + "value": safe_value, + } + + to_json_fn = getattr(request_data, "to_json", None) + if callable(to_json_fn): + try: + dumped = to_json_fn() + except TypeError: + dumped = to_json_fn() + converted = dumped + if isinstance(dumped, (str, bytes, bytearray)): + decoded: str | bytes | bytearray + if isinstance(dumped, (bytes, bytearray)): + try: + decoded = dumped.decode() + except Exception: + decoded = dumped + else: + decoded = dumped + try: + converted = json.loads(decoded) + except Exception: + converted = decoded + safe_value = self._make_json_safe(converted) + return { + "kind": "dict" if isinstance(converted, dict) else "json", + "type": f"{data_cls.__module__}:{data_cls.__qualname__}", + "value": safe_value, + } + + details = self._serialise_request_details(request_data) + if details is not None: + safe_value = self._make_json_safe(details) + return { + "kind": "raw", + "type": f"{data_cls.__module__}:{data_cls.__qualname__}", + "value": safe_value, + } + + safe_value = self._make_json_safe(request_data) + return { + "kind": "raw", + "type": f"{data_cls.__module__}:{data_cls.__qualname__}", + "value": safe_value, + } + + def restore_state(self, state: dict[str, Any]) -> None: + """Restore pending request bookkeeping from checkpoint state.""" + self._request_events.clear() + stored_events = 
state.get("request_events", {}) + + for request_id, payload in stored_events.items(): + request_type_qual = payload.get("request_type", "") + try: + request_type = self._import_qualname(request_type_qual) + except Exception as exc: # pragma: no cover - defensive fallback + logger.debug( + "RequestInfoExecutor %s failed to import %s during restore: %s", + self.id, + request_type_qual, + exc, + ) + request_type = RequestInfoMessage + request_data_meta = payload.get("request_data", {}) + request_data = self._decode_request_data(request_data_meta) + event = RequestInfoEvent( + request_id=request_id, + source_executor_id=payload.get("source_executor_id", ""), + request_type=request_type, + request_data=request_data, + ) + self._request_events[request_id] = event + + @staticmethod + def _import_qualname(qualname: str) -> type[Any]: + module_name, _, type_name = qualname.partition(":") + if not module_name or not type_name: + raise ValueError(f"Invalid qualified name: {qualname}") + module = importlib.import_module(module_name) + attr: Any = module + for part in type_name.split("."): + attr = getattr(attr, part) + if not isinstance(attr, type): + raise TypeError(f"Resolved object is not a type: {qualname}") + return attr + + def _decode_request_data(self, metadata: dict[str, Any]) -> RequestInfoMessage: + kind = metadata.get("kind") + type_name = metadata.get("type", "") + value: Any = metadata.get("value", {}) + if type_name: + try: + imported = self._import_qualname(type_name) + except Exception as exc: # pragma: no cover - defensive fallback + logger.debug( + "RequestInfoExecutor %s failed to import %s during decode: %s", + self.id, + type_name, + exc, + ) + imported = RequestInfoMessage + else: + imported = RequestInfoMessage + target_cls: type[RequestInfoMessage] + if isinstance(imported, type) and issubclass(imported, RequestInfoMessage): + target_cls = imported + else: + target_cls = RequestInfoMessage + + if kind == "dataclass" and isinstance(value, dict): + with 
contextlib.suppress(TypeError): + return target_cls(**value) # type: ignore[arg-type] + + # Backwards-compat handling for checkpoints that used to store pydantic as "dict" + if kind in {"dict", "pydantic", "json"} and isinstance(value, dict): + from_dict = getattr(target_cls, "from_dict", None) + if callable(from_dict): + with contextlib.suppress(Exception): + return cast(RequestInfoMessage, from_dict(value)) + + if kind == "json" and isinstance(value, str): + from_json = getattr(target_cls, "from_json", None) + if callable(from_json): + with contextlib.suppress(Exception): + return cast(RequestInfoMessage, from_json(value)) + with contextlib.suppress(Exception): + parsed = json.loads(value) + if isinstance(parsed, dict): + return self._decode_request_data({"kind": "dict", "type": type_name, "value": parsed}) + + if isinstance(value, dict): + with contextlib.suppress(TypeError): + return target_cls(**value) # type: ignore[arg-type] + instance = object.__new__(target_cls) + instance.__dict__.update(value) # type: ignore[arg-type] + return instance + + with contextlib.suppress(Exception): + return target_cls() + return RequestInfoMessage() + + async def _write_executor_state(self, ctx: WorkflowContext[Any], pending: dict[str, Any]) -> None: + state = self.snapshot_state() + state["pending_requests"] = pending + try: + await ctx.set_state(state) + except Exception as exc: # pragma: no cover - transport specific + logger.warning(f"RequestInfoExecutor {self.id} failed to persist executor state: {exc}") + + def _build_request_snapshot( + self, + request: RequestInfoMessage, + source_executor_id: str, + ) -> dict[str, Any]: + snapshot: dict[str, Any] = { + "request_id": request.request_id, + "source_executor_id": source_executor_id, + "request_type": f"{type(request).__module__}:{type(request).__name__}", + "summary": repr(request), + } + + details = self._serialise_request_details(request) + if details: + snapshot["details"] = details + for key in ("prompt", "draft", 
"iteration"): + if key in details and key not in snapshot: + snapshot[key] = details[key] + + return snapshot + + def _serialise_request_details(self, request: RequestInfoMessage) -> dict[str, Any] | None: + if is_dataclass(request): + data = self._make_json_safe(asdict(request)) + if isinstance(data, dict): + return cast(dict[str, Any], data) + return None + + to_dict = getattr(request, "to_dict", None) + if callable(to_dict): + try: + dump = self._make_json_safe(to_dict()) + except TypeError: + dump = self._make_json_safe(to_dict()) + if isinstance(dump, dict): + return cast(dict[str, Any], dump) + return None + + to_json = getattr(request, "to_json", None) + if callable(to_json): + try: + raw = to_json() + except TypeError: + raw = to_json() + converted = raw + if isinstance(raw, (str, bytes, bytearray)): + decoded: str | bytes | bytearray + if isinstance(raw, (bytes, bytearray)): + try: + decoded = raw.decode() + except Exception: + decoded = raw + else: + decoded = raw + try: + converted = json.loads(decoded) + except Exception: + converted = decoded + dump = self._make_json_safe(converted) + if isinstance(dump, dict): + return cast(dict[str, Any], dump) + return None + + attrs = getattr(request, "__dict__", None) + if isinstance(attrs, dict): + cleaned = self._make_json_safe(attrs) + if isinstance(cleaned, dict): + return cast(dict[str, Any], cleaned) + + return None + + def _make_json_safe(self, value: Any) -> Any: + if value is None or isinstance(value, (str, int, float, bool)): + return value + if isinstance(value, Mapping): + safe_dict: dict[str, Any] = {} + for key, val in value.items(): # type: ignore[attr-defined] + safe_dict[str(key)] = self._make_json_safe(val) # type: ignore[arg-type] + return safe_dict + if isinstance(value, Sequence) and not isinstance(value, (str, bytes, bytearray)): + return [self._make_json_safe(item) for item in value] # type: ignore[misc] + return repr(value) + + async def has_pending_request(self, request_id: str, ctx: 
WorkflowContext[Any]) -> bool: + if request_id in self._request_events: + return True + snapshot = await self._get_pending_request_snapshot(request_id, ctx) + return snapshot is not None + + async def _rehydrate_request_event( + self, + request_id: str, + ctx: WorkflowContext[Any], + ) -> RequestInfoEvent | None: + snapshot = await self._get_pending_request_snapshot(request_id, ctx) + if snapshot is None: + return None + + source_executor_id = snapshot.get("source_executor_id") + if not isinstance(source_executor_id, str) or not source_executor_id: + return None + + request = self._construct_request_from_snapshot(snapshot) + if request is None: + return None + + event = RequestInfoEvent( + request_id=request_id, + source_executor_id=source_executor_id, + request_type=type(request), + request_data=request, + ) + self._request_events[request_id] = event + return event + + async def _get_pending_request_snapshot(self, request_id: str, ctx: WorkflowContext[Any]) -> dict[str, Any] | None: + pending = await self._collect_pending_request_snapshots(ctx) + snapshot = pending.get(request_id) + if snapshot is None: + return None + return snapshot + + async def _collect_pending_request_snapshots(self, ctx: WorkflowContext[Any]) -> dict[str, dict[str, Any]]: + combined: dict[str, dict[str, Any]] = {} + + try: + shared_pending = await ctx.get_shared_state(self._PENDING_SHARED_STATE_KEY) + except KeyError: + shared_pending = None + except Exception as exc: # pragma: no cover - transport specific + logger.warning(f"RequestInfoExecutor {self.id} failed to read shared pending state during rehydrate: {exc}") + shared_pending = None + + if isinstance(shared_pending, dict): + for key, value in shared_pending.items(): # type: ignore[attr-defined] + if isinstance(key, str) and isinstance(value, dict): + combined[key] = cast(dict[str, Any], value) + + try: + state = await ctx.get_state() + except Exception as exc: # pragma: no cover - transport specific + 
logger.warning(f"RequestInfoExecutor {self.id} failed to read runner state during rehydrate: {exc}") + state = None + + if isinstance(state, dict): + state_pending = state.get("pending_requests") + if isinstance(state_pending, dict): + for key, value in state_pending.items(): # type: ignore[attr-defined] + if isinstance(key, str) and isinstance(value, dict) and key not in combined: + combined[key] = cast(dict[str, Any], value) + + return combined + + def _construct_request_from_snapshot(self, snapshot: dict[str, Any]) -> RequestInfoMessage | None: + details_raw = snapshot.get("details") + details: dict[str, Any] = cast(dict[str, Any], details_raw) if isinstance(details_raw, dict) else {} + + request_cls: type[RequestInfoMessage] = RequestInfoMessage + request_type_str = snapshot.get("request_type") + if isinstance(request_type_str, str) and ":" in request_type_str: + module_name, class_name = request_type_str.split(":", 1) + try: + module = importlib.import_module(module_name) + candidate = getattr(module, class_name) + if isinstance(candidate, type) and issubclass(candidate, RequestInfoMessage): + request_cls = candidate + except Exception as exc: + logger.warning(f"RequestInfoExecutor {self.id} could not import {module_name}.{class_name}: {exc}") + request_cls = RequestInfoMessage + + request: RequestInfoMessage | None = self._instantiate_request(request_cls, details) + + if request is None and request_cls is not RequestInfoMessage: + request = self._instantiate_request(RequestInfoMessage, details) + + if request is None: + logger.warning( + f"RequestInfoExecutor {self.id} could not reconstruct request " + f"{request_type_str or RequestInfoMessage.__name__} from snapshot keys {sorted(details.keys())}" + ) + return None + + for key, value in details.items(): + if key == "request_id": + continue + try: + setattr(request, key, value) + except Exception as exc: + logger.debug( + f"RequestInfoExecutor {self.id} could not set attribute {key} on 
{type(request).__name__}: {exc}" + ) + continue + + snapshot_request_id = snapshot.get("request_id") + if isinstance(snapshot_request_id, str) and snapshot_request_id: + try: + request.request_id = snapshot_request_id + except Exception as exc: + logger.debug( + f"RequestInfoExecutor {self.id} could not apply snapshot " + f"request_id to {type(request).__name__}: {exc}" + ) + + return request + + def _instantiate_request( + self, + request_cls: type[RequestInfoMessage], + details: dict[str, Any], + ) -> RequestInfoMessage | None: + try: + from_dict = getattr(request_cls, "from_dict", None) + if callable(from_dict): + return cast(RequestInfoMessage, from_dict(details)) + except (TypeError, ValueError) as exc: + logger.debug(f"RequestInfoExecutor {self.id} failed to hydrate {request_cls.__name__} via from_dict: {exc}") + except Exception as exc: + logger.warning( + f"RequestInfoExecutor {self.id} encountered unexpected error during " + f"{request_cls.__name__}.from_dict: {exc}" + ) + + if is_dataclass(request_cls): + try: + field_names = {f.name for f in fields(request_cls)} + ctor_kwargs = {name: details[name] for name in field_names if name in details} + return request_cls(**ctor_kwargs) + except (TypeError, ValueError) as exc: + logger.debug( + f"RequestInfoExecutor {self.id} could not instantiate dataclass " + f"{request_cls.__name__} with snapshot data: {exc}" + ) + except Exception as exc: + logger.warning( + f"RequestInfoExecutor {self.id} encountered unexpected error " + f"constructing dataclass {request_cls.__name__}: {exc}" + ) + + try: + instance = request_cls() + except Exception as exc: + logger.warning( + f"RequestInfoExecutor {self.id} could not instantiate {request_cls.__name__} without arguments: {exc}" + ) + return None + + for key, value in details.items(): + if key == "request_id": + continue + try: + setattr(instance, key, value) + except Exception as exc: + logger.debug( + f"RequestInfoExecutor {self.id} could not set attribute {key} on " + 
f"{request_cls.__name__} during instantiation: {exc}" + ) + continue + + return instance + + @staticmethod + def pending_requests_from_checkpoint( + checkpoint: WorkflowCheckpoint, + *, + request_executor_ids: Iterable[str] | None = None, + ) -> list[PendingRequestDetails]: + executor_filter: set[str] | None = None + if request_executor_ids is not None: + executor_filter = {str(value) for value in request_executor_ids} + + pending: dict[str, PendingRequestDetails] = {} + + shared_map = checkpoint.shared_state.get(RequestInfoExecutor._PENDING_SHARED_STATE_KEY) + if isinstance(shared_map, Mapping): + for request_id, snapshot in shared_map.items(): # type: ignore[attr-defined] + RequestInfoExecutor._merge_snapshot(pending, str(request_id), snapshot) # type: ignore[arg-type] + + for state in checkpoint.executor_states.values(): + if not isinstance(state, Mapping): + continue + inner = state.get("pending_requests") + if isinstance(inner, Mapping): + for request_id, snapshot in inner.items(): # type: ignore[attr-defined] + RequestInfoExecutor._merge_snapshot(pending, str(request_id), snapshot) # type: ignore[arg-type] + + for source_id, message_list in checkpoint.messages.items(): + if executor_filter is not None and source_id not in executor_filter: + continue + if not isinstance(message_list, list): + continue + for message in message_list: + if not isinstance(message, Mapping): + continue + payload = _decode_checkpoint_value(message.get("data")) + RequestInfoExecutor._merge_message_payload(pending, payload, message) + + return list(pending.values()) + + @staticmethod + def checkpoint_summary( + checkpoint: WorkflowCheckpoint, + *, + request_executor_ids: Iterable[str] | None = None, + preview_width: int = 70, + ) -> WorkflowCheckpointSummary: + targets = sorted(checkpoint.messages.keys()) + executor_states = sorted(checkpoint.executor_states.keys()) + pending = RequestInfoExecutor.pending_requests_from_checkpoint( + checkpoint, 
request_executor_ids=request_executor_ids + ) + + draft_preview: str | None = None + for entry in pending: + if entry.draft: + draft_preview = shorten(entry.draft, width=preview_width, placeholder="…") + break + + status = "idle" + if pending: + status = "awaiting human response" + elif not checkpoint.messages and "finalise" in executor_states: + status = "completed" + elif checkpoint.messages: + status = "awaiting next superstep" + elif request_executor_ids is not None and any(tid in targets for tid in request_executor_ids): + status = "awaiting request delivery" + + return WorkflowCheckpointSummary( + checkpoint_id=checkpoint.checkpoint_id, + iteration_count=checkpoint.iteration_count, + targets=targets, + executor_states=executor_states, + status=status, + draft_preview=draft_preview, + pending_requests=pending, + ) + + @staticmethod + def _merge_snapshot( + pending: dict[str, PendingRequestDetails], + request_id: str, + snapshot: Any, + ) -> None: + if not request_id or not isinstance(snapshot, Mapping): + return + + details = pending.setdefault(request_id, PendingRequestDetails(request_id=request_id)) + + RequestInfoExecutor._apply_update( + details, + prompt=snapshot.get("prompt"), # type: ignore[attr-defined] + draft=snapshot.get("draft"), # type: ignore[attr-defined] + iteration=snapshot.get("iteration"), # type: ignore[attr-defined] + source_executor_id=snapshot.get("source_executor_id"), # type: ignore[attr-defined] + ) + + extra = snapshot.get("details") # type: ignore[attr-defined] + if isinstance(extra, Mapping): + RequestInfoExecutor._apply_update( + details, + prompt=extra.get("prompt"), # type: ignore[attr-defined] + draft=extra.get("draft"), # type: ignore[attr-defined] + iteration=extra.get("iteration"), # type: ignore[attr-defined] + ) + + @staticmethod + def _merge_message_payload( + pending: dict[str, PendingRequestDetails], + payload: Any, + raw_message: Mapping[str, Any], + ) -> None: + if isinstance(payload, RequestResponse): + request_id = 
payload.request_id or RequestInfoExecutor._get_field(payload.original_request, "request_id") # type: ignore[arg-type] + if not request_id: + return + details = pending.setdefault(request_id, PendingRequestDetails(request_id=request_id)) + RequestInfoExecutor._apply_update( + details, + prompt=RequestInfoExecutor._get_field(payload.original_request, "prompt"), # type: ignore[arg-type] + draft=RequestInfoExecutor._get_field(payload.original_request, "draft"), # type: ignore[arg-type] + iteration=RequestInfoExecutor._get_field(payload.original_request, "iteration"), # type: ignore[arg-type] + source_executor_id=raw_message.get("source_id"), + original_request=payload.original_request, # type: ignore[arg-type] + ) + elif isinstance(payload, RequestInfoMessage): + request_id = getattr(payload, "request_id", None) + if not request_id: + return + details = pending.setdefault(request_id, PendingRequestDetails(request_id=request_id)) + RequestInfoExecutor._apply_update( + details, + prompt=getattr(payload, "prompt", None), + draft=getattr(payload, "draft", None), + iteration=getattr(payload, "iteration", None), + source_executor_id=raw_message.get("source_id"), + original_request=payload, + ) + + @staticmethod + def _apply_update( + details: PendingRequestDetails, + *, + prompt: Any = None, + draft: Any = None, + iteration: Any = None, + source_executor_id: Any = None, + original_request: Any = None, + ) -> None: + if prompt and not details.prompt: + details.prompt = str(prompt) + if draft and not details.draft: + details.draft = str(draft) + if iteration is not None and details.iteration is None: + coerced = RequestInfoExecutor._coerce_int(iteration) + if coerced is not None: + details.iteration = coerced + if source_executor_id and not details.source_executor_id: + details.source_executor_id = str(source_executor_id) + if original_request is not None and details.original_request is None: + details.original_request = original_request + + @staticmethod + def _get_field(obj: 
Any, key: str) -> Any: + if obj is None: + return None + if isinstance(obj, Mapping): + return obj.get(key) # type: ignore[attr-defined,return-value] + return getattr(obj, key, None) + + @staticmethod + def _coerce_int(value: Any) -> int | None: + try: + return int(value) + except (TypeError, ValueError): + return None diff --git a/python/packages/core/agent_framework/_workflows/_runner.py b/python/packages/core/agent_framework/_workflows/_runner.py index a8f845637e..11a0499acf 100644 --- a/python/packages/core/agent_framework/_workflows/_runner.py +++ b/python/packages/core/agent_framework/_workflows/_runner.py @@ -22,7 +22,7 @@ from ._shared_state import SharedState if TYPE_CHECKING: - from ._executor import RequestInfoExecutor + from ._request_info_executor import RequestInfoExecutor logger = logging.getLogger(__name__) @@ -372,7 +372,7 @@ def _find_request_info_executor(self) -> "RequestInfoExecutor | None": Returns: The RequestInfoExecutor instance if found, None otherwise. """ - from ._executor import RequestInfoExecutor + from ._request_info_executor import RequestInfoExecutor for executor in self._executors.values(): if isinstance(executor, RequestInfoExecutor): @@ -388,7 +388,7 @@ def _is_message_to_request_info_executor(self, msg: "Message") -> bool: Returns: True if the message targets a RequestInfoExecutor, False otherwise. 
""" - from ._executor import RequestInfoExecutor + from ._request_info_executor import RequestInfoExecutor if not msg.target_id: return False diff --git a/python/packages/core/agent_framework/_workflows/_runner_context.py b/python/packages/core/agent_framework/_workflows/_runner_context.py index 5dc135d674..4de70cb591 100644 --- a/python/packages/core/agent_framework/_workflows/_runner_context.py +++ b/python/packages/core/agent_framework/_workflows/_runner_context.py @@ -12,7 +12,7 @@ from ._checkpoint import CheckpointStorage, WorkflowCheckpoint from ._const import DEFAULT_MAX_ITERATIONS -from ._events import AgentRunUpdateEvent, WorkflowEvent +from ._events import WorkflowEvent from ._shared_state import SharedState logger = logging.getLogger(__name__) @@ -487,28 +487,6 @@ async def add_event(self, event: WorkflowEvent) -> None: Events are enqueued so runners can stream them in real time instead of waiting for superstep boundaries. """ - # Filter out empty AgentRunUpdateEvent updates to avoid emitting None/empty chunks - try: - if isinstance(event, AgentRunUpdateEvent): - update = getattr(event, "data", None) - # Skip if no update payload - if not update: - return - # Robust emptiness check: allow either top-level text or any text-bearing content - text_val = getattr(update, "text", None) - contents = getattr(update, "contents", None) - has_text_content = False - if contents: - for c in contents: - if getattr(c, "text", None): - has_text_content = True - break - if not (text_val or has_text_content): - return - except Exception as exc: # pragma: no cover - defensive logging path - # Best-effort filtering only; never block event delivery on filtering errors - logger.debug(f"Error while filtering event {event!r}: {exc}", exc_info=True) - await self._event_queue.put(event) async def drain_events(self) -> list[WorkflowEvent]: diff --git a/python/packages/core/agent_framework/_workflows/_validation.py b/python/packages/core/agent_framework/_workflows/_validation.py 
index 95f522c225..5cd7940ff3 100644 --- a/python/packages/core/agent_framework/_workflows/_validation.py +++ b/python/packages/core/agent_framework/_workflows/_validation.py @@ -9,7 +9,8 @@ from typing import Any, Union, get_args, get_origin from ._edge import Edge, EdgeGroup, FanInEdgeGroup -from ._executor import Executor, RequestInfoExecutor +from ._executor import Executor +from ._request_info_executor import RequestInfoExecutor logger = logging.getLogger(__name__) diff --git a/python/packages/core/agent_framework/_workflows/_workflow.py b/python/packages/core/agent_framework/_workflows/_workflow.py index 0782dbd1cd..d9270bfe02 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow.py +++ b/python/packages/core/agent_framework/_workflows/_workflow.py @@ -37,8 +37,9 @@ WorkflowStatusEvent, _framework_event_origin, # type: ignore ) -from ._executor import Executor, RequestInfoExecutor +from ._executor import Executor from ._model_utils import DictConvertible +from ._request_info_executor import RequestInfoExecutor from ._runner import Runner from ._runner_context import InProcRunnerContext, RunnerContext from ._shared_state import SharedState @@ -742,7 +743,7 @@ def _find_request_info_executor(self) -> RequestInfoExecutor | None: Returns: The RequestInfoExecutor instance if found, None otherwise. 
""" - from ._executor import RequestInfoExecutor + from ._request_info_executor import RequestInfoExecutor for executor in self.executors.values(): if isinstance(executor, RequestInfoExecutor): diff --git a/python/packages/core/agent_framework/_workflows/_workflow_executor.py b/python/packages/core/agent_framework/_workflows/_workflow_executor.py index 55f1fab264..501ce0d8f1 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow_executor.py +++ b/python/packages/core/agent_framework/_workflows/_workflow_executor.py @@ -19,10 +19,12 @@ ) from ._executor import ( Executor, + handler, +) +from ._request_info_executor import ( RequestInfoExecutor, RequestInfoMessage, RequestResponse, - handler, ) from ._typing_utils import is_instance_of from ._workflow_context import WorkflowContext diff --git a/python/packages/core/agent_framework/azure/_responses_client.py b/python/packages/core/agent_framework/azure/_responses_client.py index 3d8186bb2d..1d88d51688 100644 --- a/python/packages/core/agent_framework/azure/_responses_client.py +++ b/python/packages/core/agent_framework/azure/_responses_client.py @@ -21,8 +21,8 @@ TAzureOpenAIResponsesClient = TypeVar("TAzureOpenAIResponsesClient", bound="AzureOpenAIResponsesClient") -@use_observability @use_function_invocation +@use_observability @use_chat_middleware class AzureOpenAIResponsesClient(AzureOpenAIConfigMixin, OpenAIBaseResponsesClient): """Azure Responses completion class.""" diff --git a/python/packages/core/agent_framework/azure/_shared.py b/python/packages/core/agent_framework/azure/_shared.py index 4ed66ebb7f..093a4086f1 100644 --- a/python/packages/core/agent_framework/azure/_shared.py +++ b/python/packages/core/agent_framework/azure/_shared.py @@ -171,7 +171,6 @@ def __init__( Args: deployment_name: Name of the deployment. - ai_model_type: The type of OpenAI model to deploy. endpoint: The specific endpoint URL for the deployment. base_url: The base URL for Azure services. 
api_version: Azure API version. Defaults to the defined DEFAULT_AZURE_API_VERSION. diff --git a/python/packages/core/agent_framework/exceptions.py b/python/packages/core/agent_framework/exceptions.py index ef71df0657..971b612ea3 100644 --- a/python/packages/core/agent_framework/exceptions.py +++ b/python/packages/core/agent_framework/exceptions.py @@ -140,3 +140,9 @@ class MiddlewareException(AgentFrameworkException): """An error occurred during middleware execution.""" pass + + +class ContentError(AgentFrameworkException): + """An error occurred while processing content.""" + + pass diff --git a/python/packages/core/agent_framework/observability.py b/python/packages/core/agent_framework/observability.py index af3973b141..645499471d 100644 --- a/python/packages/core/agent_framework/observability.py +++ b/python/packages/core/agent_framework/observability.py @@ -825,7 +825,7 @@ async def trace_get_response( ) -> "ChatResponse": global OBSERVABILITY_SETTINGS if not OBSERVABILITY_SETTINGS.ENABLED: - # If model diagnostics are not enabled, just return the completion + # If model_id diagnostics are not enabled, just return the completion return await func( self, messages=messages, @@ -836,7 +836,7 @@ async def trace_get_response( if "operation_duration_histogram" not in self.additional_properties: self.additional_properties["operation_duration_histogram"] = _get_duration_histogram() model_id = ( - kwargs.get("model") + kwargs.get("model_id") or (chat_options.model_id if (chat_options := kwargs.get("chat_options")) else None) or getattr(self, "model_id", None) ) @@ -848,7 +848,7 @@ async def trace_get_response( attributes = _get_span_attributes( operation_name=OtelAttr.CHAT_COMPLETION_OPERATION, provider_name=provider_name, - model_id=model_id, + model=model_id, service_url=service_url, **kwargs, ) @@ -923,7 +923,7 @@ async def trace_get_streaming_response( self.additional_properties["operation_duration_histogram"] = _get_duration_histogram() model_id = ( - 
kwargs.get("model") + kwargs.get("model_id") or (chat_options.model_id if (chat_options := kwargs.get("chat_options")) else None) or getattr(self, "model_id", None) ) @@ -935,7 +935,7 @@ async def trace_get_streaming_response( attributes = _get_span_attributes( operation_name=OtelAttr.CHAT_COMPLETION_OPERATION, provider_name=provider_name, - model_id=model_id, + model=model_id, service_url=service_url, **kwargs, ) @@ -1346,7 +1346,7 @@ def _get_span_attributes(**kwargs: Any) -> dict[str, Any]: attributes[SpanAttributes.LLM_SYSTEM] = system_name if provider_name := kwargs.get("provider_name"): attributes[OtelAttr.PROVIDER_NAME] = provider_name - attributes[SpanAttributes.LLM_REQUEST_MODEL] = kwargs.get("model_id", "unknown") + attributes[SpanAttributes.LLM_REQUEST_MODEL] = kwargs.get("model", "unknown") if service_url := kwargs.get("service_url"): attributes[OtelAttr.ADDRESS] = service_url if conversation_id := kwargs.get("conversation_id", chat_options.conversation_id): diff --git a/python/packages/core/agent_framework/openai/_assistants_client.py b/python/packages/core/agent_framework/openai/_assistants_client.py index 7fb81ad485..f5a57683db 100644 --- a/python/packages/core/agent_framework/openai/_assistants_client.py +++ b/python/packages/core/agent_framework/openai/_assistants_client.py @@ -425,7 +425,7 @@ def _prepare_options( "json_schema": chat_options.response_format.model_json_schema(), } - instructions: list[str] = [chat_options.instructions] if chat_options and chat_options.instructions else [] + instructions: list[str] = [] tool_results: list[FunctionResultContent] | None = None additional_messages: list[AdditionalMessage] | None = None diff --git a/python/packages/core/agent_framework/openai/_chat_client.py b/python/packages/core/agent_framework/openai/_chat_client.py index 367004a4bd..59d9f6f97a 100644 --- a/python/packages/core/agent_framework/openai/_chat_client.py +++ b/python/packages/core/agent_framework/openai/_chat_client.py @@ -154,10 +154,13 
@@ def _process_web_search_tool( def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: ChatOptions) -> dict[str, Any]: # Preprocess web search tool if it exists - options_dict = chat_options.to_provider_settings() - instructions = options_dict.pop("instructions", None) - if instructions: - messages = [ChatMessage(role="system", text=instructions), *messages] + options_dict = chat_options.to_dict( + exclude={ + "type", + "instructions", # included as system message + } + ) + if messages and "messages" not in options_dict: options_dict["messages"] = self._prepare_chat_history_for_request(messages) if "messages" not in options_dict: @@ -172,14 +175,20 @@ def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: options_dict.pop("parallel_tool_calls", None) options_dict.pop("tool_choice", None) - if "model" not in options_dict: + if "model_id" not in options_dict: options_dict["model"] = self.model_id + else: + options_dict["model"] = options_dict.pop("model_id") if ( chat_options.response_format and isinstance(chat_options.response_format, type) and issubclass(chat_options.response_format, BaseModel) ): options_dict["response_format"] = type_to_response_format_param(chat_options.response_format) + if additional_properties := options_dict.pop("additional_properties", None): + for key, value in additional_properties.items(): + if value is not None: + options_dict[key] = value return options_dict def _create_chat_response(self, response: ChatCompletion, chat_options: ChatOptions) -> "ChatResponse": diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index 108949b2e0..c5b1f1073c 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -300,41 +300,51 @@ def _tools_to_response_tools( def _prepare_options(self, messages: 
MutableSequence[ChatMessage], chat_options: ChatOptions) -> dict[str, Any]: """Take ChatOptions and create the specific options for Responses API.""" - options_dict: dict[str, Any] = {} - - if chat_options.max_tokens is not None: - options_dict["max_output_tokens"] = chat_options.max_tokens - - if chat_options.temperature is not None: - options_dict["temperature"] = chat_options.temperature + options_dict: dict[str, Any] = chat_options.to_dict( + exclude={ + "type", + "response_format", # handled in inner get methods + "presence_penalty", # not supported + "frequency_penalty", # not supported + "logit_bias", # not supported + "seed", # not supported + "stop", # not supported + "instructions", # already added as system message + } + ) + translations = { + "model_id": "model", + "allow_multiple_tool_calls": "parallel_tool_calls", + "conversation_id": "previous_response_id", + "max_tokens": "max_output_tokens", + } + for old_key, new_key in translations.items(): + if old_key in options_dict and old_key != new_key: + options_dict[new_key] = options_dict.pop(old_key) - if chat_options.top_p is not None: - options_dict["top_p"] = chat_options.top_p + # tools + if chat_options.tools is None: + options_dict.pop("parallel_tool_calls", None) + else: + options_dict["tools"] = self._tools_to_response_tools(chat_options.tools) - if chat_options.user is not None: - options_dict["user"] = chat_options.user + # model id + if not options_dict.get("model"): + options_dict["model"] = self.model_id # messages - if instructions := options_dict.pop("instructions", None): - messages = [ChatMessage(role="system", text=instructions), *messages] request_input = self._prepare_chat_messages_for_request(messages) if not request_input: raise ServiceInvalidRequestError("Messages are required for chat completions") options_dict["input"] = request_input - # tools - if chat_options.tools is None: - options_dict.pop("parallel_tool_calls", None) - else: - options_dict["tools"] = 
self._tools_to_response_tools(chat_options.tools) - - # other settings - options_dict["store"] = chat_options.store is True - - if chat_options.conversation_id: - options_dict["previous_response_id"] = chat_options.conversation_id - if "model" not in options_dict: - options_dict["model"] = self.model_id + # additional provider specific settings + if additional_properties := options_dict.pop("additional_properties", None): + for key, value in additional_properties.items(): + if value is not None: + options_dict[key] = value + if "store" not in options_dict: + options_dict["store"] = False return options_dict def _prepare_chat_messages_for_request(self, chat_messages: Sequence[ChatMessage]) -> list[dict[str, Any]]: @@ -630,6 +640,11 @@ def _create_response_content( additional_properties=additional_properties, ) ) + if hasattr(item, "summary") and item.summary: + for summary in item.summary: + contents.append( + TextReasoningContent(text=summary.text, raw_representation=summary) # type: ignore[arg-type] + ) case "code_interpreter_call": # ResponseOutputCodeInterpreterCall if hasattr(item, "outputs") and item.outputs: for code_output in item.outputs: diff --git a/python/packages/core/pyproject.toml b/python/packages/core/pyproject.toml index d1036d0406..9909d9abd6 100644 --- a/python/packages/core/pyproject.toml +++ b/python/packages/core/pyproject.toml @@ -4,7 +4,7 @@ description = "Microsoft Agent Framework for building AI Agents with Python. 
Thi authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251001" +version = "1.0.0b251007" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" @@ -22,7 +22,7 @@ classifiers = [ "Typing :: Typed", ] dependencies = [ - "openai>=1.99.0", + "openai>=1.99.0,<2", "pydantic>=2,<3", "pydantic-settings>=2,<3", "typing-extensions", diff --git a/python/packages/core/tests/azure/test_azure_assistants_client.py b/python/packages/core/tests/azure/test_azure_assistants_client.py index 758be68d3b..307e6c7ac1 100644 --- a/python/packages/core/tests/azure/test_azure_assistants_client.py +++ b/python/packages/core/tests/azure/test_azure_assistants_client.py @@ -15,6 +15,7 @@ ChatAgent, ChatClientProtocol, ChatMessage, + ChatOptions, ChatResponse, ChatResponseUpdate, HostedCodeInterpreterTool, @@ -154,6 +155,18 @@ def test_azure_assistants_client_init_with_default_headers(azure_openai_unit_tes assert chat_client.client.default_headers[key] == value +def test_azure_assistants_client_instructions_sent_once(mock_async_azure_openai: MagicMock) -> None: + """Ensure instructions are only included once for Azure OpenAI Assistants requests.""" + chat_client = create_test_azure_assistants_client(mock_async_azure_openai) + instructions = "You are a helpful assistant." 
+ chat_options = ChatOptions(instructions=instructions) + + prepared_messages = chat_client.prepare_messages([ChatMessage(role="user", text="Hello")], chat_options) + run_options, _ = chat_client._prepare_options(prepared_messages, chat_options) # type: ignore[reportPrivateUsage] + + assert run_options.get("instructions") == instructions + + async def test_azure_assistants_client_get_assistant_id_or_create_existing_assistant( mock_async_azure_openai: MagicMock, ) -> None: diff --git a/python/packages/core/tests/azure/test_azure_chat_client.py b/python/packages/core/tests/azure/test_azure_chat_client.py index aad231ac98..864b3fa20a 100644 --- a/python/packages/core/tests/azure/test_azure_chat_client.py +++ b/python/packages/core/tests/azure/test_azure_chat_client.py @@ -23,6 +23,7 @@ ChatAgent, ChatClientProtocol, ChatMessage, + ChatOptions, ChatResponse, ChatResponseUpdate, TextContent, @@ -83,6 +84,18 @@ def test_init_base_url(azure_openai_unit_test_env: dict[str, str]) -> None: assert azure_chat_client.client.default_headers[key] == value +def test_azure_openai_chat_client_instructions_sent_once(azure_openai_unit_test_env: dict[str, str]) -> None: + """Ensure instructions are only included once when preparing Azure OpenAI chat requests.""" + client = AzureOpenAIChatClient() + instructions = "You are a helpful assistant." 
+ chat_options = ChatOptions(instructions=instructions) + + prepared_messages = client.prepare_messages([ChatMessage(role="user", text="Hello")], chat_options) + request_options = client._prepare_options(prepared_messages, chat_options) # type: ignore[reportPrivateUsage] + + assert json.dumps(request_options).count(instructions) == 1 + + @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_BASE_URL"]], indirect=True) def test_init_endpoint(azure_openai_unit_test_env: dict[str, str]) -> None: azure_chat_client = AzureOpenAIChatClient() diff --git a/python/packages/core/tests/azure/test_azure_responses_client.py b/python/packages/core/tests/azure/test_azure_responses_client.py index a495d05837..658aa21457 100644 --- a/python/packages/core/tests/azure/test_azure_responses_client.py +++ b/python/packages/core/tests/azure/test_azure_responses_client.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. +import json import os from typing import Annotated @@ -14,6 +15,7 @@ ChatAgent, ChatClientProtocol, ChatMessage, + ChatOptions, ChatResponse, ChatResponseUpdate, HostedCodeInterpreterTool, @@ -112,6 +114,18 @@ def test_init_with_default_header(azure_openai_unit_test_env: dict[str, str]) -> assert azure_responses_client.client.default_headers[key] == value +def test_azure_responses_client_instructions_sent_once(azure_openai_unit_test_env: dict[str, str]) -> None: + """Ensure instructions are only included once for Azure OpenAI Responses requests.""" + client = AzureOpenAIResponsesClient() + instructions = "You are a helpful assistant." 
+ chat_options = ChatOptions(instructions=instructions) + + prepared_messages = client.prepare_messages([ChatMessage(role="user", text="Hello")], chat_options) + request_options = client._prepare_options(prepared_messages, chat_options) # type: ignore[reportPrivateUsage] + + assert json.dumps(request_options).count(instructions) == 1 + + @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_RESPONSES_DEPLOYMENT_NAME"]], indirect=True) def test_init_with_empty_model_id(azure_openai_unit_test_env: dict[str, str]) -> None: with pytest.raises(ServiceInitializationError): diff --git a/python/packages/core/tests/core/test_observability.py b/python/packages/core/tests/core/test_observability.py index e83fa02bd3..c79f31dca4 100644 --- a/python/packages/core/tests/core/test_observability.py +++ b/python/packages/core/tests/core/test_observability.py @@ -238,7 +238,7 @@ async def test_chat_client_observability(mock_chat_client, span_exporter: InMemo messages = [ChatMessage(role=Role.USER, text="Test message")] span_exporter.clear() - response = await client.get_response(messages=messages, model="Test") + response = await client.get_response(messages=messages, model_id="Test") assert response is not None spans = span_exporter.get_finished_spans() assert len(spans) == 1 @@ -263,7 +263,7 @@ async def test_chat_client_streaming_observability( span_exporter.clear() # Collect all yielded updates updates = [] - async for update in client.get_streaming_response(messages=messages, model="Test"): + async for update in client.get_streaming_response(messages=messages, model_id="Test"): updates.append(update) # Verify we got the expected updates, this shouldn't be dependent on otel diff --git a/python/packages/core/tests/core/test_threads.py b/python/packages/core/tests/core/test_threads.py index c04cab577e..8049501789 100644 --- a/python/packages/core/tests/core/test_threads.py +++ b/python/packages/core/tests/core/test_threads.py @@ -224,11 +224,15 @@ async def 
test_deserialize_with_existing_store(self) -> None: """Test _deserialize with existing message store.""" store = MockChatMessageStore() thread = AgentThread(message_store=store) - serialized_data: dict[str, Any] = {"service_thread_id": None, "chat_message_store_state": {"messages": []}} + serialized_data: dict[str, Any] = { + "service_thread_id": None, + "chat_message_store_state": {"messages": [ChatMessage(role="user", text="test")]}, + } await thread.update_from_thread_state(serialized_data) - assert store._deserialize_calls == 1 # pyright: ignore[reportPrivateUsage] + assert store._messages + assert store._messages[0].text == "test" async def test_serialize_with_service_thread_id(self) -> None: """Test serialize with service_thread_id.""" @@ -268,6 +272,23 @@ async def test_serialize_with_kwargs(self) -> None: assert store._serialize_calls == 1 # pyright: ignore[reportPrivateUsage] + async def test_serialize_round_trip_messages(self, sample_messages: list[ChatMessage]) -> None: + """Test a roundtrip of the serialization.""" + store = ChatMessageStore(sample_messages) + thread = AgentThread(message_store=store) + new_thread = await AgentThread.deserialize(await thread.serialize()) + assert new_thread.message_store is not None + new_messages = await new_thread.message_store.list_messages() + assert len(new_messages) == len(sample_messages) + assert {new.text for new in new_messages} == {orig.text for orig in sample_messages} + + async def test_serialize_round_trip_thread_id(self) -> None: + """Test a roundtrip of the serialization.""" + thread = AgentThread(service_thread_id="test-1234") + new_thread = await AgentThread.deserialize(await thread.serialize()) + assert new_thread.message_store is None + assert new_thread.service_thread_id == "test-1234" + class TestChatMessageList: """Test cases for ChatMessageStore class.""" @@ -377,7 +398,7 @@ def test_init_with_service_thread_id(self) -> None: def test_init_with_chat_message_store_state(self) -> None: """Test 
AgentThreadState initialization with chat_message_store_state.""" store_data: dict[str, Any] = {"messages": []} - state = AgentThreadState.model_validate({"chat_message_store_state": store_data}) + state = AgentThreadState.from_dict({"chat_message_store_state": store_data}) assert state.service_thread_id is None assert state.chat_message_store_state.messages == [] @@ -385,9 +406,7 @@ def test_init_with_chat_message_store_state(self) -> None: def test_init_with_both(self) -> None: """Test AgentThreadState initialization with both parameters.""" store_data: dict[str, Any] = {"messages": []} - with pytest.raises( - AgentThreadException, match="Only one of service_thread_id or chat_message_store_state may be set" - ): + with pytest.raises(AgentThreadException): AgentThreadState(service_thread_id="test-conv-123", chat_message_store_state=store_data) def test_init_defaults(self) -> None: diff --git a/python/packages/core/tests/core/test_types.py b/python/packages/core/tests/core/test_types.py index d017d7a14b..909e72a0a0 100644 --- a/python/packages/core/tests/core/test_types.py +++ b/python/packages/core/tests/core/test_types.py @@ -4,13 +4,12 @@ from typing import Any import pytest -from pydantic import BaseModel, ValidationError +from pydantic import BaseModel from pytest import fixture, mark, raises from agent_framework import ( AgentRunResponse, AgentRunResponseUpdate, - AIFunction, BaseContent, ChatMessage, ChatOptions, @@ -37,7 +36,7 @@ UsageDetails, ai_function, ) -from agent_framework.exceptions import AdditionItemMismatch +from agent_framework.exceptions import AdditionItemMismatch, ContentError @fixture @@ -451,7 +450,8 @@ def test_ai_content_serialization(content_type: type[BaseContent], args: dict): else: # Normal attribute checking for other content types for key, value in args.items(): - assert getattr(deserialized, key) == value + if value: + assert getattr(deserialized, key) == value # For now, skip the TestModel validation since it still uses Pydantic # 
This would need to be updated when we migrate more classes @@ -772,53 +772,11 @@ def test_chat_options_init() -> None: assert options.model_id is None -def test_chat_options_init_with_args(ai_function_tool, ai_tool) -> None: - options = ChatOptions( - model_id="gpt-4", - max_tokens=1024, - temperature=0.7, - top_p=0.9, - presence_penalty=0.0, - frequency_penalty=0.0, - user="user-123", - tools=[ai_function_tool, ai_tool], - tool_choice="required", - additional_properties={"custom": True}, - logit_bias={"a": 1}, - metadata={"m": "v"}, - ) - assert options.model_id == "gpt-4" - assert options.max_tokens == 1024 - assert options.temperature == 0.7 - assert options.top_p == 0.9 - assert options.presence_penalty == 0.0 - assert options.frequency_penalty == 0.0 - assert options.user == "user-123" - for tool in options.tools: - assert isinstance(tool, ToolProtocol) - assert tool.name is not None - assert tool.description is not None - if isinstance(tool, AIFunction): - assert tool.parameters() is not None - - settings = options.to_provider_settings() - assert settings["model"] == "gpt-4" # uses alias - assert settings["tool_choice"] == "required" # serialized via model_serializer - assert settings["custom"] is True # from additional_properties - assert "additional_properties" not in settings - - def test_chat_options_tool_choice_validation_errors(): - with raises((ValidationError, TypeError)): + with raises((ContentError, TypeError)): ChatOptions(tool_choice="invalid-choice") -def test_chat_options_tool_choice_excluded_when_no_tools(): - options = ChatOptions(tool_choice="auto") - settings = options.to_provider_settings() - assert "tool_choice" not in settings - - def test_chat_options_and(ai_function_tool, ai_tool) -> None: options1 = ChatOptions(model_id="gpt-4o", tools=[ai_function_tool], logit_bias={"x": 1}, metadata={"a": "b"}) options2 = ChatOptions(model_id="gpt-4.1", tools=[ai_tool], additional_properties={"p": 1}) @@ -1059,69 +1017,6 @@ def 
test_chat_tool_mode_eq_with_string(): assert ToolMode.AUTO == "auto" -def test_chat_options_tool_choice_dict_mapping(ai_tool): - opts = ChatOptions(tool_choice={"mode": "required", "required_function_name": "fn"}, tools=[ai_tool]) - assert isinstance(opts.tool_choice, ToolMode) - assert opts.tool_choice.mode == "required" - assert opts.tool_choice.required_function_name == "fn" - # provider settings serialize to just the mode - settings = opts.to_provider_settings() - assert settings["tool_choice"] == "required" - - -def test_chat_options_to_provider_settings_with_falsy_values(): - """Test that falsy values (except None) are included in provider settings.""" - options = ChatOptions( - temperature=0.0, # falsy but not None - top_p=0.0, # falsy but not None - presence_penalty=False, # falsy but not None - frequency_penalty=None, # None - should be excluded - additional_properties={"empty_string": "", "zero": 0, "false_flag": False, "none_value": None}, - ) - - settings = options.to_provider_settings() - - # Falsy values that are not None should be included - assert "temperature" in settings - assert isinstance(settings["temperature"], float) - assert settings["temperature"] == 0.0 - assert "top_p" in settings - assert isinstance(settings["top_p"], float) - assert settings["top_p"] == 0.0 - assert "presence_penalty" in settings - assert isinstance(settings["presence_penalty"], float) # converted to float - assert settings["presence_penalty"] == 0.0 - - # None values should be excluded - assert "frequency_penalty" not in settings - - # Additional properties - falsy values should always be included - assert "empty_string" in settings - assert settings["empty_string"] == "" - assert "zero" in settings - assert settings["zero"] == 0 - assert "false_flag" in settings - assert settings["false_flag"] is False - assert "none_value" in settings - assert settings["none_value"] is None - - -def test_chat_options_empty_logit_bias_and_metadata_excluded(): - """Test that empty 
logit_bias and metadata are excluded from provider settings.""" - options = ChatOptions( - model_id="gpt-4o", - logit_bias={}, # empty dict should be excluded - metadata={}, # empty dict should be excluded - ) - - settings = options.to_provider_settings() - - # Empty logit_bias and metadata should be excluded - assert "logit_bias" not in settings - assert "metadata" not in settings - assert settings["model"] == "gpt-4o" - - # region AgentRunResponse @@ -1905,7 +1800,8 @@ def test_content_roundtrip_serialization(content_class: type[BaseContent], init_ elif isinstance(value, dict) and hasattr(reconstructed_value, "to_dict"): # Compare the dict with the serialized form of the object, excluding 'type' key reconstructed_dict = reconstructed_value.to_dict() - assert len(reconstructed_dict) == len(value) + if value: + assert len(reconstructed_dict) == len(value) else: assert reconstructed_value == value diff --git a/python/packages/core/tests/openai/test_openai_assistants_client.py b/python/packages/core/tests/openai/test_openai_assistants_client.py index 90947dd437..be1a059b58 100644 --- a/python/packages/core/tests/openai/test_openai_assistants_client.py +++ b/python/packages/core/tests/openai/test_openai_assistants_client.py @@ -193,6 +193,18 @@ def test_openai_assistants_client_init_with_default_headers(openai_unit_test_env assert chat_client.client.default_headers[key] == value +def test_openai_assistants_client_instructions_sent_once(mock_async_openai: MagicMock) -> None: + """Ensure instructions are only included once for OpenAI Assistants requests.""" + chat_client = create_test_openai_assistants_client(mock_async_openai) + instructions = "You are a helpful assistant." 
+ chat_options = ChatOptions(instructions=instructions) + + prepared_messages = chat_client.prepare_messages([ChatMessage(role=Role.USER, text="Hello")], chat_options) + run_options, _ = chat_client._prepare_options(prepared_messages, chat_options) # type: ignore[reportPrivateUsage] + + assert run_options.get("instructions") == instructions + + async def test_openai_assistants_client_get_assistant_id_or_create_existing_assistant( mock_async_openai: MagicMock, ) -> None: diff --git a/python/packages/core/tests/openai/test_openai_chat_client.py b/python/packages/core/tests/openai/test_openai_chat_client.py index d159091311..63db8c071e 100644 --- a/python/packages/core/tests/openai/test_openai_chat_client.py +++ b/python/packages/core/tests/openai/test_openai_chat_client.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. +import json import os from typing import Annotated from unittest.mock import MagicMock, patch @@ -99,6 +100,18 @@ def test_init_base_url_from_settings_env() -> None: assert str(client.client.base_url) == "https://custom-openai-endpoint.com/v1/" +def test_openai_chat_client_instructions_sent_once(openai_unit_test_env: dict[str, str]) -> None: + """Ensure instructions are only included once for OpenAI chat requests.""" + client = OpenAIChatClient() + instructions = "You are a helpful assistant." 
+ chat_options = ChatOptions(instructions=instructions) + + prepared_messages = client.prepare_messages([ChatMessage(role="user", text="Hello")], chat_options) + request_options = client._prepare_options(prepared_messages, chat_options) # type: ignore[reportPrivateUsage] + + assert json.dumps(request_options).count(instructions) == 1 + + @pytest.mark.parametrize("exclude_list", [["OPENAI_CHAT_MODEL_ID"]], indirect=True) def test_init_with_empty_model_id(openai_unit_test_env: dict[str, str]) -> None: with pytest.raises(ServiceInitializationError): diff --git a/python/packages/core/tests/openai/test_openai_chat_client_base.py b/python/packages/core/tests/openai/test_openai_chat_client_base.py index 63b4b9394b..86d41d9595 100644 --- a/python/packages/core/tests/openai/test_openai_chat_client_base.py +++ b/python/packages/core/tests/openai/test_openai_chat_client_base.py @@ -71,9 +71,7 @@ async def test_cmc( chat_history.append(ChatMessage(role="user", text="hello world")) openai_chat_completion = OpenAIChatClient() - await openai_chat_completion.get_response( - messages=chat_history, - ) + await openai_chat_completion.get_response(messages=chat_history) mock_create.assert_awaited_once_with( model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], stream=False, @@ -189,6 +187,26 @@ async def test_cmc_general_exception( ) +@patch.object(AsyncChatCompletions, "create", new_callable=AsyncMock) +async def test_cmc_additional_properties( + mock_create: AsyncMock, + chat_history: list[ChatMessage], + mock_chat_completion_response: ChatCompletion, + openai_unit_test_env: dict[str, str], +): + mock_create.return_value = mock_chat_completion_response + chat_history.append(ChatMessage(role="user", text="hello world")) + + openai_chat_completion = OpenAIChatClient() + await openai_chat_completion.get_response(messages=chat_history, additional_properties={"reasoning_effort": "low"}) + mock_create.assert_awaited_once_with( + model=openai_unit_test_env["OPENAI_CHAT_MODEL_ID"], + 
stream=False, + messages=openai_chat_completion._prepare_chat_history_for_request(chat_history), # type: ignore + reasoning_effort="low", + ) + + # region Streaming diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py index f42d1238dd..0d59119568 100644 --- a/python/packages/core/tests/openai/test_openai_responses_client.py +++ b/python/packages/core/tests/openai/test_openai_responses_client.py @@ -2,12 +2,14 @@ import asyncio import base64 +import json import os from typing import Annotated from unittest.mock import MagicMock, patch import pytest from openai import BadRequestError +from openai.types.responses.response_reasoning_item import Summary from openai.types.responses.response_reasoning_summary_text_delta_event import ResponseReasoningSummaryTextDeltaEvent from openai.types.responses.response_reasoning_summary_text_done_event import ResponseReasoningSummaryTextDoneEvent from openai.types.responses.response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent @@ -132,6 +134,18 @@ def test_init_with_default_header(openai_unit_test_env: dict[str, str]) -> None: assert openai_responses_client.client.default_headers[key] == value +def test_openai_responses_client_instructions_sent_once(openai_unit_test_env: dict[str, str]) -> None: + """Ensure instructions are only included once for OpenAI Responses requests.""" + client = OpenAIResponsesClient() + instructions = "You are a helpful assistant." 
+ chat_options = ChatOptions(instructions=instructions) + + prepared_messages = client.prepare_messages([ChatMessage(role="user", text="Hello")], chat_options) + request_options = client._prepare_options(prepared_messages, chat_options) # type: ignore[reportPrivateUsage] + + assert json.dumps(request_options).count(instructions) == 1 + + @pytest.mark.parametrize("exclude_list", [["OPENAI_RESPONSES_MODEL_ID"]], indirect=True) def test_init_with_empty_model_id(openai_unit_test_env: dict[str, str]) -> None: with pytest.raises(ServiceInitializationError): @@ -209,7 +223,7 @@ def test_get_response_with_all_parameters() -> None: instructions="You are a helpful assistant", max_tokens=100, parallel_tool_calls=True, - model="gpt-4", + model_id="gpt-4", previous_response_id="prev-123", reasoning={"chain_of_thought": "enabled"}, service_tier="auto", @@ -535,13 +549,13 @@ def test_response_content_creation_with_reasoning() -> None: mock_reasoning_item = MagicMock() mock_reasoning_item.type = "reasoning" mock_reasoning_item.content = [mock_reasoning_content] - mock_reasoning_item.summary = ["Summary"] + mock_reasoning_item.summary = [Summary(text="Summary", type="summary_text")] mock_response.output = [mock_reasoning_item] response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore - assert len(response.messages[0].contents) == 1 + assert len(response.messages[0].contents) == 2 assert isinstance(response.messages[0].contents[0], TextReasoningContent) assert response.messages[0].contents[0].text == "Reasoning step" @@ -1536,11 +1550,9 @@ async def test_openai_responses_client_agent_chat_options_run_level() -> None: instructions="You are a helpful assistant.", ) as agent: response = await agent.run( - "Provide a brief, helpful response.", - max_tokens=100, - temperature=0.7, - top_p=0.9, - seed=123, + "Provide a brief, helpful response about why the sky blue is.", + max_tokens=600, + model_id="gpt-4o", user="comprehensive-test-user", 
tools=[get_weather], tool_choice="auto", @@ -2077,7 +2089,6 @@ def test_prepare_options_store_parameter_handling() -> None: chat_options = ChatOptions(store=False, conversation_id="") options = client._prepare_options(messages, chat_options) # type: ignore assert options["store"] is False - assert "previous_response_id" not in options chat_options = ChatOptions(store=None, conversation_id=None) options = client._prepare_options(messages, chat_options) # type: ignore diff --git a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py new file mode 100644 index 0000000000..8124f6253d --- /dev/null +++ b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py @@ -0,0 +1,122 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Tests for AgentExecutor handling of tool calls and results in streaming mode.""" + +from collections.abc import AsyncIterable +from typing import Any + +from agent_framework import ( + AgentExecutor, + AgentRunResponse, + AgentRunResponseUpdate, + AgentRunUpdateEvent, + AgentThread, + BaseAgent, + ChatMessage, + FunctionCallContent, + FunctionResultContent, + Role, + TextContent, + WorkflowBuilder, +) + + +class _ToolCallingAgent(BaseAgent): + """Mock agent that simulates tool calls and results in streaming mode.""" + + def __init__(self, **kwargs: Any) -> None: + super().__init__(**kwargs) + + async def run( + self, + messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + *, + thread: AgentThread | None = None, + **kwargs: Any, + ) -> AgentRunResponse: + """Non-streaming run - not used in this test.""" + return AgentRunResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="done")]) + + async def run_stream( + self, + messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + *, + thread: AgentThread | None = None, + **kwargs: Any, + ) -> AsyncIterable[AgentRunResponseUpdate]: + """Simulate streaming 
with tool calls and results.""" + # First update: some text + yield AgentRunResponseUpdate( + contents=[TextContent(text="Let me search for that...")], + role=Role.ASSISTANT, + ) + + # Second update: tool call (no text!) + yield AgentRunResponseUpdate( + contents=[ + FunctionCallContent( + call_id="call_123", + name="search", + arguments={"query": "weather"}, + ) + ], + role=Role.ASSISTANT, + ) + + # Third update: tool result (no text!) + yield AgentRunResponseUpdate( + contents=[ + FunctionResultContent( + call_id="call_123", + result={"temperature": 72, "condition": "sunny"}, + ) + ], + role=Role.TOOL, + ) + + # Fourth update: final text response + yield AgentRunResponseUpdate( + contents=[TextContent(text="The weather is sunny, 72°F.")], + role=Role.ASSISTANT, + ) + + +async def test_agent_executor_emits_tool_calls_in_streaming_mode() -> None: + """Test that AgentExecutor emits updates containing FunctionCallContent and FunctionResultContent.""" + # Arrange + agent = _ToolCallingAgent(id="tool_agent", name="ToolAgent") + agent_exec = AgentExecutor(agent, id="tool_exec") + + workflow = WorkflowBuilder().set_start_executor(agent_exec).build() + + # Act: run in streaming mode + events: list[AgentRunUpdateEvent] = [] + async for event in workflow.run_stream("What's the weather?"): + if isinstance(event, AgentRunUpdateEvent): + events.append(event) + + # Assert: we should receive 4 events (text, function call, function result, text) + assert len(events) == 4, f"Expected 4 events, got {len(events)}" + + # First event: text update + assert events[0].data is not None + assert isinstance(events[0].data.contents[0], TextContent) + assert "Let me search" in events[0].data.contents[0].text + + # Second event: function call + assert events[1].data is not None + assert isinstance(events[1].data.contents[0], FunctionCallContent) + func_call = events[1].data.contents[0] + assert func_call.call_id == "call_123" + assert func_call.name == "search" + + # Third event: function 
result + assert events[2].data is not None + assert isinstance(events[2].data.contents[0], FunctionResultContent) + func_result = events[2].data.contents[0] + assert func_result.call_id == "call_123" + + # Fourth event: final text + assert events[3].data is not None + assert isinstance(events[3].data.contents[0], TextContent) + assert "sunny" in events[3].data.contents[0].text diff --git a/python/packages/core/tests/workflow/test_checkpoint_decode.py b/python/packages/core/tests/workflow/test_checkpoint_decode.py index 1947b3fb41..08c10aa9a9 100644 --- a/python/packages/core/tests/workflow/test_checkpoint_decode.py +++ b/python/packages/core/tests/workflow/test_checkpoint_decode.py @@ -3,7 +3,7 @@ from dataclasses import dataclass # noqa: I001 from typing import Any, cast -from agent_framework._workflows._executor import RequestInfoMessage, RequestResponse +from agent_framework._workflows._request_info_executor import RequestInfoMessage, RequestResponse from agent_framework._workflows._runner_context import ( # type: ignore _decode_checkpoint_value, # type: ignore _encode_checkpoint_value, # type: ignore diff --git a/python/packages/core/tests/workflow/test_magentic.py b/python/packages/core/tests/workflow/test_magentic.py index bb6984cc74..b52449a928 100644 --- a/python/packages/core/tests/workflow/test_magentic.py +++ b/python/packages/core/tests/workflow/test_magentic.py @@ -344,9 +344,10 @@ async def test_magentic_checkpoint_resume_round_trip(): assert orchestrator._context is not None # type: ignore[reportPrivateUsage] assert orchestrator._context.chat_history # type: ignore[reportPrivateUsage] - assert orchestrator._context.chat_history[0].text == task_text # type: ignore[reportPrivateUsage] assert orchestrator._task_ledger is not None # type: ignore[reportPrivateUsage] assert manager2.task_ledger is not None + # Initial message should be the task ledger plan + assert orchestrator._context.chat_history[0].text == orchestrator._task_ledger.text # type: 
ignore[reportPrivateUsage] class _DummyExec(Executor): @@ -690,3 +691,47 @@ async def test_magentic_checkpoint_resume_rejects_participant_renames(): responses={req_event.request_id: MagenticPlanReviewReply(decision=MagenticPlanReviewDecision.APPROVE)}, ): pass + + +class NotProgressingManager(MagenticManagerBase): + """ + A manager that never marks progress being made, to test stall/reset limits. + """ + + async def plan(self, magentic_context: MagenticContext) -> ChatMessage: + return ChatMessage(role=Role.ASSISTANT, text="ledger") + + async def replan(self, magentic_context: MagenticContext) -> ChatMessage: + return ChatMessage(role=Role.ASSISTANT, text="re-ledger") + + async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: + return MagenticProgressLedger( + is_request_satisfied=MagenticProgressLedgerItem(reason="r", answer=False), + is_in_loop=MagenticProgressLedgerItem(reason="r", answer=True), + is_progress_being_made=MagenticProgressLedgerItem(reason="r", answer=False), + next_speaker=MagenticProgressLedgerItem(reason="r", answer="agentA"), + instruction_or_question=MagenticProgressLedgerItem(reason="r", answer="done"), + ) + + async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: + return ChatMessage(role=Role.ASSISTANT, text="final") + + +async def test_magentic_stall_and_reset_successfully(): + manager = NotProgressingManager(max_round_count=10, max_stall_count=0, max_reset_count=1) + + wf = MagenticBuilder().participants(agentA=_DummyExec("agentA")).with_standard_manager(manager).build() + + events: list[WorkflowEvent] = [] + async for ev in wf.run_stream("test limits"): + events.append(ev) + + idle_status = next( + (e for e in events if isinstance(e, WorkflowStatusEvent) and e.state == WorkflowRunState.IDLE), None + ) + assert idle_status is not None + output_event = next((e for e in events if isinstance(e, WorkflowOutputEvent)), None) + assert output_event is not None + 
assert isinstance(output_event.data, ChatMessage) + assert output_event.data.text is not None + assert output_event.data.text == "re-ledger" diff --git a/python/packages/core/tests/workflow/test_request_info_executor_rehydrate.py b/python/packages/core/tests/workflow/test_request_info_executor_rehydrate.py index dee1a30c25..2b52a04f61 100644 --- a/python/packages/core/tests/workflow/test_request_info_executor_rehydrate.py +++ b/python/packages/core/tests/workflow/test_request_info_executor_rehydrate.py @@ -7,7 +7,7 @@ from agent_framework._workflows._checkpoint import CheckpointStorage, WorkflowCheckpoint from agent_framework._workflows._events import RequestInfoEvent, WorkflowEvent -from agent_framework._workflows._executor import ( +from agent_framework._workflows._request_info_executor import ( PendingRequestDetails, RequestInfoExecutor, RequestInfoMessage, diff --git a/python/packages/devui/README.md b/python/packages/devui/README.md index a76e1e5e4e..8c70a71f39 100644 --- a/python/packages/devui/README.md +++ b/python/packages/devui/README.md @@ -78,21 +78,65 @@ devui ./agents --tracing framework ## OpenAI-Compatible API -For convenience, you can interact with the agents/workflows using the standard OpenAI API format. Just specify the `entity_id` in the `extra_body` field. This can be an `agent_id` or `workflow_id`. +For convenience, DevUI provides an OpenAI Responses backend API. This means you can run the backend and also use the OpenAI client sdk to connect to it. Use **agent/workflow name as the model**, and set streaming to `True` as needed. 
```bash -# Standard OpenAI format +# Simple - use your entity name as the model curl -X POST http://localhost:8080/v1/responses \ -H "Content-Type: application/json" \ -d @- << 'EOF' { - "model": "agent-framework", - "input": "Hello world", - "extra_body": {"entity_id": "weather_agent"} + "model": "weather_agent", + "input": "Hello world" } +``` + +Or use the OpenAI Python SDK: + +```python +from openai import OpenAI + +client = OpenAI( + base_url="http://localhost:8080/v1", + api_key="not-needed" # API key not required for local DevUI +) + +response = client.responses.create( + model="weather_agent", # Your agent/workflow name + input="What's the weather in Seattle?" +) + +# Extract text from response +print(response.output[0].content[0].text) +# Supports streaming with stream=True +``` + +### Multi-turn Conversations + +Use the standard OpenAI `conversation` parameter for multi-turn conversations: +```python +# Create a conversation +conversation = client.conversations.create( + metadata={"agent_id": "weather_agent"} +) + +# Use it across multiple turns +response1 = client.responses.create( + model="weather_agent", + input="What's the weather in Seattle?", + conversation=conversation.id +) + +response2 = client.responses.create( + model="weather_agent", + input="How about tomorrow?", + conversation=conversation.id # Continues the conversation! +) ``` +**How it works:** DevUI automatically retrieves the conversation's message history from the stored thread and passes it to the agent. You don't need to manually manage message history - just provide the same `conversation` ID for follow-up requests. + ## CLI Options ```bash @@ -109,30 +153,79 @@ Options: ## Key Endpoints +## API Mapping + +Given that DevUI offers an OpenAI Responses API, it internally maps messages and events from Agent Framework to OpenAI Responses API events (in `_mapper.py`). 
For transparency, this mapping is shown below: + +| Agent Framework Content | OpenAI Event/Type | Status | +| ------------------------------- | ---------------------------------------- | -------- | +| `TextContent` | `response.output_text.delta` | Standard | +| `TextReasoningContent` | `response.reasoning.delta` | Standard | +| `FunctionCallContent` (initial) | `response.output_item.added` | Standard | +| `FunctionCallContent` (args) | `response.function_call_arguments.delta` | Standard | +| `FunctionResultContent` | `response.function_result.complete` | DevUI | +| `ErrorContent` | `response.error` | Standard | +| `UsageContent` | Final `Response.usage` field (not streamed) | Standard | +| `WorkflowEvent` | `response.workflow_event.complete` | DevUI | +| `DataContent`, `UriContent` | `response.trace.complete` | DevUI | + +- **Standard** = OpenAI Responses API spec +- **DevUI** = Custom extensions for Agent Framework features (workflows, traces, function results) + +### OpenAI Responses API Compliance + +DevUI follows the OpenAI Responses API specification for maximum compatibility: + +**Standard OpenAI Types Used:** +- `ResponseOutputItemAddedEvent` - Output item notifications (function calls) +- `Response.usage` - Token usage (in final response, not streamed) +- All standard text, reasoning, and function call events + +**Custom DevUI Extensions:** +- `response.function_result.complete` - Function execution results (DevUI executes functions, OpenAI doesn't) +- `response.workflow_event.complete` - Agent Framework workflow events +- `response.trace.complete` - Execution traces for debugging + +These custom extensions are clearly namespaced and can be safely ignored by standard OpenAI clients. 
+ +### Entity Management + - `GET /v1/entities` - List discovered agents/workflows - `GET /v1/entities/{entity_id}/info` - Get detailed entity information - `POST /v1/entities/add` - Add entity from URL (for gallery samples) - `DELETE /v1/entities/{entity_id}` - Remove remote entity + +### Execution (OpenAI Responses API) + - `POST /v1/responses` - Execute agent/workflow (streaming or sync) + +### Conversations (OpenAI Standard) + +- `POST /v1/conversations` - Create conversation +- `GET /v1/conversations/{id}` - Get conversation +- `POST /v1/conversations/{id}` - Update conversation metadata +- `DELETE /v1/conversations/{id}` - Delete conversation +- `GET /v1/conversations?agent_id={id}` - List conversations _(DevUI extension)_ +- `POST /v1/conversations/{id}/items` - Add items to conversation +- `GET /v1/conversations/{id}/items` - List conversation items +- `GET /v1/conversations/{id}/items/{item_id}` - Get conversation item + +### Health + - `GET /health` - Health check -- `POST /v1/threads` - Create thread for agent (optional) -- `GET /v1/threads?agent_id={id}` - List threads for agent -- `GET /v1/threads/{thread_id}` - Get thread info -- `DELETE /v1/threads/{thread_id}` - Delete thread -- `GET /v1/threads/{thread_id}/messages` - Get thread messages ## Implementation - **Discovery**: `agent_framework_devui/_discovery.py` - **Execution**: `agent_framework_devui/_executor.py` - **Message Mapping**: `agent_framework_devui/_mapper.py` -- **Session Management**: `agent_framework_devui/_session.py` +- **Conversations**: `agent_framework_devui/_conversations.py` - **API Server**: `agent_framework_devui/_server.py` - **CLI**: `agent_framework_devui/_cli.py` ## Examples -See `samples/` for working agent and workflow implementations. 
+See working implementations in `python/samples/getting_started/devui/` ## License diff --git a/python/packages/devui/agent_framework_devui/_conversations.py b/python/packages/devui/agent_framework_devui/_conversations.py new file mode 100644 index 0000000000..5b892c8f35 --- /dev/null +++ b/python/packages/devui/agent_framework_devui/_conversations.py @@ -0,0 +1,473 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Conversation storage abstraction for OpenAI Conversations API. + +This module provides a clean abstraction layer for managing conversations +while wrapping AgentFramework's AgentThread underneath. +""" + +import time +import uuid +from abc import ABC, abstractmethod +from typing import Any, Literal, cast + +from agent_framework import AgentThread, ChatMessage +from openai.types.conversations import Conversation, ConversationDeletedResource +from openai.types.conversations.conversation_item import ConversationItem +from openai.types.conversations.message import Message +from openai.types.conversations.text_content import TextContent +from openai.types.responses import ( + ResponseFunctionToolCallItem, + ResponseFunctionToolCallOutputItem, + ResponseInputFile, + ResponseInputImage, +) + +# Type alias for OpenAI Message role literals +MessageRole = Literal["unknown", "user", "assistant", "system", "critic", "discriminator", "developer", "tool"] + + +class ConversationStore(ABC): + """Abstract base class for conversation storage. + + Provides OpenAI Conversations API interface while managing + AgentThread instances underneath. + """ + + @abstractmethod + def create_conversation(self, metadata: dict[str, str] | None = None) -> Conversation: + """Create a new conversation (wraps AgentThread creation). 
+ + Args: + metadata: Optional metadata dict (e.g., {"agent_id": "weather_agent"}) + + Returns: + Conversation object with generated ID + """ + pass + + @abstractmethod + def get_conversation(self, conversation_id: str) -> Conversation | None: + """Retrieve conversation metadata. + + Args: + conversation_id: Conversation ID + + Returns: + Conversation object or None if not found + """ + pass + + @abstractmethod + def update_conversation(self, conversation_id: str, metadata: dict[str, str]) -> Conversation: + """Update conversation metadata. + + Args: + conversation_id: Conversation ID + metadata: New metadata dict + + Returns: + Updated Conversation object + + Raises: + ValueError: If conversation not found + """ + pass + + @abstractmethod + def delete_conversation(self, conversation_id: str) -> ConversationDeletedResource: + """Delete conversation (including AgentThread). + + Args: + conversation_id: Conversation ID + + Returns: + ConversationDeletedResource object + + Raises: + ValueError: If conversation not found + """ + pass + + @abstractmethod + async def add_items(self, conversation_id: str, items: list[dict[str, Any]]) -> list[ConversationItem]: + """Add items to conversation (syncs to AgentThread.message_store). + + Args: + conversation_id: Conversation ID + items: List of conversation items to add + + Returns: + List of added ConversationItem objects + + Raises: + ValueError: If conversation not found + """ + pass + + @abstractmethod + async def list_items( + self, conversation_id: str, limit: int = 100, after: str | None = None, order: str = "asc" + ) -> tuple[list[ConversationItem], bool]: + """List conversation items from AgentThread.message_store. 
+ + Args: + conversation_id: Conversation ID + limit: Maximum number of items to return + after: Cursor for pagination (item_id) + order: Sort order ("asc" or "desc") + + Returns: + Tuple of (items list, has_more boolean) + + Raises: + ValueError: If conversation not found + """ + pass + + @abstractmethod + def get_item(self, conversation_id: str, item_id: str) -> ConversationItem | None: + """Get specific conversation item. + + Args: + conversation_id: Conversation ID + item_id: Item ID + + Returns: + ConversationItem or None if not found + """ + pass + + @abstractmethod + def get_thread(self, conversation_id: str) -> AgentThread | None: + """Get underlying AgentThread for execution (internal use). + + This is the critical method that allows the executor to get the + AgentThread for running agents with conversation context. + + Args: + conversation_id: Conversation ID + + Returns: + AgentThread object or None if not found + """ + pass + + @abstractmethod + def list_conversations_by_metadata(self, metadata_filter: dict[str, str]) -> list[Conversation]: + """Filter conversations by metadata (e.g., agent_id). + + Args: + metadata_filter: Metadata key-value pairs to match + + Returns: + List of matching Conversation objects + """ + pass + + +class InMemoryConversationStore(ConversationStore): + """In-memory conversation storage wrapping AgentThread. + + This implementation stores conversations in memory with their + underlying AgentThread instances for execution. + """ + + def __init__(self) -> None: + """Initialize in-memory conversation storage. + + Storage structure maps conversation IDs to conversation data including + the underlying AgentThread, metadata, and cached ConversationItems. 
+ """ + self._conversations: dict[str, dict[str, Any]] = {} + + # Item index for O(1) lookup: {conversation_id: {item_id: ConversationItem}} + self._item_index: dict[str, dict[str, ConversationItem]] = {} + + def create_conversation(self, metadata: dict[str, str] | None = None) -> Conversation: + """Create a new conversation with underlying AgentThread.""" + conv_id = f"conv_{uuid.uuid4().hex}" + created_at = int(time.time()) + + # Create AgentThread with default ChatMessageStore + thread = AgentThread() + + self._conversations[conv_id] = { + "id": conv_id, + "thread": thread, + "metadata": metadata or {}, + "created_at": created_at, + "items": [], + } + + # Initialize item index for this conversation + self._item_index[conv_id] = {} + + return Conversation(id=conv_id, object="conversation", created_at=created_at, metadata=metadata) + + def get_conversation(self, conversation_id: str) -> Conversation | None: + """Retrieve conversation metadata.""" + conv_data = self._conversations.get(conversation_id) + if not conv_data: + return None + + return Conversation( + id=conv_data["id"], + object="conversation", + created_at=conv_data["created_at"], + metadata=conv_data.get("metadata"), + ) + + def update_conversation(self, conversation_id: str, metadata: dict[str, str]) -> Conversation: + """Update conversation metadata.""" + conv_data = self._conversations.get(conversation_id) + if not conv_data: + raise ValueError(f"Conversation {conversation_id} not found") + + conv_data["metadata"] = metadata + + return Conversation( + id=conv_data["id"], + object="conversation", + created_at=conv_data["created_at"], + metadata=metadata, + ) + + def delete_conversation(self, conversation_id: str) -> ConversationDeletedResource: + """Delete conversation and its AgentThread.""" + if conversation_id not in self._conversations: + raise ValueError(f"Conversation {conversation_id} not found") + + del self._conversations[conversation_id] + # Cleanup item index + 
self._item_index.pop(conversation_id, None) + + return ConversationDeletedResource(id=conversation_id, object="conversation.deleted", deleted=True) + + async def add_items(self, conversation_id: str, items: list[dict[str, Any]]) -> list[ConversationItem]: + """Add items to conversation and sync to AgentThread.""" + conv_data = self._conversations.get(conversation_id) + if not conv_data: + raise ValueError(f"Conversation {conversation_id} not found") + + thread: AgentThread = conv_data["thread"] + + # Convert items to ChatMessages and add to thread + chat_messages = [] + for item in items: + # Simple conversion - assume text content for now + role = item.get("role", "user") + content = item.get("content", []) + text = content[0].get("text", "") if content else "" + + chat_msg = ChatMessage(role=role, contents=[{"type": "text", "text": text}]) + chat_messages.append(chat_msg) + + # Add messages to AgentThread + await thread.on_new_messages(chat_messages) + + # Create Message objects (ConversationItem is a Union - use concrete Message type) + conv_items: list[ConversationItem] = [] + for msg in chat_messages: + item_id = f"item_{uuid.uuid4().hex}" + + # Extract role - handle both string and enum + role_str = msg.role.value if hasattr(msg.role, "value") else str(msg.role) + role = cast(MessageRole, role_str) # Safe: Agent Framework roles match OpenAI roles + + # Convert ChatMessage contents to OpenAI TextContent format + message_content = [] + for content_item in msg.contents: + if hasattr(content_item, "type") and content_item.type == "text": + # Extract text from TextContent object + text_value = getattr(content_item, "text", "") + message_content.append(TextContent(type="text", text=text_value)) + + # Create Message object (concrete type from ConversationItem union) + message = Message( + id=item_id, + type="message", # Required discriminator for union + role=role, + content=message_content, + status="completed", # Required field + ) + conv_items.append(message) + + 
# Cache items + conv_data["items"].extend(conv_items) + + # Update item index for O(1) lookup + if conversation_id not in self._item_index: + self._item_index[conversation_id] = {} + + for conv_item in conv_items: + if conv_item.id: # Guard against None + self._item_index[conversation_id][conv_item.id] = conv_item + + return conv_items + + async def list_items( + self, conversation_id: str, limit: int = 100, after: str | None = None, order: str = "asc" + ) -> tuple[list[ConversationItem], bool]: + """List conversation items from AgentThread message store. + + Converts AgentFramework ChatMessages to proper OpenAI ConversationItem types: + - Messages with text/images/files → Message + - Function calls → ResponseFunctionToolCallItem + - Function results → ResponseFunctionToolCallOutputItem + """ + conv_data = self._conversations.get(conversation_id) + if not conv_data: + raise ValueError(f"Conversation {conversation_id} not found") + + thread: AgentThread = conv_data["thread"] + + # Get messages from thread's message store + items: list[ConversationItem] = [] + if thread.message_store: + af_messages = await thread.message_store.list_messages() + + # Convert each AgentFramework ChatMessage to appropriate ConversationItem type(s) + for i, msg in enumerate(af_messages): + item_id = f"item_{i}" + role_str = msg.role.value if hasattr(msg.role, "value") else str(msg.role) + role = cast(MessageRole, role_str) # Safe: Agent Framework roles match OpenAI roles + + # Process each content item in the message + # A single ChatMessage may produce multiple ConversationItems + # (e.g., a message with both text and a function call) + message_contents: list[TextContent | ResponseInputImage | ResponseInputFile] = [] + function_calls = [] + function_results = [] + + for content in msg.contents: + content_type = getattr(content, "type", None) + + if content_type == "text": + # Text content for Message + text_value = getattr(content, "text", "") + 
message_contents.append(TextContent(type="text", text=text_value)) + + elif content_type == "data": + # Data content (images, files, PDFs) + uri = getattr(content, "uri", "") + media_type = getattr(content, "media_type", None) + + if media_type and media_type.startswith("image/"): + # Convert to ResponseInputImage + message_contents.append( + ResponseInputImage(type="input_image", image_url=uri, detail="auto") + ) + else: + # Convert to ResponseInputFile + # Extract filename from URI if possible + filename = None + if media_type == "application/pdf": + filename = "document.pdf" + + message_contents.append( + ResponseInputFile(type="input_file", file_url=uri, filename=filename) + ) + + elif content_type == "function_call": + # Function call - create separate ConversationItem + call_id = getattr(content, "call_id", None) + name = getattr(content, "name", "") + arguments = getattr(content, "arguments", "") + + if call_id and name: + function_calls.append( + ResponseFunctionToolCallItem( + id=f"{item_id}_call_{call_id}", + call_id=call_id, + name=name, + arguments=arguments, + type="function_call", + status="completed", + ) + ) + + elif content_type == "function_result": + # Function result - create separate ConversationItem + call_id = getattr(content, "call_id", None) + # Output is stored in additional_properties + output = "" + if hasattr(content, "additional_properties"): + output = content.additional_properties.get("output", "") + + if call_id: + function_results.append( + ResponseFunctionToolCallOutputItem( + id=f"{item_id}_result_{call_id}", + call_id=call_id, + output=output, + type="function_call_output", + status="completed", + ) + ) + + # Create ConversationItems based on what we found + # If message has text/images/files, create a Message item + if message_contents: + message = Message( + id=item_id, + type="message", + role=role, # type: ignore + content=message_contents, # type: ignore + status="completed", + ) + items.append(message) + + # Add function 
call items + items.extend(function_calls) + + # Add function result items + items.extend(function_results) + + # Apply pagination + if order == "desc": + items = items[::-1] + + start_idx = 0 + if after: + # Find the index after the cursor + for i, item in enumerate(items): + if item.id == after: + start_idx = i + 1 + break + + paginated_items = items[start_idx : start_idx + limit] + has_more = len(items) > start_idx + limit + + return paginated_items, has_more + + def get_item(self, conversation_id: str, item_id: str) -> ConversationItem | None: + """Get specific conversation item - O(1) lookup via index.""" + # Use index for O(1) lookup instead of linear search + conv_items = self._item_index.get(conversation_id) + if not conv_items: + return None + + return conv_items.get(item_id) + + def get_thread(self, conversation_id: str) -> AgentThread | None: + """Get AgentThread for execution - CRITICAL for agent.run_stream().""" + conv_data = self._conversations.get(conversation_id) + return conv_data["thread"] if conv_data else None + + def list_conversations_by_metadata(self, metadata_filter: dict[str, str]) -> list[Conversation]: + """Filter conversations by metadata (e.g., agent_id).""" + results = [] + for conv_data in self._conversations.values(): + conv_meta = conv_data.get("metadata", {}) + # Check if all filter items match + if all(conv_meta.get(k) == v for k, v in metadata_filter.items()): + results.append( + Conversation( + id=conv_data["id"], + object="conversation", + created_at=conv_data["created_at"], + metadata=conv_meta, + ) + ) + return results diff --git a/python/packages/devui/agent_framework_devui/_discovery.py b/python/packages/devui/agent_framework_devui/_discovery.py index d6ce45b53f..631ba60412 100644 --- a/python/packages/devui/agent_framework_devui/_discovery.py +++ b/python/packages/devui/agent_framework_devui/_discovery.py @@ -20,6 +20,10 @@ logger = logging.getLogger(__name__) +# Constants for remote entity fetching 
+REMOTE_FETCH_TIMEOUT_SECONDS = 30.0 +REMOTE_FETCH_MAX_SIZE_MB = 10 + class EntityDiscovery: """Discovery for Agent Framework entities - agents and workflows.""" @@ -116,16 +120,9 @@ async def create_entity_info_from_object( # Extract metadata with improved fallback naming name = getattr(entity_object, "name", None) if not name: - # In-memory entities: use ID with entity type prefix since no directory name available - entity_id_raw = getattr(entity_object, "id", None) - if entity_id_raw: - # Truncate UUID to first 8 characters for readability - short_id = str(entity_id_raw)[:8] if len(str(entity_id_raw)) > 8 else str(entity_id_raw) - name = f"{entity_type.title()} {short_id}" - else: - # Fallback to class name with entity type - class_name = entity_object.__class__.__name__ - name = f"{entity_type.title()} {class_name}" + # In-memory entities: use class name as it's more readable than UUID + class_name = entity_object.__class__.__name__ + name = f"{entity_type.title()} {class_name}" description = getattr(entity_object, "description", "") # Generate entity ID using Agent Framework specific naming @@ -142,43 +139,27 @@ async def create_entity_info_from_object( middleware_list = None if entity_type == "agent": - # Try to get instructions - if hasattr(entity_object, "chat_options") and hasattr(entity_object.chat_options, "instructions"): - instructions = entity_object.chat_options.instructions - - # Try to get model - check both chat_options and chat_client - if ( - hasattr(entity_object, "chat_options") - and hasattr(entity_object.chat_options, "model_id") - and entity_object.chat_options.model_id - ): - model = entity_object.chat_options.model_id - elif hasattr(entity_object, "chat_client") and hasattr(entity_object.chat_client, "model_id"): - model = entity_object.chat_client.model_id - - # Try to get chat client type - if hasattr(entity_object, "chat_client"): - chat_client_type = entity_object.chat_client.__class__.__name__ - - # Try to get context providers - if 
( - hasattr(entity_object, "context_provider") - and entity_object.context_provider - and hasattr(entity_object.context_provider, "__class__") - ): - context_providers_list = [entity_object.context_provider.__class__.__name__] - - # Try to get middleware - if hasattr(entity_object, "middleware") and entity_object.middleware: - middleware_list = [] - for m in entity_object.middleware: - # Try multiple ways to get a good name for middleware - if hasattr(m, "__name__"): # Function or callable - middleware_list.append(m.__name__) - elif hasattr(m, "__class__"): # Class instance - middleware_list.append(m.__class__.__name__) - else: - middleware_list.append(str(m)) + from ._utils import extract_agent_metadata + + agent_meta = extract_agent_metadata(entity_object) + instructions = agent_meta["instructions"] + model = agent_meta["model"] + chat_client_type = agent_meta["chat_client_type"] + context_providers_list = agent_meta["context_providers"] + middleware_list = agent_meta["middleware"] + + # Log helpful info about agent capabilities (before creating EntityInfo) + if entity_type == "agent": + has_run_stream = hasattr(entity_object, "run_stream") + has_run = hasattr(entity_object, "run") + + if not has_run_stream and has_run: + logger.info( + f"Agent '{entity_id}' only has run() (non-streaming). " + "DevUI will automatically convert to streaming." + ) + elif not has_run_stream and not has_run: + logger.warning(f"Agent '{entity_id}' lacks both run() and run_stream() methods. 
May not work.") # Create EntityInfo with Agent Framework specifics return EntityInfo( @@ -189,7 +170,7 @@ async def create_entity_info_from_object( framework="agent_framework", tools=[str(tool) for tool in (tools_list or [])], instructions=instructions, - model=model, + model_id=model, chat_client_type=chat_client_type, context_providers=context_providers_list, middleware=middleware_list, @@ -444,7 +425,9 @@ def _is_valid_agent(self, obj: Any) -> bool: pass # Fallback to duck typing for agent protocol - if hasattr(obj, "run_stream") and hasattr(obj, "id") and hasattr(obj, "name"): + # Agent must have either run_stream() or run() method, plus id and name + has_execution_method = hasattr(obj, "run_stream") or hasattr(obj, "run") + if has_execution_method and hasattr(obj, "id") and hasattr(obj, "name"): return True except (TypeError, AttributeError): @@ -482,13 +465,9 @@ async def _register_entity_from_object( # Extract metadata from the live object with improved fallback naming name = getattr(obj, "name", None) if not name: - entity_id_raw = getattr(obj, "id", None) - if entity_id_raw: - # Truncate UUID to first 8 characters for readability - short_id = str(entity_id_raw)[:8] if len(str(entity_id_raw)) > 8 else str(entity_id_raw) - name = f"{obj_type.title()} {short_id}" - else: - name = f"{obj_type.title()} {obj.__class__.__name__}" + # Use class name as it's more readable than UUID + class_name = obj.__class__.__name__ + name = f"{obj_type.title()} {class_name}" description = getattr(obj, "description", None) tools = await self._extract_tools_from_object(obj, obj_type) @@ -505,39 +484,14 @@ async def _register_entity_from_object( middleware_list = None if obj_type == "agent": - # Try to get instructions - if hasattr(obj, "chat_options") and hasattr(obj.chat_options, "instructions"): - instructions = obj.chat_options.instructions - - # Try to get model - check both chat_options and chat_client - if hasattr(obj, "chat_options") and hasattr(obj.chat_options, 
"model_id") and obj.chat_options.model_id: - model = obj.chat_options.model_id - elif hasattr(obj, "chat_client") and hasattr(obj.chat_client, "model_id"): - model = obj.chat_client.model_id - - # Try to get chat client type - if hasattr(obj, "chat_client"): - chat_client_type = obj.chat_client.__class__.__name__ - - # Try to get context providers - if ( - hasattr(obj, "context_provider") - and obj.context_provider - and hasattr(obj.context_provider, "__class__") - ): - context_providers_list = [obj.context_provider.__class__.__name__] - - # Try to get middleware - if hasattr(obj, "middleware") and obj.middleware: - middleware_list = [] - for m in obj.middleware: - # Try multiple ways to get a good name for middleware - if hasattr(m, "__name__"): # Function or callable - middleware_list.append(m.__name__) - elif hasattr(m, "__class__"): # Class instance - middleware_list.append(m.__class__.__name__) - else: - middleware_list.append(str(m)) + from ._utils import extract_agent_metadata + + agent_meta = extract_agent_metadata(obj) + instructions = agent_meta["instructions"] + model = agent_meta["model"] + chat_client_type = agent_meta["chat_client_type"] + context_providers_list = agent_meta["context_providers"] + middleware_list = agent_meta["middleware"] entity_info = EntityInfo( id=entity_id, @@ -547,7 +501,7 @@ async def _register_entity_from_object( description=description, tools=tools_union, instructions=instructions, - model=model, + model_id=model, chat_client_type=chat_client_type, context_providers=context_providers_list, middleware=middleware_list, @@ -628,7 +582,7 @@ def _generate_entity_id(self, entity: Any, entity_type: str, source: str = "dire source: Source of entity (directory, in_memory, remote) Returns: - Unique entity ID with format: {type}_{source}_{name}_{uuid8} + Unique entity ID with format: {type}_{source}_{name}_{uuid} """ import re @@ -644,10 +598,10 @@ def _generate_entity_id(self, entity: Any, entity_type: str, source: str = "dire else: 
base_name = "entity" - # Generate short UUID (8 chars = 4 billion combinations) - short_uuid = uuid.uuid4().hex[:8] + # Generate full UUID for guaranteed uniqueness + full_uuid = uuid.uuid4().hex - return f"{entity_type}_{source}_{base_name}_{short_uuid}" + return f"{entity_type}_{source}_{base_name}_{full_uuid}" async def fetch_remote_entity( self, url: str, metadata: dict[str, Any] | None = None @@ -722,12 +676,10 @@ def _normalize_url(self, url: str) -> str: return url - async def _fetch_url_content(self, url: str, max_size_mb: int = 10) -> str | None: + async def _fetch_url_content(self, url: str, max_size_mb: int = REMOTE_FETCH_MAX_SIZE_MB) -> str | None: """Fetch content from URL with size and timeout limits.""" try: - timeout = 30.0 # 30 second timeout - - async with httpx.AsyncClient(timeout=timeout) as client: + async with httpx.AsyncClient(timeout=REMOTE_FETCH_TIMEOUT_SECONDS) as client: response = await client.get(url) if response.status_code != 200: diff --git a/python/packages/devui/agent_framework_devui/_executor.py b/python/packages/devui/agent_framework_devui/_executor.py index 73edfde74f..68740732e9 100644 --- a/python/packages/devui/agent_framework_devui/_executor.py +++ b/python/packages/devui/agent_framework_devui/_executor.py @@ -5,12 +5,12 @@ import json import logging import os -import uuid from collections.abc import AsyncGenerator -from typing import Any, get_origin +from typing import Any -from agent_framework import AgentThread +from agent_framework import AgentProtocol +from ._conversations import ConversationStore, InMemoryConversationStore from ._discovery import EntityDiscovery from ._mapper import MessageMapper from ._tracing import capture_traces @@ -29,21 +29,26 @@ class EntityNotFoundError(Exception): class AgentFrameworkExecutor: """Executor for Agent Framework entities - agents and workflows.""" - def __init__(self, entity_discovery: EntityDiscovery, message_mapper: MessageMapper): + def __init__( + self, + entity_discovery: 
EntityDiscovery, + message_mapper: MessageMapper, + conversation_store: ConversationStore | None = None, + ): """Initialize Agent Framework executor. Args: entity_discovery: Entity discovery instance message_mapper: Message mapper instance + conversation_store: Optional conversation store (defaults to in-memory) """ self.entity_discovery = entity_discovery self.message_mapper = message_mapper self._setup_tracing_provider() self._setup_agent_framework_tracing() - # Minimal thread storage - no metadata needed - self.thread_storage: dict[str, AgentThread] = {} - self.agent_threads: dict[str, list[str]] = {} # agent_id -> thread_ids + # Use provided conversation store or default to in-memory + self.conversation_store = conversation_store or InMemoryConversationStore() def _setup_tracing_provider(self) -> None: """Set up our own TracerProvider so we can add processors.""" @@ -83,199 +88,6 @@ def _setup_agent_framework_tracing(self) -> None: else: logger.debug("ENABLE_OTEL not set, skipping observability setup") - # Thread Management Methods - def create_thread(self, agent_id: str) -> str: - """Create new thread for agent.""" - thread_id = f"thread_{uuid.uuid4().hex[:8]}" - thread = AgentThread() - - self.thread_storage[thread_id] = thread - - if agent_id not in self.agent_threads: - self.agent_threads[agent_id] = [] - self.agent_threads[agent_id].append(thread_id) - - return thread_id - - def get_thread(self, thread_id: str) -> AgentThread | None: - """Get AgentThread by ID.""" - return self.thread_storage.get(thread_id) - - def list_threads_for_agent(self, agent_id: str) -> list[str]: - """List thread IDs for agent.""" - return self.agent_threads.get(agent_id, []) - - def get_agent_for_thread(self, thread_id: str) -> str | None: - """Find which agent owns this thread.""" - for agent_id, thread_ids in self.agent_threads.items(): - if thread_id in thread_ids: - return agent_id - return None - - def delete_thread(self, thread_id: str) -> bool: - """Delete thread.""" - if 
thread_id not in self.thread_storage: - return False - - for _agent_id, thread_ids in self.agent_threads.items(): - if thread_id in thread_ids: - thread_ids.remove(thread_id) - break - - del self.thread_storage[thread_id] - return True - - async def get_thread_messages(self, thread_id: str) -> list[dict[str, Any]]: - """Get messages from a thread's message store, preserving all content types for UI display.""" - thread = self.get_thread(thread_id) - if not thread or not thread.message_store: - return [] - - try: - # Get AgentFramework ChatMessage objects from thread - af_messages = await thread.message_store.list_messages() - - ui_messages = [] - for i, af_msg in enumerate(af_messages): - # Extract role value (handle enum) - role = af_msg.role.value if hasattr(af_msg.role, "value") else str(af_msg.role) - - # Skip tool/function messages - only show user and assistant messages - if role not in ["user", "assistant"]: - continue - - # Extract all user-facing content (text, images, files, etc.) 
- display_contents = self._extract_display_contents(af_msg.contents) - - # Skip messages with no displayable content - if not display_contents: - continue - - # Extract usage information if present - usage_data = None - for content in af_msg.contents: - content_type = getattr(content, "type", None) - if content_type == "usage": - details = getattr(content, "details", None) - if details: - usage_data = { - "total_tokens": getattr(details, "total_token_count", 0) or 0, - "prompt_tokens": getattr(details, "input_token_count", 0) or 0, - "completion_tokens": getattr(details, "output_token_count", 0) or 0, - } - break - - ui_message = { - "id": af_msg.message_id or f"restored-{i}", - "role": role, - "contents": display_contents, - "timestamp": __import__("datetime").datetime.now().isoformat(), - "author_name": af_msg.author_name, - "message_id": af_msg.message_id, - } - - # Add usage data if available - if usage_data: - ui_message["usage"] = usage_data - - ui_messages.append(ui_message) - - logger.info(f"Restored {len(ui_messages)} display messages for thread {thread_id}") - return ui_messages - - except Exception as e: - logger.error(f"Error getting thread messages: {e}") - import traceback - - logger.error(traceback.format_exc()) - return [] - - def _extract_display_contents(self, contents: list[Any]) -> list[dict[str, Any]]: - """Extract all user-facing content (text, images, files, etc.) from message contents. - - Filters out internal mechanics like function calls/results while preserving - all content types that should be displayed in the UI. 
- """ - display_contents = [] - - for content in contents: - content_type = getattr(content, "type", None) - - # Text content - if content_type == "text": - text = getattr(content, "text", "") - - # Handle double-encoded JSON from user messages - if text.startswith('{"role":'): - try: - import json - - parsed = json.loads(text) - if parsed.get("contents"): - for sub_content in parsed["contents"]: - if sub_content.get("type") == "text": - display_contents.append({"type": "text", "text": sub_content.get("text", "")}) - except Exception: - display_contents.append({"type": "text", "text": text}) - else: - display_contents.append({"type": "text", "text": text}) - - # Data content (images, files, PDFs, etc.) - elif content_type == "data": - display_contents.append({ - "type": "data", - "uri": getattr(content, "uri", ""), - "media_type": getattr(content, "media_type", None), - }) - - # URI content (external links to images/files) - elif content_type == "uri": - display_contents.append({ - "type": "uri", - "uri": getattr(content, "uri", ""), - "media_type": getattr(content, "media_type", None), - }) - - # Skip function_call, function_result, and other internal content types - - return display_contents - - async def serialize_thread(self, thread_id: str) -> dict[str, Any] | None: - """Serialize thread state for persistence.""" - thread = self.get_thread(thread_id) - if not thread: - return None - - try: - # Use AgentThread's built-in serialization - serialized_state = await thread.serialize() - - # Add our metadata - agent_id = self.get_agent_for_thread(thread_id) - serialized_state["metadata"] = {"agent_id": agent_id, "thread_id": thread_id} - - return serialized_state - - except Exception as e: - logger.error(f"Error serializing thread {thread_id}: {e}") - return None - - async def deserialize_thread(self, thread_id: str, agent_id: str, serialized_state: dict[str, Any]) -> bool: - """Deserialize thread state from persistence.""" - try: - thread = await 
AgentThread.deserialize(serialized_state) - # Store the restored thread - self.thread_storage[thread_id] = thread - if agent_id not in self.agent_threads: - self.agent_threads[agent_id] = [] - self.agent_threads[agent_id].append(thread_id) - - return True - - except Exception as e: - logger.error(f"Error deserializing thread {thread_id}: {e}") - return False - async def discover_entities(self) -> list[EntityInfo]: """Discover all available entities. @@ -390,7 +202,7 @@ async def execute_entity(self, entity_id: str, request: AgentFrameworkRequest) - yield {"type": "error", "message": str(e), "entity_id": entity_id} async def _execute_agent( - self, agent: Any, request: AgentFrameworkRequest, trace_collector: Any + self, agent: AgentProtocol, request: AgentFrameworkRequest, trace_collector: Any ) -> AsyncGenerator[Any, None]: """Execute Agent Framework agent with trace collection and optional thread support. @@ -406,34 +218,51 @@ async def _execute_agent( # Convert input to proper ChatMessage or string user_message = self._convert_input_to_chat_message(request.input) - # Get thread if provided in extra_body + # Get thread from conversation parameter (OpenAI standard!) 
thread = None - if request.extra_body and hasattr(request.extra_body, "thread_id") and request.extra_body.thread_id: - thread_id = request.extra_body.thread_id - thread = self.get_thread(thread_id) + conversation_id = request.get_conversation_id() + if conversation_id: + thread = self.conversation_store.get_thread(conversation_id) if thread: - logger.debug(f"Using existing thread: {thread_id}") + logger.debug(f"Using existing conversation: {conversation_id}") else: - logger.warning(f"Thread {thread_id} not found, proceeding without thread") + logger.warning(f"Conversation {conversation_id} not found, proceeding without thread") if isinstance(user_message, str): logger.debug(f"Executing agent with text input: {user_message[:100]}...") else: logger.debug(f"Executing agent with multimodal ChatMessage: {type(user_message)}") + # Check if agent supports streaming + if hasattr(agent, "run_stream") and callable(agent.run_stream): + # Use Agent Framework's native streaming with optional thread + if thread: + async for update in agent.run_stream(user_message, thread=thread): + for trace_event in trace_collector.get_pending_events(): + yield trace_event - # Use Agent Framework's native streaming with optional thread - if thread: - async for update in agent.run_stream(user_message, thread=thread): - for trace_event in trace_collector.get_pending_events(): - yield trace_event + yield update + else: + async for update in agent.run_stream(user_message): + for trace_event in trace_collector.get_pending_events(): + yield trace_event + + yield update + elif hasattr(agent, "run") and callable(agent.run): + # Non-streaming agent - use run() and yield complete response + logger.info("Agent lacks run_stream(), using run() method (non-streaming)") + if thread: + response = await agent.run(user_message, thread=thread) + else: + response = await agent.run(user_message) - yield update - else: - async for update in agent.run_stream(user_message): - for trace_event in 
trace_collector.get_pending_events(): - yield trace_event + # Yield trace events before response + for trace_event in trace_collector.get_pending_events(): + yield trace_event - yield update + # Yield the complete response (mapper will convert to streaming events) + yield response + else: + raise ValueError("Agent must implement either run() or run_stream() method") except Exception as e: logger.error(f"Error in agent execution: {e}") @@ -455,8 +284,8 @@ async def _execute_workflow( try: # Get input data - prefer structured data from extra_body input_data: str | list[Any] | dict[str, Any] - if request.extra_body and hasattr(request.extra_body, "input_data") and request.extra_body.input_data: - input_data = request.extra_body.input_data + if request.extra_body and isinstance(request.extra_body, dict) and request.extra_body.get("input_data"): + input_data = request.extra_body.get("input_data") # type: ignore logger.debug(f"Using structured input_data from extra_body: {type(input_data)}") else: input_data = request.input @@ -483,6 +312,9 @@ async def _execute_workflow( def _convert_input_to_chat_message(self, input_data: Any) -> Any: """Convert OpenAI Responses API input to Agent Framework ChatMessage or string. + Handles various input formats including text, images, files, and multimodal content. + Falls back to string extraction for simple cases. + Args: input_data: OpenAI ResponseInputParam (List[ResponseInputItemParam]) @@ -512,6 +344,9 @@ def _convert_openai_input_to_chat_message( ) -> Any: """Convert OpenAI ResponseInputParam to Agent Framework ChatMessage. + Processes text, images, files, and other content types from OpenAI format + to Agent Framework ChatMessage with appropriate content objects. 
+ Args: input_items: List of OpenAI ResponseInputItemParam objects (dicts or objects) ChatMessage: ChatMessage class for creating chat messages @@ -597,6 +432,40 @@ def _convert_openai_input_to_chat_message( elif file_url: contents.append(DataContent(uri=file_url, media_type=media_type)) + elif content_type == "function_approval_response": + # Handle function approval response (DevUI extension) + try: + from agent_framework import FunctionApprovalResponseContent, FunctionCallContent + + request_id = content_item.get("request_id", "") + approved = content_item.get("approved", False) + function_call_data = content_item.get("function_call", {}) + + # Create FunctionCallContent from the function_call data + function_call = FunctionCallContent( + call_id=function_call_data.get("id", ""), + name=function_call_data.get("name", ""), + arguments=function_call_data.get("arguments", {}), + ) + + # Create FunctionApprovalResponseContent with correct signature + approval_response = FunctionApprovalResponseContent( + approved, # positional argument + id=request_id, # keyword argument 'id', NOT 'request_id' + function_call=function_call, # FunctionCallContent object + ) + contents.append(approval_response) + logger.info( + f"Added FunctionApprovalResponseContent: id={request_id}, " + f"approved={approved}, call_id={function_call.call_id}" + ) + except ImportError: + logger.warning( + "FunctionApprovalResponseContent not available in agent_framework" + ) + except Exception as e: + logger.error(f"Failed to create FunctionApprovalResponseContent: {e}") + # Handle other OpenAI input item types as needed # (tool calls, function results, etc.) 
@@ -687,23 +556,6 @@ def _get_start_executor_message_types(self, workflow: Any) -> tuple[Any | None, return start_executor, message_types - def _select_primary_input_type(self, message_types: list[Any]) -> Any | None: - """Choose the most user-friendly input type for workflow kick-off.""" - if not message_types: - return None - - preferred = (str, dict) - - for candidate in preferred: - for message_type in message_types: - if message_type is candidate: - return candidate - origin = get_origin(message_type) - if origin is candidate: - return candidate - - return message_types[0] - def _parse_structured_workflow_input(self, workflow: Any, input_data: dict[str, Any]) -> Any: """Parse structured input data for workflow execution. @@ -728,7 +580,9 @@ def _parse_structured_workflow_input(self, workflow: Any, input_data: dict[str, return input_data # Get the first (primary) input type - input_type = self._select_primary_input_type(message_types) + from ._utils import select_primary_input_type + + input_type = select_primary_input_type(message_types) if input_type is None: logger.debug("Could not select primary input type for workflow - using raw dict") return input_data @@ -764,7 +618,9 @@ def _parse_raw_workflow_input(self, workflow: Any, raw_input: str) -> Any: return raw_input # Get the first (primary) input type - input_type = self._select_primary_input_type(message_types) + from ._utils import select_primary_input_type + + input_type = select_primary_input_type(message_types) if input_type is None: logger.debug("Could not select primary input type for workflow - using raw string") return raw_input diff --git a/python/packages/devui/agent_framework_devui/_mapper.py b/python/packages/devui/agent_framework_devui/_mapper.py index 4866e68230..e950216ae2 100644 --- a/python/packages/devui/agent_framework_devui/_mapper.py +++ b/python/packages/devui/agent_framework_devui/_mapper.py @@ -5,6 +5,7 @@ import json import logging import uuid +from collections import OrderedDict 
from collections.abc import Sequence from datetime import datetime from typing import Any, Union @@ -17,6 +18,8 @@ ResponseErrorEvent, ResponseFunctionCallArgumentsDeltaEvent, ResponseFunctionResultComplete, + ResponseFunctionToolCall, + ResponseOutputItemAddedEvent, ResponseOutputMessage, ResponseOutputText, ResponseReasoningTextDeltaEvent, @@ -24,7 +27,6 @@ ResponseTextDeltaEvent, ResponseTraceEventComplete, ResponseUsage, - ResponseUsageEventComplete, ResponseWorkflowEventComplete, ) @@ -34,19 +36,26 @@ EventType = Union[ ResponseStreamEvent, ResponseWorkflowEventComplete, - ResponseFunctionResultComplete, + ResponseOutputItemAddedEvent, ResponseTraceEventComplete, - ResponseUsageEventComplete, ] class MessageMapper: """Maps Agent Framework messages/responses to OpenAI format.""" - def __init__(self) -> None: - """Initialize Agent Framework message mapper.""" + def __init__(self, max_contexts: int = 1000) -> None: + """Initialize Agent Framework message mapper. + + Args: + max_contexts: Maximum number of contexts to keep in memory (default: 1000) + """ self.sequence_counter = 0 - self._conversion_contexts: dict[int, dict[str, Any]] = {} + self._conversion_contexts: OrderedDict[int, dict[str, Any]] = OrderedDict() + self._max_contexts = max_contexts + + # Track usage per request for final Response.usage (OpenAI standard) + self._usage_accumulator: dict[str, dict[str, int]] = {} # Register content type mappers for all 12 Agent Framework content types self.content_mappers = { @@ -95,7 +104,7 @@ async def convert_event(self, raw_event: Any, request: AgentFrameworkRequest) -> # Import Agent Framework types for proper isinstance checks try: - from agent_framework import AgentRunResponseUpdate, WorkflowEvent + from agent_framework import AgentRunResponse, AgentRunResponseUpdate, WorkflowEvent from agent_framework._workflows._events import AgentRunUpdateEvent # Handle AgentRunUpdateEvent - workflow event wrapping AgentRunResponseUpdate @@ -107,6 +116,10 @@ async def 
convert_event(self, raw_event: Any, request: AgentFrameworkRequest) -> # If no data, treat as generic workflow event return await self._convert_workflow_event(raw_event, context) + # Handle complete agent response (AgentRunResponse) - for non-streaming agent execution + if isinstance(raw_event, AgentRunResponse): + return await self._convert_agent_response(raw_event, context) + # Handle agent updates (AgentRunResponseUpdate) - for direct agent execution if isinstance(raw_event, AgentRunResponseUpdate): return await self._convert_agent_update(raw_event, context) @@ -159,17 +172,31 @@ async def aggregate_to_response(self, events: Sequence[Any], request: AgentFrame status="completed", ) - # Create usage object - input_token_count = len(str(request.input)) // 4 if request.input else 0 - output_token_count = len(full_content) // 4 - - usage = ResponseUsage( - input_tokens=input_token_count, - output_tokens=output_token_count, - total_tokens=input_token_count + output_token_count, - input_tokens_details=InputTokensDetails(cached_tokens=0), - output_tokens_details=OutputTokensDetails(reasoning_tokens=0), - ) + # Get usage from accumulator (OpenAI standard) + request_id = str(id(request)) + usage_data = self._usage_accumulator.get(request_id) + + if usage_data: + usage = ResponseUsage( + input_tokens=usage_data["input_tokens"], + output_tokens=usage_data["output_tokens"], + total_tokens=usage_data["total_tokens"], + input_tokens_details=InputTokensDetails(cached_tokens=0), + output_tokens_details=OutputTokensDetails(reasoning_tokens=0), + ) + # Cleanup accumulator + del self._usage_accumulator[request_id] + else: + # Fallback: estimate if no usage was tracked + input_token_count = len(str(request.input)) // 4 if request.input else 0 + output_token_count = len(full_content) // 4 + usage = ResponseUsage( + input_tokens=input_token_count, + output_tokens=output_token_count, + total_tokens=input_token_count + output_token_count, + 
input_tokens_details=InputTokensDetails(cached_tokens=0), + output_tokens_details=OutputTokensDetails(reasoning_tokens=0), + ) return OpenAIResponse( id=f"resp_{uuid.uuid4().hex[:12]}", @@ -186,10 +213,18 @@ async def aggregate_to_response(self, events: Sequence[Any], request: AgentFrame except Exception as e: logger.exception(f"Error aggregating response: {e}") return await self._create_error_response(str(e), request) + finally: + # Cleanup: Remove context after aggregation to prevent memory leak + # This handles the common case where streaming completes successfully + request_key = id(request) + if self._conversion_contexts.pop(request_key, None): + logger.debug(f"Cleaned up context for request {request_key} after aggregation") def _get_or_create_context(self, request: AgentFrameworkRequest) -> dict[str, Any]: """Get or create conversion context for this request. + Uses LRU eviction when max_contexts is reached to prevent unbounded memory growth. + Args: request: Request to get context for @@ -197,13 +232,26 @@ def _get_or_create_context(self, request: AgentFrameworkRequest) -> dict[str, An Conversion context dictionary """ request_key = id(request) + if request_key not in self._conversion_contexts: + # Evict oldest context if at capacity (LRU eviction) + if len(self._conversion_contexts) >= self._max_contexts: + evicted_key, _ = self._conversion_contexts.popitem(last=False) + logger.debug(f"Evicted oldest context (key={evicted_key}) - at max capacity ({self._max_contexts})") + self._conversion_contexts[request_key] = { "sequence_counter": 0, "item_id": f"msg_{uuid.uuid4().hex[:8]}", "content_index": 0, "output_index": 0, + "request_id": str(request_key), # For usage accumulation + # Track active function calls: {call_id: {name, item_id, args_chunks}} + "active_function_calls": {}, } + else: + # Move to end (mark as recently used for LRU) + self._conversion_contexts.move_to_end(request_key) + return self._conversion_contexts[request_key] def _next_sequence(self, 
context: dict[str, Any]) -> int: @@ -240,10 +288,11 @@ async def _convert_agent_update(self, update: Any, context: dict[str, Any]) -> S if content_type in self.content_mappers: mapped_events = await self.content_mappers[content_type](content, context) - if isinstance(mapped_events, list): - events.extend(mapped_events) - else: - events.append(mapped_events) + if mapped_events is not None: # Handle None returns (e.g., UsageContent) + if isinstance(mapped_events, list): + events.extend(mapped_events) + else: + events.append(mapped_events) else: # Graceful fallback for unknown content types events.append(await self._create_unknown_content_event(content, context)) @@ -256,6 +305,59 @@ async def _convert_agent_update(self, update: Any, context: dict[str, Any]) -> S return events + async def _convert_agent_response(self, response: Any, context: dict[str, Any]) -> Sequence[Any]: + """Convert complete AgentRunResponse to OpenAI events. + + This handles non-streaming agent execution where agent.run() returns + a complete AgentRunResponse instead of streaming AgentRunResponseUpdate objects. 
+ + Args: + response: Agent run response (AgentRunResponse) + context: Conversion context + + Returns: + List of OpenAI response stream events + """ + events: list[Any] = [] + + try: + # Extract all messages from the response + messages = getattr(response, "messages", []) + + # Convert each message's contents to streaming events + for message in messages: + if hasattr(message, "contents") and message.contents: + for content in message.contents: + content_type = content.__class__.__name__ + + if content_type in self.content_mappers: + mapped_events = await self.content_mappers[content_type](content, context) + if mapped_events is not None: # Handle None returns (e.g., UsageContent) + if isinstance(mapped_events, list): + events.extend(mapped_events) + else: + events.append(mapped_events) + else: + # Graceful fallback for unknown content types + events.append(await self._create_unknown_content_event(content, context)) + + context["content_index"] += 1 + + # Add usage information if present + usage_details = getattr(response, "usage_details", None) + if usage_details: + from agent_framework import UsageContent + + usage_content = UsageContent(details=usage_details) + await self._map_usage_content(usage_content, context) + # Note: _map_usage_content returns None - it accumulates usage for final Response.usage + + except Exception as e: + logger.warning(f"Error converting agent response: {e}") + events.append(await self._create_error_event(str(e), context)) + + return events + async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> Sequence[Any]: """Convert workflow event to structured OpenAI events. 
@@ -317,41 +419,143 @@ async def _map_reasoning_content(self, content: Any, context: dict[str, Any]) -> async def _map_function_call_content( self, content: Any, context: dict[str, Any] - ) -> list[ResponseFunctionCallArgumentsDeltaEvent]: - """Map FunctionCallContent to ResponseFunctionCallArgumentsDeltaEvent(s).""" - events = [] + ) -> list[ResponseFunctionCallArgumentsDeltaEvent | ResponseOutputItemAddedEvent]: + """Map FunctionCallContent to OpenAI events following Responses API spec. - # For streaming, need to chunk the arguments JSON - args_str = json.dumps(content.arguments) if hasattr(content, "arguments") and content.arguments else "{}" + Agent Framework emits FunctionCallContent in two patterns: + 1. First event: call_id + name + empty/no arguments + 2. Subsequent events: empty call_id/name + argument chunks - # Chunk the JSON string for streaming - for chunk in self._chunk_json_string(args_str): + We emit: + 1. response.output_item.added (with full metadata) for the first event + 2. 
response.function_call_arguments.delta (referencing item_id) for chunks + """ + events: list[ResponseFunctionCallArgumentsDeltaEvent | ResponseOutputItemAddedEvent] = [] + + # CASE 1: New function call (has call_id and name) + # This is the first event that establishes the function call + if content.call_id and content.name: + # Use call_id as item_id (simpler, and call_id uniquely identifies the call) + item_id = content.call_id + + # Track this function call for later argument deltas + context["active_function_calls"][content.call_id] = { + "item_id": item_id, + "name": content.name, + "arguments_chunks": [], + } + + logger.debug(f"New function call: {content.name} (call_id={content.call_id})") + + # Emit response.output_item.added event per OpenAI spec events.append( - ResponseFunctionCallArgumentsDeltaEvent( - type="response.function_call_arguments.delta", - delta=chunk, - item_id=context["item_id"], + ResponseOutputItemAddedEvent( + type="response.output_item.added", + item=ResponseFunctionToolCall( + id=content.call_id, # Use call_id as the item id + call_id=content.call_id, + name=content.name, + arguments="", # Empty initially, will be filled by deltas + type="function_call", + status="in_progress", + ), output_index=context["output_index"], sequence_number=self._next_sequence(context), ) ) + # CASE 2: Argument deltas (content has arguments, possibly without call_id/name) + if content.arguments: + # Find the active function call for these arguments + active_call = self._get_active_function_call(content, context) + + if active_call: + item_id = active_call["item_id"] + + # Convert arguments to string if it's a dict (Agent Framework may send either) + delta_str = content.arguments if isinstance(content.arguments, str) else json.dumps(content.arguments) + + # Emit argument delta referencing the item_id + events.append( + ResponseFunctionCallArgumentsDeltaEvent( + type="response.function_call_arguments.delta", + delta=delta_str, + item_id=item_id, + 
output_index=context["output_index"], + sequence_number=self._next_sequence(context), + ) + ) + + # Track chunk for debugging + active_call["arguments_chunks"].append(delta_str) + else: + logger.warning(f"Received function call arguments without active call: {content.arguments[:50]}...") + return events + def _get_active_function_call(self, content: Any, context: dict[str, Any]) -> dict[str, Any] | None: + """Find the active function call for this content. + + Uses call_id if present, otherwise falls back to most recent call. + Necessary because Agent Framework may send argument chunks without call_id. + + Args: + content: FunctionCallContent with possible call_id + context: Conversion context with active_function_calls + + Returns: + Active call dict or None + """ + active_calls: dict[str, dict[str, Any]] = context["active_function_calls"] + + # If content has call_id, use it to find the exact call + if hasattr(content, "call_id") and content.call_id: + result = active_calls.get(content.call_id) + return result if result is not None else None + + # Otherwise, use the most recent call (last one added) + # This handles the case where Agent Framework sends argument chunks + # without call_id in subsequent events + if active_calls: + return list(active_calls.values())[-1] + + return None + async def _map_function_result_content( self, content: Any, context: dict[str, Any] ) -> ResponseFunctionResultComplete: - """Map FunctionResultContent to structured event.""" + """Map FunctionResultContent to custom DevUI event. + + This is a DevUI extension - OpenAI doesn't stream function execution results + because in their model, applications execute functions, not the API. + Agent Framework executes functions, so we emit this event for debugging visibility. + + IMPORTANT: Always use Agent Framework's call_id from the content. + Do NOT generate a new call_id - it must match the one from the function call event. 
+ """ + # Get call_id from content - this MUST match the call_id from the function call + call_id = getattr(content, "call_id", None) + + if not call_id: + logger.warning("FunctionResultContent missing call_id - this will break call/result pairing") + call_id = f"call_{uuid.uuid4().hex[:8]}" # Fallback only if truly missing + + # Extract result + result = getattr(content, "result", None) + exception = getattr(content, "exception", None) + + # Convert result to string + output = result if isinstance(result, str) else json.dumps(result) if result is not None else "" + + # Determine status + status = "incomplete" if exception else "completed" + + # Return custom DevUI event return ResponseFunctionResultComplete( type="response.function_result.complete", - data={ - "call_id": getattr(content, "call_id", f"call_{uuid.uuid4().hex[:8]}"), - "result": getattr(content, "result", None), - "status": "completed" if not getattr(content, "exception", None) else "failed", - "exception": str(getattr(content, "exception", None)) if getattr(content, "exception", None) else None, - "timestamp": datetime.now().isoformat(), - }, - call_id=getattr(content, "call_id", f"call_{uuid.uuid4().hex[:8]}"), + call_id=call_id, + output=output, + status=status, item_id=context["item_id"], output_index=context["output_index"], sequence_number=self._next_sequence(context), @@ -367,37 +571,34 @@ async def _map_error_content(self, content: Any, context: dict[str, Any]) -> Res sequence_number=self._next_sequence(context), ) - async def _map_usage_content(self, content: Any, context: dict[str, Any]) -> ResponseUsageEventComplete: - """Map UsageContent to structured usage event.""" - # Store usage data in context for aggregation - if "usage_data" not in context: - context["usage_data"] = [] - context["usage_data"].append(content) + async def _map_usage_content(self, content: Any, context: dict[str, Any]) -> None: + """Accumulate usage data for final Response.usage field. 
+ OpenAI does NOT stream usage events. Usage appears only in final Response. + This method accumulates usage data per request for later inclusion in Response.usage. + + Returns: + None - no event emitted (usage goes in final Response.usage) + """ # Extract usage from UsageContent.details (UsageDetails object) details = getattr(content, "details", None) - total_tokens = 0 - prompt_tokens = 0 - completion_tokens = 0 + total_tokens = getattr(details, "total_token_count", 0) or 0 + prompt_tokens = getattr(details, "input_token_count", 0) or 0 + completion_tokens = getattr(details, "output_token_count", 0) or 0 - if details: - total_tokens = getattr(details, "total_token_count", 0) or 0 - prompt_tokens = getattr(details, "input_token_count", 0) or 0 - completion_tokens = getattr(details, "output_token_count", 0) or 0 + # Accumulate for final Response.usage + request_id = context.get("request_id", "default") + if request_id not in self._usage_accumulator: + self._usage_accumulator[request_id] = {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0} - return ResponseUsageEventComplete( - type="response.usage.complete", - data={ - "usage_data": details.to_dict() if details and hasattr(details, "to_dict") else {}, - "total_tokens": total_tokens, - "completion_tokens": completion_tokens, - "prompt_tokens": prompt_tokens, - "timestamp": datetime.now().isoformat(), - }, - item_id=context["item_id"], - output_index=context["output_index"], - sequence_number=self._next_sequence(context), - ) + self._usage_accumulator[request_id]["input_tokens"] += prompt_tokens + self._usage_accumulator[request_id]["output_tokens"] += completion_tokens + self._usage_accumulator[request_id]["total_tokens"] += total_tokens + + logger.debug(f"Accumulated usage for {request_id}: {self._usage_accumulator[request_id]}") + + # NO EVENT RETURNED - usage goes in final Response only + return async def _map_data_content(self, content: Any, context: dict[str, Any]) -> ResponseTraceEventComplete: """Map 
DataContent to structured trace event.""" @@ -510,19 +711,15 @@ async def _create_error_event(self, message: str, context: dict[str, Any]) -> Re async def _create_unknown_event(self, event_data: Any, context: dict[str, Any]) -> ResponseStreamEvent: """Create event for unknown event types.""" - text = f"Unknown event: {event_data!s}\\n" + text = f"Unknown event: {event_data!s}\n" return self._create_text_delta_event(text, context) async def _create_unknown_content_event(self, content: Any, context: dict[str, Any]) -> ResponseStreamEvent: """Create event for unknown content types.""" content_type = content.__class__.__name__ - text = f"⚠️ Unknown content type: {content_type}\\n" + text = f"⚠️ Unknown content type: {content_type}\n" return self._create_text_delta_event(text, context) - def _chunk_json_string(self, json_str: str, chunk_size: int = 50) -> list[str]: - """Chunk JSON string for streaming.""" - return [json_str[i : i + chunk_size] for i in range(0, len(json_str), chunk_size)] - async def _create_error_response(self, error_message: str, request: AgentFrameworkRequest) -> OpenAIResponse: """Create error response.""" error_text = f"Error: {error_message}" diff --git a/python/packages/devui/agent_framework_devui/_server.py b/python/packages/devui/agent_framework_devui/_server.py index daffdc9688..e0c4f2a565 100644 --- a/python/packages/devui/agent_framework_devui/_server.py +++ b/python/packages/devui/agent_framework_devui/_server.py @@ -7,7 +7,7 @@ import logging from collections.abc import AsyncGenerator from contextlib import asynccontextmanager -from typing import Any, get_origin +from typing import Any from fastapi import FastAPI, HTTPException, Request from fastapi.middleware.cors import CORSMiddleware @@ -23,47 +23,6 @@ logger = logging.getLogger(__name__) -def _extract_executor_message_types(executor: Any) -> list[Any]: - """Return declared input types for the given executor.""" - message_types: list[Any] = [] - - try: - input_types = getattr(executor, 
"input_types", None) - except Exception as exc: # pragma: no cover - defensive logging path - logger.debug(f"Failed to access executor input_types: {exc}") - else: - if input_types: - message_types = list(input_types) - - if not message_types and hasattr(executor, "_handlers"): - try: - handlers = executor._handlers - if isinstance(handlers, dict): - message_types = list(handlers.keys()) - except Exception as exc: # pragma: no cover - defensive logging path - logger.debug(f"Failed to read executor handlers: {exc}") - - return message_types - - -def _select_primary_input_type(message_types: list[Any]) -> Any | None: - """Choose the most user-friendly input type for rendering workflow inputs.""" - if not message_types: - return None - - preferred = (str, dict) - - for candidate in preferred: - for message_type in message_types: - if message_type is candidate: - return candidate - origin = get_origin(message_type) - if origin is candidate: - return candidate - - return message_types[0] - - class DevServer: """Development Server - OpenAI compatible API server for debugging agents.""" @@ -263,7 +222,11 @@ async def get_entity_info(entity_id: str) -> EntityInfo: start_executor_id = "" try: - from ._utils import generate_input_schema + from ._utils import ( + extract_executor_message_types, + generate_input_schema, + select_primary_input_type, + ) start_executor = entity_obj.get_start_executor() except Exception as e: @@ -274,8 +237,8 @@ async def get_entity_info(entity_id: str) -> EntityInfo: start_executor, "id", "" ) - message_types = _extract_executor_message_types(start_executor) - input_type = _select_primary_input_type(message_types) + message_types = extract_executor_message_types(start_executor) + input_type = select_primary_input_type(message_types) if input_type: input_type_name = getattr(input_type, "__name__", str(input_type)) @@ -421,112 +384,161 @@ async def create_response(request: AgentFrameworkRequest, raw_request: Request) error = 
OpenAIError.create(f"Execution failed: {e!s}") return JSONResponse(status_code=500, content=error.to_dict()) - @app.post("/v1/threads") - async def create_thread(request_data: dict[str, Any]) -> dict[str, Any]: - """Create a new thread for an agent.""" + # ======================================== + # OpenAI Conversations API (Standard) + # ======================================== + + @app.post("/v1/conversations") + async def create_conversation(request_data: dict[str, Any]) -> dict[str, Any]: + """Create a new conversation - OpenAI standard.""" try: - agent_id = request_data.get("agent_id") - if not agent_id: - raise HTTPException(status_code=400, detail="agent_id is required") + metadata = request_data.get("metadata") + executor = await self._ensure_executor() + conversation = executor.conversation_store.create_conversation(metadata=metadata) + return conversation.model_dump() + except HTTPException: + raise + except Exception as e: + logger.error(f"Error creating conversation: {e}") + raise HTTPException(status_code=500, detail=f"Failed to create conversation: {e!s}") from e + @app.get("/v1/conversations") + async def list_conversations(agent_id: str | None = None) -> dict[str, Any]: + """List conversations, optionally filtered by agent_id.""" + try: executor = await self._ensure_executor() - thread_id = executor.create_thread(agent_id) + + if agent_id: + # Filter by agent_id metadata + conversations = executor.conversation_store.list_conversations_by_metadata({"agent_id": agent_id}) + else: + # Return all conversations (for InMemoryStore, list all) + # Note: This assumes list_conversations_by_metadata({}) returns all + conversations = executor.conversation_store.list_conversations_by_metadata({}) return { - "id": thread_id, - "object": "thread", - "created_at": int(__import__("time").time()), - "metadata": {"agent_id": agent_id}, + "object": "list", + "data": [conv.model_dump() for conv in conversations], + "has_more": False, } except HTTPException: raise 
except Exception as e: - logger.error(f"Error creating thread: {e}") - raise HTTPException(status_code=500, detail=f"Failed to create thread: {e!s}") from e + logger.error(f"Error listing conversations: {e}") + raise HTTPException(status_code=500, detail=f"Failed to list conversations: {e!s}") from e - @app.get("/v1/threads") - async def list_threads(agent_id: str) -> dict[str, Any]: - """List threads for an agent.""" + @app.get("/v1/conversations/{conversation_id}") + async def retrieve_conversation(conversation_id: str) -> dict[str, Any]: + """Get conversation - OpenAI standard.""" try: executor = await self._ensure_executor() - thread_ids = executor.list_threads_for_agent(agent_id) - - # Convert thread IDs to thread objects - threads = [] - for thread_id in thread_ids: - threads.append({"id": thread_id, "object": "thread", "agent_id": agent_id}) - - return {"object": "list", "data": threads} + conversation = executor.conversation_store.get_conversation(conversation_id) + if not conversation: + raise HTTPException(status_code=404, detail="Conversation not found") + return conversation.model_dump() + except HTTPException: + raise except Exception as e: - logger.error(f"Error listing threads: {e}") - raise HTTPException(status_code=500, detail=f"Failed to list threads: {e!s}") from e + logger.error(f"Error getting conversation {conversation_id}: {e}") + raise HTTPException(status_code=500, detail=f"Failed to get conversation: {e!s}") from e - @app.get("/v1/threads/{thread_id}") - async def get_thread(thread_id: str) -> dict[str, Any]: - """Get thread information.""" + @app.post("/v1/conversations/{conversation_id}") + async def update_conversation(conversation_id: str, request_data: dict[str, Any]) -> dict[str, Any]: + """Update conversation metadata - OpenAI standard.""" try: executor = await self._ensure_executor() - - # Check if thread exists - thread = executor.get_thread(thread_id) - if not thread: - raise HTTPException(status_code=404, detail="Thread not 
found") - - # Get the agent that owns this thread - agent_id = executor.get_agent_for_thread(thread_id) - - return {"id": thread_id, "object": "thread", "agent_id": agent_id} + metadata = request_data.get("metadata", {}) + conversation = executor.conversation_store.update_conversation(conversation_id, metadata=metadata) + return conversation.model_dump() + except ValueError as e: + raise HTTPException(status_code=404, detail=str(e)) from e except HTTPException: raise except Exception as e: - logger.error(f"Error getting thread {thread_id}: {e}") - raise HTTPException(status_code=500, detail=f"Failed to get thread: {e!s}") from e + logger.error(f"Error updating conversation {conversation_id}: {e}") + raise HTTPException(status_code=500, detail=f"Failed to update conversation: {e!s}") from e - @app.delete("/v1/threads/{thread_id}") - async def delete_thread(thread_id: str) -> dict[str, Any]: - """Delete a thread.""" + @app.delete("/v1/conversations/{conversation_id}") + async def delete_conversation(conversation_id: str) -> dict[str, Any]: + """Delete conversation - OpenAI standard.""" try: executor = await self._ensure_executor() - success = executor.delete_thread(thread_id) - - if not success: - raise HTTPException(status_code=404, detail="Thread not found") - - return {"id": thread_id, "object": "thread.deleted", "deleted": True} + result = executor.conversation_store.delete_conversation(conversation_id) + return result.model_dump() + except ValueError as e: + raise HTTPException(status_code=404, detail=str(e)) from e except HTTPException: raise except Exception as e: - logger.error(f"Error deleting thread {thread_id}: {e}") - raise HTTPException(status_code=500, detail=f"Failed to delete thread: {e!s}") from e + logger.error(f"Error deleting conversation {conversation_id}: {e}") + raise HTTPException(status_code=500, detail=f"Failed to delete conversation: {e!s}") from e - @app.get("/v1/threads/{thread_id}/messages") - async def get_thread_messages(thread_id: 
str) -> dict[str, Any]: - """Get messages from a thread.""" + @app.post("/v1/conversations/{conversation_id}/items") + async def create_conversation_items(conversation_id: str, request_data: dict[str, Any]) -> dict[str, Any]: + """Add items to conversation - OpenAI standard.""" try: executor = await self._ensure_executor() + items = request_data.get("items", []) + conv_items = await executor.conversation_store.add_items(conversation_id, items=items) + return {"object": "list", "data": [item.model_dump() for item in conv_items]} + except ValueError as e: + raise HTTPException(status_code=404, detail=str(e)) from e + except HTTPException: + raise + except Exception as e: + logger.error(f"Error adding items to conversation {conversation_id}: {e}") + raise HTTPException(status_code=500, detail=f"Failed to add items: {e!s}") from e + + @app.get("/v1/conversations/{conversation_id}/items") + async def list_conversation_items( + conversation_id: str, limit: int = 100, after: str | None = None, order: str = "asc" + ) -> dict[str, Any]: + """List conversation items - OpenAI standard.""" + try: + executor = await self._ensure_executor() + items, has_more = await executor.conversation_store.list_items( + conversation_id, limit=limit, after=after, order=order + ) + return { + "object": "list", + "data": [item.model_dump() for item in items], + "has_more": has_more, + } + except ValueError as e: + raise HTTPException(status_code=404, detail=str(e)) from e + except HTTPException: + raise + except Exception as e: + logger.error(f"Error listing items for conversation {conversation_id}: {e}") + raise HTTPException(status_code=500, detail=f"Failed to list items: {e!s}") from e - # Check if thread exists - thread = executor.get_thread(thread_id) - if not thread: - raise HTTPException(status_code=404, detail="Thread not found") - - # Get messages from thread - messages = await executor.get_thread_messages(thread_id) - - return {"object": "list", "data": messages, "thread_id": 
thread_id} + @app.get("/v1/conversations/{conversation_id}/items/{item_id}") + async def retrieve_conversation_item(conversation_id: str, item_id: str) -> dict[str, Any]: + """Get specific conversation item - OpenAI standard.""" + try: + executor = await self._ensure_executor() + item = executor.conversation_store.get_item(conversation_id, item_id) + if not item: + raise HTTPException(status_code=404, detail="Item not found") + return item.model_dump() except HTTPException: raise except Exception as e: - logger.error(f"Error getting messages for thread {thread_id}: {e}") - raise HTTPException(status_code=500, detail=f"Failed to get thread messages: {e!s}") from e + logger.error(f"Error getting item {item_id} from conversation {conversation_id}: {e}") + raise HTTPException(status_code=500, detail=f"Failed to get item: {e!s}") from e async def _stream_execution( self, executor: AgentFrameworkExecutor, request: AgentFrameworkRequest ) -> AsyncGenerator[str, None]: """Stream execution directly through executor.""" try: - # Direct call to executor - simple and clean + # Collect events for final response.completed event + events = [] + + # Stream all events async for event in executor.execute_streaming(request): + events.append(event) + # IMPORTANT: Check model_dump_json FIRST because to_json() can have newlines (pretty-printing) # which breaks SSE format. model_dump_json() returns single-line JSON. 
if hasattr(event, "model_dump_json"): @@ -544,6 +556,17 @@ async def _stream_execution( payload = json.dumps(str(event)) yield f"data: {payload}\n\n" + # Aggregate to final response and emit response.completed event (OpenAI standard) + from .models import ResponseCompletedEvent + + final_response = await executor.message_mapper.aggregate_to_response(events, request) + completed_event = ResponseCompletedEvent( + type="response.completed", + response=final_response, + sequence_number=len(events), + ) + yield f"data: {completed_event.model_dump_json()}\n\n" + # Send final done event yield "data: [DONE]\n\n" diff --git a/python/packages/devui/agent_framework_devui/_session.py b/python/packages/devui/agent_framework_devui/_session.py index 587207a924..5cabeee072 100644 --- a/python/packages/devui/agent_framework_devui/_session.py +++ b/python/packages/devui/agent_framework_devui/_session.py @@ -67,7 +67,7 @@ def close_session(self, session_id: str) -> None: logger.debug(f"Closed session: {session_id}") def add_request_record( - self, session_id: str, entity_id: str, executor_name: str, request_input: Any, model: str + self, session_id: str, entity_id: str, executor_name: str, request_input: Any, model_id: str ) -> str: """Add a request record to a session. 
@@ -76,7 +76,7 @@ def add_request_record( entity_id: ID of the entity being executed executor_name: Name of the executor request_input: Input for the request - model: Model name + model_id: Model name Returns: Request ID @@ -91,7 +91,7 @@ def add_request_record( "entity_id": entity_id, "executor": executor_name, "input": request_input, - "model": model, + "model_id": model_id, "stream": True, } session["requests"].append(request_record) diff --git a/python/packages/devui/agent_framework_devui/_utils.py b/python/packages/devui/agent_framework_devui/_utils.py index a36e5da8de..58aedbd2f3 100644 --- a/python/packages/devui/agent_framework_devui/_utils.py +++ b/python/packages/devui/agent_framework_devui/_utils.py @@ -10,6 +10,133 @@ logger = logging.getLogger(__name__) +# ============================================================================ +# Agent Metadata Extraction +# ============================================================================ + + +def extract_agent_metadata(entity_object: Any) -> dict[str, Any]: + """Extract agent-specific metadata from an entity object. 
+ + Args: + entity_object: Agent Framework agent object + + Returns: + Dictionary with agent metadata: instructions, model, chat_client_type, + context_providers, and middleware + """ + metadata = { + "instructions": None, + "model": None, + "chat_client_type": None, + "context_providers": None, + "middleware": None, + } + + # Try to get instructions + if hasattr(entity_object, "chat_options") and hasattr(entity_object.chat_options, "instructions"): + metadata["instructions"] = entity_object.chat_options.instructions + + # Try to get model - check both chat_options and chat_client + if ( + hasattr(entity_object, "chat_options") + and hasattr(entity_object.chat_options, "model_id") + and entity_object.chat_options.model_id + ): + metadata["model"] = entity_object.chat_options.model_id + elif hasattr(entity_object, "chat_client") and hasattr(entity_object.chat_client, "model_id"): + metadata["model"] = entity_object.chat_client.model_id + + # Try to get chat client type + if hasattr(entity_object, "chat_client"): + metadata["chat_client_type"] = entity_object.chat_client.__class__.__name__ + + # Try to get context providers + if ( + hasattr(entity_object, "context_provider") + and entity_object.context_provider + and hasattr(entity_object.context_provider, "__class__") + ): + metadata["context_providers"] = [entity_object.context_provider.__class__.__name__] # type: ignore + + # Try to get middleware + if hasattr(entity_object, "middleware") and entity_object.middleware: + middleware_list: list[str] = [] + for m in entity_object.middleware: + # Try multiple ways to get a good name for middleware + if hasattr(m, "__name__"): # Function or callable + middleware_list.append(m.__name__) + elif hasattr(m, "__class__"): # Class instance + middleware_list.append(m.__class__.__name__) + else: + middleware_list.append(str(m)) + metadata["middleware"] = middleware_list # type: ignore + + return metadata + + +# 
============================================================================ +# Workflow Input Type Utilities +# ============================================================================ + + +def extract_executor_message_types(executor: Any) -> list[Any]: + """Extract declared input types for the given executor. + + Args: + executor: Workflow executor object + + Returns: + List of message types that the executor accepts + """ + message_types: list[Any] = [] + + try: + input_types = getattr(executor, "input_types", None) + except Exception as exc: # pragma: no cover - defensive logging path + logger.debug(f"Failed to access executor input_types: {exc}") + else: + if input_types: + message_types = list(input_types) + + if not message_types and hasattr(executor, "_handlers"): + try: + handlers = executor._handlers + if isinstance(handlers, dict): + message_types = list(handlers.keys()) + except Exception as exc: # pragma: no cover - defensive logging path + logger.debug(f"Failed to read executor handlers: {exc}") + + return message_types + + +def select_primary_input_type(message_types: list[Any]) -> Any | None: + """Choose the most user-friendly input type for workflow inputs. + + Prefers str and dict types for better user experience. 
+ + Args: + message_types: List of possible message types + + Returns: + Selected primary input type, or None if list is empty + """ + if not message_types: + return None + + preferred = (str, dict) + + for candidate in preferred: + for message_type in message_types: + if message_type is candidate: + return candidate + origin = get_origin(message_type) + if origin is candidate: + return candidate + + return message_types[0] + + # ============================================================================ # Type System Utilities # ============================================================================ diff --git a/python/packages/devui/agent_framework_devui/models/__init__.py b/python/packages/devui/agent_framework_devui/models/__init__.py index d4c2d0da24..3db699beff 100644 --- a/python/packages/devui/agent_framework_devui/models/__init__.py +++ b/python/packages/devui/agent_framework_devui/models/__init__.py @@ -4,11 +4,18 @@ # Import discovery models # Import all OpenAI types directly from the openai package +from openai.types.conversations import Conversation, ConversationDeletedResource +from openai.types.conversations.conversation_item import ConversationItem from openai.types.responses import ( Response, + ResponseCompletedEvent, ResponseErrorEvent, ResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionToolCall, + ResponseFunctionToolCallOutputItem, ResponseInputParam, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, ResponseOutputMessage, ResponseOutputText, ResponseReasoningTextDeltaEvent, @@ -25,14 +32,9 @@ AgentFrameworkRequest, OpenAIError, ResponseFunctionResultComplete, - ResponseFunctionResultDelta, ResponseTraceEvent, ResponseTraceEventComplete, - ResponseTraceEventDelta, - ResponseUsageEventComplete, - ResponseUsageEventDelta, ResponseWorkflowEventComplete, - ResponseWorkflowEventDelta, ) # Type alias for compatibility @@ -41,6 +43,9 @@ # Export all types for easy importing __all__ = [ "AgentFrameworkRequest", + 
"Conversation", + "ConversationDeletedResource", + "ConversationItem", "DiscoveryResponse", "EntityInfo", "InputTokensDetails", @@ -49,11 +54,15 @@ "OpenAIResponse", "OutputTokensDetails", "Response", + "ResponseCompletedEvent", "ResponseErrorEvent", "ResponseFunctionCallArgumentsDeltaEvent", "ResponseFunctionResultComplete", - "ResponseFunctionResultDelta", + "ResponseFunctionToolCall", + "ResponseFunctionToolCallOutputItem", "ResponseInputParam", + "ResponseOutputItemAddedEvent", + "ResponseOutputItemDoneEvent", "ResponseOutputMessage", "ResponseOutputText", "ResponseReasoningTextDeltaEvent", @@ -61,12 +70,8 @@ "ResponseTextDeltaEvent", "ResponseTraceEvent", "ResponseTraceEventComplete", - "ResponseTraceEventDelta", "ResponseUsage", - "ResponseUsageEventComplete", - "ResponseUsageEventDelta", "ResponseWorkflowEventComplete", - "ResponseWorkflowEventDelta", "ResponsesModel", "ToolParam", ] diff --git a/python/packages/devui/agent_framework_devui/models/_discovery_models.py b/python/packages/devui/agent_framework_devui/models/_discovery_models.py index 936526cf27..f4faaf6065 100644 --- a/python/packages/devui/agent_framework_devui/models/_discovery_models.py +++ b/python/packages/devui/agent_framework_devui/models/_discovery_models.py @@ -39,7 +39,7 @@ class EntityInfo(BaseModel): # Agent-specific fields (optional, populated when available) instructions: str | None = None - model: str | None = None + model_id: str | None = None chat_client_type: str | None = None context_providers: list[str] | None = None middleware: list[str] | None = None diff --git a/python/packages/devui/agent_framework_devui/models/_openai_custom.py b/python/packages/devui/agent_framework_devui/models/_openai_custom.py index 91aae0eb5f..aa41ea2522 100644 --- a/python/packages/devui/agent_framework_devui/models/_openai_custom.py +++ b/python/packages/devui/agent_framework_devui/models/_openai_custom.py @@ -3,7 +3,7 @@ """Custom OpenAI-compatible event types for Agent Framework extensions. 
These are custom event types that extend beyond the standard OpenAI Responses API -to support Agent Framework specific features like workflows, traces, and function results. +to support Agent Framework specific features like workflows and traces. """ from __future__ import annotations @@ -15,18 +15,6 @@ # Custom Agent Framework OpenAI event types for structured data -class ResponseWorkflowEventDelta(BaseModel): - """Structured workflow event with completion tracking.""" - - type: Literal["response.workflow_event.delta"] = "response.workflow_event.delta" - delta: dict[str, Any] - executor_id: str | None = None - is_complete: bool = False # Track if this is the final part - item_id: str - output_index: int = 0 - sequence_number: int - - class ResponseWorkflowEventComplete(BaseModel): """Complete workflow event data.""" @@ -38,41 +26,6 @@ class ResponseWorkflowEventComplete(BaseModel): sequence_number: int -class ResponseFunctionResultDelta(BaseModel): - """Structured function result with completion tracking.""" - - type: Literal["response.function_result.delta"] = "response.function_result.delta" - delta: dict[str, Any] - call_id: str - is_complete: bool = False - item_id: str - output_index: int = 0 - sequence_number: int - - -class ResponseFunctionResultComplete(BaseModel): - """Complete function result data.""" - - type: Literal["response.function_result.complete"] = "response.function_result.complete" - data: dict[str, Any] # Complete function result data, not delta - call_id: str - item_id: str - output_index: int = 0 - sequence_number: int - - -class ResponseTraceEventDelta(BaseModel): - """Structured trace event with completion tracking.""" - - type: Literal["response.trace.delta"] = "response.trace.delta" - delta: dict[str, Any] - span_id: str | None = None - is_complete: bool = False - item_id: str - output_index: int = 0 - sequence_number: int - - class ResponseTraceEventComplete(BaseModel): """Complete trace event data.""" @@ -84,22 +37,18 @@ class 
ResponseTraceEventComplete(BaseModel): sequence_number: int -class ResponseUsageEventDelta(BaseModel): - """Structured usage event with completion tracking.""" - - type: Literal["response.usage.delta"] = "response.usage.delta" - delta: dict[str, Any] - is_complete: bool = False - item_id: str - output_index: int = 0 - sequence_number: int - +class ResponseFunctionResultComplete(BaseModel): + """Custom DevUI event for function execution results. -class ResponseUsageEventComplete(BaseModel): - """Complete usage event data.""" + This is a DevUI extension - OpenAI doesn't stream function execution results + because in their model, the application executes functions, not the API. + Agent Framework executes functions, so we emit this event for debugging visibility. + """ - type: Literal["response.usage.complete"] = "response.usage.complete" - data: dict[str, Any] # Complete usage data, not delta + type: Literal["response.function_result.complete"] = "response.function_result.complete" + call_id: str + output: str + status: Literal["in_progress", "completed", "incomplete"] item_id: str output_index: int = 0 sequence_number: int @@ -110,7 +59,6 @@ class AgentFrameworkExtraBody(BaseModel): """Agent Framework specific routing fields for OpenAI requests.""" entity_id: str - thread_id: str | None = None input_data: dict[str, Any] | None = None model_config = ConfigDict(extra="allow") @@ -118,17 +66,21 @@ class AgentFrameworkExtraBody(BaseModel): # Agent Framework Request Model - Extending real OpenAI types class AgentFrameworkRequest(BaseModel): - """OpenAI ResponseCreateParams with Agent Framework extensions. + """OpenAI ResponseCreateParams with Agent Framework routing. - This properly extends the real OpenAI API request format while adding - our custom routing fields in extra_body. + This properly extends the real OpenAI API request format. 
+ - Uses 'model' field as entity_id (agent/workflow name) + - Uses 'conversation' field for conversation context (OpenAI standard) """ # All OpenAI fields from ResponseCreateParams - model: str + model: str # Used as entity_id in DevUI! input: str | list[Any] # ResponseInputParam stream: bool | None = False + # OpenAI conversation parameter (standard!) + conversation: str | dict[str, Any] | None = None # Union[str, {"id": str}] + # Common OpenAI optional fields instructions: str | None = None metadata: dict[str, Any] | None = None @@ -136,32 +88,35 @@ class AgentFrameworkRequest(BaseModel): max_output_tokens: int | None = None tools: list[dict[str, Any]] | None = None - # Agent Framework extension - strongly typed - extra_body: AgentFrameworkExtraBody | None = None - - entity_id: str | None = None # Allow entity_id as top-level field + # Optional extra_body for advanced use cases + extra_body: dict[str, Any] | None = None model_config = ConfigDict(extra="allow") - def get_entity_id(self) -> str | None: - """Get entity_id from either top-level field or extra_body.""" - # Priority 1: Top-level entity_id field - if self.entity_id: - return self.entity_id - - # Priority 2: entity_id in extra_body - if self.extra_body and hasattr(self.extra_body, "entity_id"): - return self.extra_body.entity_id - + def get_entity_id(self) -> str: + """Get entity_id from model field. + + In DevUI, model IS the entity_id (agent/workflow name). + Simple and clean! + """ + return self.model + + def get_conversation_id(self) -> str | None: + """Extract conversation_id from conversation parameter. 
+ + Supports both string and object forms: + - conversation: "conv_123" + - conversation: {"id": "conv_123"} + """ + if isinstance(self.conversation, str): + return self.conversation + if isinstance(self.conversation, dict): + return self.conversation.get("id") return None def to_openai_params(self) -> dict[str, Any]: """Convert to dict for OpenAI client compatibility.""" - data = self.model_dump(exclude={"extra_body", "entity_id"}, exclude_none=True) - if self.extra_body: - # Don't merge extra_body into main params to keep them separate - data["extra_body"] = self.extra_body - return data + return self.model_dump(exclude_none=True) # Error handling @@ -198,12 +153,7 @@ def to_json(self) -> str: "AgentFrameworkRequest", "OpenAIError", "ResponseFunctionResultComplete", - "ResponseFunctionResultDelta", "ResponseTraceEvent", "ResponseTraceEventComplete", - "ResponseTraceEventDelta", - "ResponseUsageEventComplete", - "ResponseUsageEventDelta", "ResponseWorkflowEventComplete", - "ResponseWorkflowEventDelta", ] diff --git a/python/packages/devui/agent_framework_devui/ui/assets/index-BhFnsoso.css b/python/packages/devui/agent_framework_devui/ui/assets/index-BhFnsoso.css new file mode 100644 index 0000000000..41a2f21902 --- /dev/null +++ b/python/packages/devui/agent_framework_devui/ui/assets/index-BhFnsoso.css @@ -0,0 +1 @@ +/*! 
tailwindcss v4.1.12 | MIT License | https://tailwindcss.com */@layer properties{@supports (((-webkit-hyphens:none)) and (not (margin-trim:inline))) or ((-moz-orient:inline) and (not (color:rgb(from red r g b)))){*,:before,:after,::backdrop{--tw-translate-x:0;--tw-translate-y:0;--tw-translate-z:0;--tw-scale-x:1;--tw-scale-y:1;--tw-scale-z:1;--tw-rotate-x:initial;--tw-rotate-y:initial;--tw-rotate-z:initial;--tw-skew-x:initial;--tw-skew-y:initial;--tw-space-y-reverse:0;--tw-space-x-reverse:0;--tw-border-style:solid;--tw-leading:initial;--tw-font-weight:initial;--tw-tracking:initial;--tw-shadow:0 0 #0000;--tw-shadow-color:initial;--tw-shadow-alpha:100%;--tw-inset-shadow:0 0 #0000;--tw-inset-shadow-color:initial;--tw-inset-shadow-alpha:100%;--tw-ring-color:initial;--tw-ring-shadow:0 0 #0000;--tw-inset-ring-color:initial;--tw-inset-ring-shadow:0 0 #0000;--tw-ring-inset:initial;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-offset-shadow:0 0 #0000;--tw-outline-style:solid;--tw-blur:initial;--tw-brightness:initial;--tw-contrast:initial;--tw-grayscale:initial;--tw-hue-rotate:initial;--tw-invert:initial;--tw-opacity:initial;--tw-saturate:initial;--tw-sepia:initial;--tw-drop-shadow:initial;--tw-drop-shadow-color:initial;--tw-drop-shadow-alpha:100%;--tw-drop-shadow-size:initial;--tw-backdrop-blur:initial;--tw-backdrop-brightness:initial;--tw-backdrop-contrast:initial;--tw-backdrop-grayscale:initial;--tw-backdrop-hue-rotate:initial;--tw-backdrop-invert:initial;--tw-backdrop-opacity:initial;--tw-backdrop-saturate:initial;--tw-backdrop-sepia:initial;--tw-duration:initial;--tw-ease:initial;--tw-animation-delay:0s;--tw-animation-direction:normal;--tw-animation-duration:initial;--tw-animation-fill-mode:none;--tw-animation-iteration-count:1;--tw-enter-blur:0;--tw-enter-opacity:1;--tw-enter-rotate:0;--tw-enter-scale:1;--tw-enter-translate-x:0;--tw-enter-translate-y:0;--tw-exit-blur:0;--tw-exit-opacity:1;--tw-exit-rotate:0;--tw-exit-scale:1;--tw-exit-translate-x:0;--tw
-exit-translate-y:0}}}@layer theme{:root,:host{--font-sans:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-mono:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--color-red-50:oklch(97.1% .013 17.38);--color-red-100:oklch(93.6% .032 17.717);--color-red-200:oklch(88.5% .062 18.334);--color-red-400:oklch(70.4% .191 22.216);--color-red-500:oklch(63.7% .237 25.331);--color-red-600:oklch(57.7% .245 27.325);--color-red-700:oklch(50.5% .213 27.518);--color-red-800:oklch(44.4% .177 26.899);--color-red-900:oklch(39.6% .141 25.723);--color-red-950:oklch(25.8% .092 26.042);--color-orange-50:oklch(98% .016 73.684);--color-orange-100:oklch(95.4% .038 75.164);--color-orange-200:oklch(90.1% .076 70.697);--color-orange-400:oklch(75% .183 55.934);--color-orange-500:oklch(70.5% .213 47.604);--color-orange-600:oklch(64.6% .222 41.116);--color-orange-800:oklch(47% .157 37.304);--color-orange-900:oklch(40.8% .123 38.172);--color-orange-950:oklch(26.6% .079 36.259);--color-amber-50:oklch(98.7% .022 95.277);--color-amber-100:oklch(96.2% .059 95.617);--color-amber-200:oklch(92.4% .12 95.746);--color-amber-300:oklch(87.9% .169 91.605);--color-amber-400:oklch(82.8% .189 84.429);--color-amber-500:oklch(76.9% .188 70.08);--color-amber-600:oklch(66.6% .179 58.318);--color-amber-700:oklch(55.5% .163 48.998);--color-amber-800:oklch(47.3% .137 46.201);--color-amber-900:oklch(41.4% .112 45.904);--color-amber-950:oklch(27.9% .077 45.635);--color-yellow-100:oklch(97.3% .071 103.193);--color-yellow-200:oklch(94.5% .129 101.54);--color-yellow-400:oklch(85.2% .199 91.936);--color-yellow-600:oklch(68.1% .162 75.834);--color-yellow-700:oklch(55.4% .135 66.442);--color-green-50:oklch(98.2% .018 155.826);--color-green-100:oklch(96.2% .044 156.743);--color-green-200:oklch(92.5% .084 155.995);--color-green-300:oklch(87.1% .15 154.449);--color-green-400:oklch(79.2% .209 
151.711);--color-green-500:oklch(72.3% .219 149.579);--color-green-600:oklch(62.7% .194 149.214);--color-green-700:oklch(52.7% .154 150.069);--color-green-800:oklch(44.8% .119 151.328);--color-green-900:oklch(39.3% .095 152.535);--color-green-950:oklch(26.6% .065 152.934);--color-emerald-50:oklch(97.9% .021 166.113);--color-emerald-100:oklch(95% .052 163.051);--color-emerald-200:oklch(90.5% .093 164.15);--color-emerald-300:oklch(84.5% .143 164.978);--color-emerald-400:oklch(76.5% .177 163.223);--color-emerald-600:oklch(59.6% .145 163.225);--color-emerald-700:oklch(50.8% .118 165.612);--color-emerald-800:oklch(43.2% .095 166.913);--color-emerald-900:oklch(37.8% .077 168.94);--color-emerald-950:oklch(26.2% .051 172.552);--color-blue-50:oklch(97% .014 254.604);--color-blue-100:oklch(93.2% .032 255.585);--color-blue-200:oklch(88.2% .059 254.128);--color-blue-300:oklch(80.9% .105 251.813);--color-blue-400:oklch(70.7% .165 254.624);--color-blue-500:oklch(62.3% .214 259.815);--color-blue-600:oklch(54.6% .245 262.881);--color-blue-700:oklch(48.8% .243 264.376);--color-blue-800:oklch(42.4% .199 265.638);--color-blue-900:oklch(37.9% .146 265.522);--color-blue-950:oklch(28.2% .091 267.935);--color-purple-50:oklch(97.7% .014 308.299);--color-purple-100:oklch(94.6% .033 307.174);--color-purple-400:oklch(71.4% .203 305.504);--color-purple-500:oklch(62.7% .265 303.9);--color-purple-600:oklch(55.8% .288 302.321);--color-purple-900:oklch(38.1% .176 304.987);--color-gray-50:oklch(98.5% .002 247.839);--color-gray-100:oklch(96.7% .003 264.542);--color-gray-200:oklch(92.8% .006 264.531);--color-gray-300:oklch(87.2% .01 258.338);--color-gray-400:oklch(70.7% .022 261.325);--color-gray-500:oklch(55.1% .027 264.364);--color-gray-600:oklch(44.6% .03 256.802);--color-gray-700:oklch(37.3% .034 259.733);--color-gray-800:oklch(27.8% .033 256.848);--color-gray-900:oklch(21% .034 
264.665);--color-black:#000;--color-white:#fff;--spacing:.25rem;--container-md:28rem;--container-lg:32rem;--container-2xl:42rem;--container-4xl:56rem;--container-5xl:64rem;--container-7xl:80rem;--text-xs:.75rem;--text-xs--line-height:calc(1/.75);--text-sm:.875rem;--text-sm--line-height:calc(1.25/.875);--text-base:1rem;--text-base--line-height: 1.5 ;--text-lg:1.125rem;--text-lg--line-height:calc(1.75/1.125);--text-xl:1.25rem;--text-xl--line-height:calc(1.75/1.25);--text-2xl:1.5rem;--text-2xl--line-height:calc(2/1.5);--font-weight-medium:500;--font-weight-semibold:600;--tracking-widest:.1em;--leading-tight:1.25;--leading-relaxed:1.625;--drop-shadow-lg:0 4px 4px #00000026;--ease-in-out:cubic-bezier(.4,0,.2,1);--animate-spin:spin 1s linear infinite;--animate-pulse:pulse 2s cubic-bezier(.4,0,.6,1)infinite;--animate-bounce:bounce 1s infinite;--blur-sm:8px;--default-transition-duration:.15s;--default-transition-timing-function:cubic-bezier(.4,0,.2,1);--default-font-family:var(--font-sans);--default-mono-font-family:var(--font-mono)}}@layer base{*,:after,:before,::backdrop{box-sizing:border-box;border:0 solid;margin:0;padding:0}::file-selector-button{box-sizing:border-box;border:0 solid;margin:0;padding:0}html,:host{-webkit-text-size-adjust:100%;tab-size:4;line-height:1.5;font-family:var(--default-font-family,ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji");font-feature-settings:var(--default-font-feature-settings,normal);font-variation-settings:var(--default-font-variation-settings,normal);-webkit-tap-highlight-color:transparent}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline 
dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;-webkit-text-decoration:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:var(--default-mono-font-family,ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace);font-feature-settings:var(--default-mono-font-feature-settings,normal);font-variation-settings:var(--default-mono-font-variation-settings,normal);font-size:1em}small{font-size:80%}sub,sup{vertical-align:baseline;font-size:75%;line-height:0;position:relative}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}:-moz-focusring{outline:auto}progress{vertical-align:baseline}summary{display:list-item}ol,ul,menu{list-style:none}img,svg,video,canvas,audio,iframe,embed,object{vertical-align:middle;display:block}img,video{max-width:100%;height:auto}button,input,select,optgroup,textarea{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}::file-selector-button{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}:where(select:is([multiple],[size])) optgroup{font-weight:bolder}:where(select:is([multiple],[size])) optgroup option{padding-inline-start:20px}::file-selector-button{margin-inline-end:4px}::placeholder{opacity:1}@supports (not ((-webkit-appearance:-apple-pay-button))) or (contain-intrinsic-size:1px){::placeholder{color:currentColor}@supports (color:color-mix(in lab,red,red)){::placeholder{color:color-mix(in oklab,currentcolor 
50%,transparent)}}}textarea{resize:vertical}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-date-and-time-value{min-height:1lh;text-align:inherit}::-webkit-datetime-edit{display:inline-flex}::-webkit-datetime-edit-fields-wrapper{padding:0}::-webkit-datetime-edit{padding-block:0}::-webkit-datetime-edit-year-field{padding-block:0}::-webkit-datetime-edit-month-field{padding-block:0}::-webkit-datetime-edit-day-field{padding-block:0}::-webkit-datetime-edit-hour-field{padding-block:0}::-webkit-datetime-edit-minute-field{padding-block:0}::-webkit-datetime-edit-second-field{padding-block:0}::-webkit-datetime-edit-millisecond-field{padding-block:0}::-webkit-datetime-edit-meridiem-field{padding-block:0}::-webkit-calendar-picker-indicator{line-height:1}:-moz-ui-invalid{box-shadow:none}button,input:where([type=button],[type=reset],[type=submit]){appearance:button}::file-selector-button{appearance:button}::-webkit-inner-spin-button{height:auto}::-webkit-outer-spin-button{height:auto}[hidden]:where(:not([hidden=until-found])){display:none!important}*{border-color:var(--border);outline-color:var(--ring)}@supports (color:color-mix(in lab,red,red)){*{outline-color:color-mix(in oklab,var(--ring)50%,transparent)}}body{background-color:var(--background);color:var(--foreground)}}@layer components;@layer 
utilities{.\@container\/card-header{container:card-header/inline-size}.pointer-events-auto{pointer-events:auto}.pointer-events-none{pointer-events:none}.collapse{visibility:collapse}.visible{visibility:visible}.sr-only{clip:rect(0,0,0,0);white-space:nowrap;border-width:0;width:1px;height:1px;margin:-1px;padding:0;position:absolute;overflow:hidden}.absolute{position:absolute}.fixed{position:fixed}.relative{position:relative}.inset-0{inset:calc(var(--spacing)*0)}.inset-2{inset:calc(var(--spacing)*2)}.inset-x-0{inset-inline:calc(var(--spacing)*0)}.inset-y-0{inset-block:calc(var(--spacing)*0)}.-top-1{top:calc(var(--spacing)*-1)}.-top-2{top:calc(var(--spacing)*-2)}.top-4{top:calc(var(--spacing)*4)}.-right-1{right:calc(var(--spacing)*-1)}.-right-2{right:calc(var(--spacing)*-2)}.right-0{right:calc(var(--spacing)*0)}.right-2{right:calc(var(--spacing)*2)}.right-3{right:calc(var(--spacing)*3)}.right-4{right:calc(var(--spacing)*4)}.-bottom-2{bottom:calc(var(--spacing)*-2)}.bottom-0{bottom:calc(var(--spacing)*0)}.bottom-3{bottom:calc(var(--spacing)*3)}.bottom-14{bottom:calc(var(--spacing)*14)}.bottom-24{bottom:calc(var(--spacing)*24)}.-left-2{left:calc(var(--spacing)*-2)}.left-0{left:calc(var(--spacing)*0)}.left-1\/2{left:50%}.left-2{left:calc(var(--spacing)*2)}.z-10{z-index:10}.z-20{z-index:20}.z-50{z-index:50}.col-start-2{grid-column-start:2}.row-span-2{grid-row:span 2/span 2}.row-start-1{grid-row-start:1}.container{width:100%}@media (min-width:40rem){.container{max-width:40rem}}@media (min-width:48rem){.container{max-width:48rem}}@media (min-width:64rem){.container{max-width:64rem}}@media (min-width:80rem){.container{max-width:80rem}}@media 
(min-width:96rem){.container{max-width:96rem}}.-mx-1{margin-inline:calc(var(--spacing)*-1)}.mx-auto{margin-inline:auto}.my-1{margin-block:calc(var(--spacing)*1)}.my-2{margin-block:calc(var(--spacing)*2)}.mt-0{margin-top:calc(var(--spacing)*0)}.mt-0\.5{margin-top:calc(var(--spacing)*.5)}.mt-1{margin-top:calc(var(--spacing)*1)}.mt-2{margin-top:calc(var(--spacing)*2)}.mt-3{margin-top:calc(var(--spacing)*3)}.mt-4{margin-top:calc(var(--spacing)*4)}.mt-12{margin-top:calc(var(--spacing)*12)}.mr-1{margin-right:calc(var(--spacing)*1)}.mr-1\.5{margin-right:calc(var(--spacing)*1.5)}.mr-2{margin-right:calc(var(--spacing)*2)}.mb-1{margin-bottom:calc(var(--spacing)*1)}.mb-2{margin-bottom:calc(var(--spacing)*2)}.mb-3{margin-bottom:calc(var(--spacing)*3)}.mb-4{margin-bottom:calc(var(--spacing)*4)}.mb-6{margin-bottom:calc(var(--spacing)*6)}.mb-8{margin-bottom:calc(var(--spacing)*8)}.ml-1{margin-left:calc(var(--spacing)*1)}.ml-1\.5{margin-left:calc(var(--spacing)*1.5)}.ml-2{margin-left:calc(var(--spacing)*2)}.ml-3{margin-left:calc(var(--spacing)*3)}.ml-4{margin-left:calc(var(--spacing)*4)}.ml-5{margin-left:calc(var(--spacing)*5)}.ml-auto{margin-left:auto}.line-clamp-3{-webkit-line-clamp:3;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.block{display:block}.contents{display:contents}.flex{display:flex}.grid{display:grid}.hidden{display:none}.inline{display:inline}.inline-block{display:inline-block}.inline-flex{display:inline-flex}.field-sizing-content{field-sizing:content}.size-2{width:calc(var(--spacing)*2);height:calc(var(--spacing)*2)}.size-3\.5{width:calc(var(--spacing)*3.5);height:calc(var(--spacing)*3.5)}.size-4{width:calc(var(--spacing)*4);height:calc(var(--spacing)*4)}.size-9{width:calc(var(--spacing)*9);height:calc(var(--spacing)*9)}.\!h-2{height:calc(var(--spacing)*2)!important}.h-0{height:calc(var(--spacing)*0)}.h-0\.5{height:calc(var(--spacing)*.5)}.h-1{height:calc(var(--spacing)*1)}.h-2{height:calc(var(--spacing)*2)}.h-2\.5{height:calc(var(--spacing)*2.5)
}.h-3{height:calc(var(--spacing)*3)}.h-3\.5{height:calc(var(--spacing)*3.5)}.h-4{height:calc(var(--spacing)*4)}.h-5{height:calc(var(--spacing)*5)}.h-6{height:calc(var(--spacing)*6)}.h-7{height:calc(var(--spacing)*7)}.h-8{height:calc(var(--spacing)*8)}.h-9{height:calc(var(--spacing)*9)}.h-10{height:calc(var(--spacing)*10)}.h-12{height:calc(var(--spacing)*12)}.h-14{height:calc(var(--spacing)*14)}.h-16{height:calc(var(--spacing)*16)}.h-32{height:calc(var(--spacing)*32)}.h-96{height:calc(var(--spacing)*96)}.h-\[1\.2rem\]{height:1.2rem}.h-\[calc\(100vh-3\.5rem\)\]{height:calc(100vh - 3.5rem)}.h-\[calc\(100vh-3\.7rem\)\]{height:calc(100vh - 3.7rem)}.h-\[var\(--radix-select-trigger-height\)\]{height:var(--radix-select-trigger-height)}.h-full{height:100%}.h-px{height:1px}.h-screen{height:100vh}.max-h-\(--radix-dropdown-menu-content-available-height\){max-height:var(--radix-dropdown-menu-content-available-height)}.max-h-\(--radix-select-content-available-height\){max-height:var(--radix-select-content-available-height)}.max-h-20{max-height:calc(var(--spacing)*20)}.max-h-24{max-height:calc(var(--spacing)*24)}.max-h-32{max-height:calc(var(--spacing)*32)}.max-h-64{max-height:calc(var(--spacing)*64)}.max-h-96{max-height:calc(var(--spacing)*96)}.max-h-\[90vh\]{max-height:90vh}.max-h-\[200px\]{max-height:200px}.max-h-none{max-height:none}.max-h-screen{max-height:100vh}.\!min-h-0{min-height:calc(var(--spacing)*0)!important}.min-h-0{min-height:calc(var(--spacing)*0)}.min-h-16{min-height:calc(var(--spacing)*16)}.min-h-\[36px\]{min-height:36px}.min-h-\[40px\]{min-height:40px}.min-h-\[50vh\]{min-height:50vh}.min-h-\[240px\]{min-height:240px}.min-h-screen{min-height:100vh}.\!w-2{width:calc(var(--spacing)*2)!important}.w-1{width:calc(var(--spacing)*1)}.w-2{width:calc(var(--spacing)*2)}.w-2\.5{width:calc(var(--spacing)*2.5)}.w-3{width:calc(var(--spacing)*3)}.w-3\.5{width:calc(var(--spacing)*3.5)}.w-4{width:calc(var(--spacing)*4)}.w-5{width:calc(var(--spacing)*5)}.w-6{width:calc(var(--spaci
ng)*6)}.w-8{width:calc(var(--spacing)*8)}.w-10{width:calc(var(--spacing)*10)}.w-12{width:calc(var(--spacing)*12)}.w-16{width:calc(var(--spacing)*16)}.w-56{width:calc(var(--spacing)*56)}.w-64{width:calc(var(--spacing)*64)}.w-80{width:calc(var(--spacing)*80)}.w-\[1\.2rem\]{width:1.2rem}.w-\[600px\]{width:600px}.w-fit{width:fit-content}.w-full{width:100%}.max-w-2xl{max-width:var(--container-2xl)}.max-w-4xl{max-width:var(--container-4xl)}.max-w-7xl{max-width:var(--container-7xl)}.max-w-\[80\%\]{max-width:80%}.max-w-\[90vw\]{max-width:90vw}.max-w-full{max-width:100%}.max-w-lg{max-width:var(--container-lg)}.max-w-md{max-width:var(--container-md)}.max-w-none{max-width:none}.\!min-w-0{min-width:calc(var(--spacing)*0)!important}.min-w-0{min-width:calc(var(--spacing)*0)}.min-w-\[8rem\]{min-width:8rem}.min-w-\[var\(--radix-select-trigger-width\)\]{min-width:var(--radix-select-trigger-width)}.flex-1{flex:1}.flex-shrink-0,.shrink-0{flex-shrink:0}.origin-\(--radix-dropdown-menu-content-transform-origin\){transform-origin:var(--radix-dropdown-menu-content-transform-origin)}.origin-\(--radix-select-content-transform-origin\){transform-origin:var(--radix-select-content-transform-origin)}.-translate-x-1\/2{--tw-translate-x: -50% 
;translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-0{--tw-translate-x:calc(var(--spacing)*0);translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-4{--tw-translate-x:calc(var(--spacing)*4);translate:var(--tw-translate-x)var(--tw-translate-y)}.scale-0{--tw-scale-x:0%;--tw-scale-y:0%;--tw-scale-z:0%;scale:var(--tw-scale-x)var(--tw-scale-y)}.scale-75{--tw-scale-x:75%;--tw-scale-y:75%;--tw-scale-z:75%;scale:var(--tw-scale-x)var(--tw-scale-y)}.scale-100{--tw-scale-x:100%;--tw-scale-y:100%;--tw-scale-z:100%;scale:var(--tw-scale-x)var(--tw-scale-y)}.rotate-0{rotate:none}.rotate-90{rotate:90deg}.transform{transform:var(--tw-rotate-x,)var(--tw-rotate-y,)var(--tw-rotate-z,)var(--tw-skew-x,)var(--tw-skew-y,)}.animate-bounce{animation:var(--animate-bounce)}.animate-in{animation:enter var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.animate-pulse{animation:var(--animate-pulse)}.animate-spin{animation:var(--animate-spin)}.cursor-col-resize{cursor:col-resize}.cursor-default{cursor:default}.cursor-pointer{cursor:pointer}.cursor-row-resize{cursor:row-resize}.touch-none{touch-action:none}.resize{resize:both}.resize-none{resize:none}.scroll-my-1{scroll-margin-block:calc(var(--spacing)*1)}.list-none{list-style-type:none}.auto-rows-min{grid-auto-rows:min-content}.grid-cols-1{grid-template-columns:repeat(1,minmax(0,1fr))}.grid-rows-\[auto_auto\]{grid-template-rows:auto 
auto}.flex-col{flex-direction:column}.flex-row-reverse{flex-direction:row-reverse}.flex-wrap{flex-wrap:wrap}.items-center{align-items:center}.items-end{align-items:flex-end}.items-start{align-items:flex-start}.items-stretch{align-items:stretch}.justify-between{justify-content:space-between}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}.gap-1{gap:calc(var(--spacing)*1)}.gap-1\.5{gap:calc(var(--spacing)*1.5)}.gap-2{gap:calc(var(--spacing)*2)}.gap-3{gap:calc(var(--spacing)*3)}.gap-4{gap:calc(var(--spacing)*4)}.gap-6{gap:calc(var(--spacing)*6)}:where(.space-y-1>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-2>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*2)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-3>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*3)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*3)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-4>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*4)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*4)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-6>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*6)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*6)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-x-1>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*1)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*1)*calc(1 - 
var(--tw-space-x-reverse)))}:where(.space-x-2>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*2)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-x-reverse)))}.self-start{align-self:flex-start}.justify-self-end{justify-self:flex-end}.truncate{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.overflow-auto{overflow:auto}.overflow-hidden{overflow:hidden}.overflow-x-auto{overflow-x:auto}.overflow-x-hidden{overflow-x:hidden}.overflow-y-auto{overflow-y:auto}.\!rounded-full{border-radius:3.40282e38px!important}.rounded{border-radius:.25rem}.rounded-\[4px\]{border-radius:4px}.rounded-\[inherit\]{border-radius:inherit}.rounded-full{border-radius:3.40282e38px}.rounded-lg{border-radius:var(--radius)}.rounded-md{border-radius:calc(var(--radius) - 2px)}.rounded-none{border-radius:0}.rounded-sm{border-radius:calc(var(--radius) - 4px)}.rounded-t{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.rounded-l-none{border-top-left-radius:0;border-bottom-left-radius:0}.rounded-r-none{border-top-right-radius:0;border-bottom-right-radius:0}.\!border{border-style:var(--tw-border-style)!important;border-width:1px!important}.border{border-style:var(--tw-border-style);border-width:1px}.border-2{border-style:var(--tw-border-style);border-width:2px}.border-t{border-top-style:var(--tw-border-style);border-top-width:1px}.border-b{border-bottom-style:var(--tw-border-style);border-bottom-width:1px}.border-l{border-left-style:var(--tw-border-style);border-left-width:1px}.border-l-0{border-left-style:var(--tw-border-style);border-left-width:0}.border-l-2{border-left-style:var(--tw-border-style);border-left-width:2px}.border-dashed{--tw-border-style:dashed;border-style:dashed}.\!border-gray-600{border-color:var(--color-gray-600)!important}.border-\[\#643FB2\]{border-color:#643fb2}.border-\[\#643FB2\]\/30{border-color:#643fb24d}.border-\[\#643FB2\]\/40{border-color:#643fb266}.border-amber-200{bor
der-color:var(--color-amber-200)}.border-blue-200{border-color:var(--color-blue-200)}.border-blue-300{border-color:var(--color-blue-300)}.border-blue-400{border-color:var(--color-blue-400)}.border-blue-500\/30{border-color:#3080ff4d}@supports (color:color-mix(in lab,red,red)){.border-blue-500\/30{border-color:color-mix(in oklab,var(--color-blue-500)30%,transparent)}}.border-blue-500\/40{border-color:#3080ff66}@supports (color:color-mix(in lab,red,red)){.border-blue-500\/40{border-color:color-mix(in oklab,var(--color-blue-500)40%,transparent)}}.border-border,.border-border\/50{border-color:var(--border)}@supports (color:color-mix(in lab,red,red)){.border-border\/50{border-color:color-mix(in oklab,var(--border)50%,transparent)}}.border-destructive,.border-destructive\/20{border-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.border-destructive\/20{border-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.border-destructive\/30{border-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.border-destructive\/30{border-color:color-mix(in oklab,var(--destructive)30%,transparent)}}.border-destructive\/50{border-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.border-destructive\/50{border-color:color-mix(in oklab,var(--destructive)50%,transparent)}}.border-destructive\/70{border-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.border-destructive\/70{border-color:color-mix(in oklab,var(--destructive)70%,transparent)}}.border-emerald-300{border-color:var(--color-emerald-300)}.border-gray-200{border-color:var(--color-gray-200)}.border-gray-300{border-color:var(--color-gray-300)}.border-green-200{border-color:var(--color-green-200)}.border-green-500{border-color:var(--color-green-500)}.border-green-500\/30{border-color:#00c7584d}@supports (color:color-mix(in lab,red,red)){.border-green-500\/30{border-color:color-mix(in 
oklab,var(--color-green-500)30%,transparent)}}.border-green-500\/40{border-color:#00c75866}@supports (color:color-mix(in lab,red,red)){.border-green-500\/40{border-color:color-mix(in oklab,var(--color-green-500)40%,transparent)}}.border-input{border-color:var(--input)}.border-muted{border-color:var(--muted)}.border-orange-200{border-color:var(--color-orange-200)}.border-orange-500{border-color:var(--color-orange-500)}.border-primary\/20{border-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.border-primary\/20{border-color:color-mix(in oklab,var(--primary)20%,transparent)}}.border-red-200{border-color:var(--color-red-200)}.border-red-500{border-color:var(--color-red-500)}.border-transparent{border-color:#0000}.border-yellow-200{border-color:var(--color-yellow-200)}.border-t-transparent{border-top-color:#0000}.border-l-transparent{border-left-color:#0000}.bg-\[\#643FB2\]{background-color:#643fb2}.bg-\[\#643FB2\]\/5{background-color:#643fb20d}.bg-\[\#643FB2\]\/10{background-color:#643fb21a}.bg-accent\/10{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.bg-accent\/10{background-color:color-mix(in oklab,var(--accent)10%,transparent)}}.bg-amber-50{background-color:var(--color-amber-50)}.bg-background,.bg-background\/80{background-color:var(--background)}@supports (color:color-mix(in lab,red,red)){.bg-background\/80{background-color:color-mix(in oklab,var(--background)80%,transparent)}}.bg-black{background-color:var(--color-black)}.bg-black\/50{background-color:#00000080}@supports (color:color-mix(in lab,red,red)){.bg-black\/50{background-color:color-mix(in oklab,var(--color-black)50%,transparent)}}.bg-black\/60{background-color:#0009}@supports (color:color-mix(in lab,red,red)){.bg-black\/60{background-color:color-mix(in oklab,var(--color-black)60%,transparent)}}.bg-blue-50{background-color:var(--color-blue-50)}.bg-blue-50\/80{background-color:#eff6ffcc}@supports (color:color-mix(in 
lab,red,red)){.bg-blue-50\/80{background-color:color-mix(in oklab,var(--color-blue-50)80%,transparent)}}.bg-blue-100{background-color:var(--color-blue-100)}.bg-blue-500{background-color:var(--color-blue-500)}.bg-blue-500\/5{background-color:#3080ff0d}@supports (color:color-mix(in lab,red,red)){.bg-blue-500\/5{background-color:color-mix(in oklab,var(--color-blue-500)5%,transparent)}}.bg-blue-500\/10{background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.bg-blue-500\/10{background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.bg-border{background-color:var(--border)}.bg-card{background-color:var(--card)}.bg-current{background-color:currentColor}.bg-destructive,.bg-destructive\/5{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.bg-destructive\/5{background-color:color-mix(in oklab,var(--destructive)5%,transparent)}}.bg-destructive\/10{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.bg-destructive\/10{background-color:color-mix(in oklab,var(--destructive)10%,transparent)}}.bg-destructive\/80{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.bg-destructive\/80{background-color:color-mix(in oklab,var(--destructive)80%,transparent)}}.bg-emerald-50{background-color:var(--color-emerald-50)}.bg-emerald-100{background-color:var(--color-emerald-100)}.bg-gray-50{background-color:var(--color-gray-50)}.bg-gray-100{background-color:var(--color-gray-100)}.bg-gray-200{background-color:var(--color-gray-200)}.bg-gray-400{background-color:var(--color-gray-400)}.bg-gray-900\/90{background-color:#101828e6}@supports (color:color-mix(in lab,red,red)){.bg-gray-900\/90{background-color:color-mix(in oklab,var(--color-gray-900)90%,transparent)}}.bg-green-50{background-color:var(--color-green-50)}.bg-green-100{background-color:var(--color-green-100)}.bg-green-500{background-color:var(--color-green-500)}.bg-green-500\/5{background-color:#00c7580d}@supports 
(color:color-mix(in lab,red,red)){.bg-green-500\/5{background-color:color-mix(in oklab,var(--color-green-500)5%,transparent)}}.bg-green-500\/10{background-color:#00c7581a}@supports (color:color-mix(in lab,red,red)){.bg-green-500\/10{background-color:color-mix(in oklab,var(--color-green-500)10%,transparent)}}.bg-muted,.bg-muted\/30{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.bg-muted\/30{background-color:color-mix(in oklab,var(--muted)30%,transparent)}}.bg-muted\/50{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.bg-muted\/50{background-color:color-mix(in oklab,var(--muted)50%,transparent)}}.bg-orange-50{background-color:var(--color-orange-50)}.bg-orange-100{background-color:var(--color-orange-100)}.bg-orange-500{background-color:var(--color-orange-500)}.bg-popover{background-color:var(--popover)}.bg-primary,.bg-primary\/10{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/10{background-color:color-mix(in oklab,var(--primary)10%,transparent)}}.bg-primary\/30{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/30{background-color:color-mix(in oklab,var(--primary)30%,transparent)}}.bg-primary\/40{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/40{background-color:color-mix(in oklab,var(--primary)40%,transparent)}}.bg-purple-50{background-color:var(--color-purple-50)}.bg-purple-100{background-color:var(--color-purple-100)}.bg-red-50{background-color:var(--color-red-50)}.bg-red-100{background-color:var(--color-red-100)}.bg-red-500{background-color:var(--color-red-500)}.bg-secondary{background-color:var(--secondary)}.bg-transparent{background-color:#0000}.bg-white{background-color:var(--color-white)}.bg-white\/90{background-color:#ffffffe6}@supports (color:color-mix(in lab,red,red)){.bg-white\/90{background-color:color-mix(in 
oklab,var(--color-white)90%,transparent)}}.bg-yellow-100{background-color:var(--color-yellow-100)}.fill-current{fill:currentColor}.object-cover{object-fit:cover}.p-0{padding:calc(var(--spacing)*0)}.p-1{padding:calc(var(--spacing)*1)}.p-2{padding:calc(var(--spacing)*2)}.p-3{padding:calc(var(--spacing)*3)}.p-4{padding:calc(var(--spacing)*4)}.p-6{padding:calc(var(--spacing)*6)}.p-8{padding:calc(var(--spacing)*8)}.p-\[1px\]{padding:1px}.px-1{padding-inline:calc(var(--spacing)*1)}.px-1\.5{padding-inline:calc(var(--spacing)*1.5)}.px-2{padding-inline:calc(var(--spacing)*2)}.px-2\.5{padding-inline:calc(var(--spacing)*2.5)}.px-3{padding-inline:calc(var(--spacing)*3)}.px-4{padding-inline:calc(var(--spacing)*4)}.px-6{padding-inline:calc(var(--spacing)*6)}.px-8{padding-inline:calc(var(--spacing)*8)}.py-0{padding-block:calc(var(--spacing)*0)}.py-0\.5{padding-block:calc(var(--spacing)*.5)}.py-1{padding-block:calc(var(--spacing)*1)}.py-1\.5{padding-block:calc(var(--spacing)*1.5)}.py-2{padding-block:calc(var(--spacing)*2)}.py-3{padding-block:calc(var(--spacing)*3)}.py-4{padding-block:calc(var(--spacing)*4)}.py-6{padding-block:calc(var(--spacing)*6)}.py-8{padding-block:calc(var(--spacing)*8)}.pt-0{padding-top:calc(var(--spacing)*0)}.pt-2{padding-top:calc(var(--spacing)*2)}.pt-3{padding-top:calc(var(--spacing)*3)}.pt-4{padding-top:calc(var(--spacing)*4)}.pt-6{padding-top:calc(var(--spacing)*6)}.pt-8{padding-top:calc(var(--spacing)*8)}.pr-2{padding-right:calc(var(--spacing)*2)}.pr-8{padding-right:calc(var(--spacing)*8)}.pb-2{padding-bottom:calc(var(--spacing)*2)}.pb-3{padding-bottom:calc(var(--spacing)*3)}.pb-4{padding-bottom:calc(var(--spacing)*4)}.pb-6{padding-bottom:calc(var(--spacing)*6)}.pb-12{padding-bottom:calc(var(--spacing)*12)}.pl-2{padding-left:calc(var(--spacing)*2)}.pl-3{padding-left:calc(var(--spacing)*3)}.pl-8{padding-left:calc(var(--spacing)*8)}.text-center{text-align:center}.text-left{text-align:left}.font-mono{font-family:var(--font-mono)}.text-2xl{font-size:var(--te
xt-2xl);line-height:var(--tw-leading,var(--text-2xl--line-height))}.text-base{font-size:var(--text-base);line-height:var(--tw-leading,var(--text-base--line-height))}.text-lg{font-size:var(--text-lg);line-height:var(--tw-leading,var(--text-lg--line-height))}.text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.text-xl{font-size:var(--text-xl);line-height:var(--tw-leading,var(--text-xl--line-height))}.text-xs{font-size:var(--text-xs);line-height:var(--tw-leading,var(--text-xs--line-height))}.text-\[10px\]{font-size:10px}.leading-none{--tw-leading:1;line-height:1}.leading-relaxed{--tw-leading:var(--leading-relaxed);line-height:var(--leading-relaxed)}.leading-tight{--tw-leading:var(--leading-tight);line-height:var(--leading-tight)}.font-medium{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.font-semibold{--tw-font-weight:var(--font-weight-semibold);font-weight:var(--font-weight-semibold)}.tracking-widest{--tw-tracking:var(--tracking-widest);letter-spacing:var(--tracking-widest)}.break-words{overflow-wrap:break-word}.break-all{word-break:break-all}.whitespace-nowrap{white-space:nowrap}.whitespace-pre-wrap{white-space:pre-wrap}.text-\[\#643FB2\]{color:#643fb2}.text-amber-500{color:var(--color-amber-500)}.text-amber-600{color:var(--color-amber-600)}.text-amber-700{color:var(--color-amber-700)}.text-amber-900{color:var(--color-amber-900)}.text-blue-500{color:var(--color-blue-500)}.text-blue-600{color:var(--color-blue-600)}.text-blue-700{color:var(--color-blue-700)}.text-blue-800{color:var(--color-blue-800)}.text-card-foreground{color:var(--card-foreground)}.text-current{color:currentColor}.text-destructive{color:var(--destructive)}.text-emerald-600{color:var(--color-emerald-600)}.text-emerald-700{color:var(--color-emerald-700)}.text-emerald-800{color:var(--color-emerald-800)}.text-foreground{color:var(--foreground)}.text-gray-300{color:var(--color-gray-300)}.text-gray-400{color:var(--color-gray-400)}.t
ext-gray-500{color:var(--color-gray-500)}.text-gray-600{color:var(--color-gray-600)}.text-gray-700{color:var(--color-gray-700)}.text-gray-900{color:var(--color-gray-900)}.text-green-500{color:var(--color-green-500)}.text-green-600{color:var(--color-green-600)}.text-green-700{color:var(--color-green-700)}.text-green-800{color:var(--color-green-800)}.text-muted-foreground,.text-muted-foreground\/80{color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.text-muted-foreground\/80{color:color-mix(in oklab,var(--muted-foreground)80%,transparent)}}.text-orange-500{color:var(--color-orange-500)}.text-orange-600{color:var(--color-orange-600)}.text-orange-800{color:var(--color-orange-800)}.text-popover-foreground{color:var(--popover-foreground)}.text-primary{color:var(--primary)}.text-primary-foreground{color:var(--primary-foreground)}.text-purple-500{color:var(--color-purple-500)}.text-purple-600{color:var(--color-purple-600)}.text-red-500{color:var(--color-red-500)}.text-red-600{color:var(--color-red-600)}.text-red-700{color:var(--color-red-700)}.text-red-800{color:var(--color-red-800)}.text-secondary-foreground{color:var(--secondary-foreground)}.text-white{color:var(--color-white)}.text-yellow-600{color:var(--color-yellow-600)}.text-yellow-700{color:var(--color-yellow-700)}.capitalize{text-transform:capitalize}.underline-offset-4{text-underline-offset:4px}.opacity-0{opacity:0}.opacity-50{opacity:.5}.opacity-60{opacity:.6}.opacity-70{opacity:.7}.opacity-100{opacity:1}.shadow{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-lg{--tw-shadow:0 10px 15px -3px var(--tw-shadow-color,#0000001a),0 4px 6px -4px 
var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-md{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-sm{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-xs{--tw-shadow:0 1px 2px 0 var(--tw-shadow-color,#0000000d);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(1px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring-2{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-\[\#643FB2\]\/20{--tw-shadow-color:#643fb233}@supports (color:color-mix(in lab,red,red)){.shadow-\[\#643FB2\]\/20{--tw-shadow-color:color-mix(in oklab,oklab(47.4316% .069152 -.159147/.2) var(--tw-shadow-alpha),transparent)}}.shadow-green-500\/20{--tw-shadow-color:#00c75833}@supports (color:color-mix(in lab,red,red)){.shadow-green-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-green-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-orange-500\/20{--tw-shadow-color:#fe6e0033}@supports (color:color-mix(in lab,red,red)){.shadow-orange-500\/20{--tw-shadow-color:color-mix(in 
oklab,color-mix(in oklab,var(--color-orange-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-primary\/25{--tw-shadow-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.shadow-primary\/25{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--primary)25%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-red-500\/20{--tw-shadow-color:#fb2c3633}@supports (color:color-mix(in lab,red,red)){.shadow-red-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-red-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.ring-blue-500{--tw-ring-color:var(--color-blue-500)}.ring-offset-2{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.ring-offset-background{--tw-ring-offset-color:var(--background)}.outline-hidden{--tw-outline-style:none;outline-style:none}@media (forced-colors:active){.outline-hidden{outline-offset:2px;outline:2px solid #0000}}.outline{outline-style:var(--tw-outline-style);outline-width:1px}.drop-shadow-lg{--tw-drop-shadow-size:drop-shadow(0 4px 4px 
var(--tw-drop-shadow-color,#00000026));--tw-drop-shadow:drop-shadow(var(--drop-shadow-lg));filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.filter{filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.backdrop-blur-sm{--tw-backdrop-blur:blur(var(--blur-sm));-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.transition{transition-property:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to,opacity,box-shadow,transform,translate,scale,rotate,filter,-webkit-backdrop-filter,backdrop-filter,display,visibility,content-visibility,overlay,pointer-events;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-\[color\,box-shadow\]{transition-property:color,box-shadow;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-all{transition-property:all;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-colors{transition-proper
ty:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-opacity{transition-property:opacity;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-shadow{transition-property:box-shadow;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-transform{transition-property:transform,translate,scale,rotate;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-none{transition-property:none}.duration-200{--tw-duration:.2s;transition-duration:.2s}.duration-300{--tw-duration:.3s;transition-duration:.3s}.ease-in-out{--tw-ease:var(--ease-in-out);transition-timing-function:var(--ease-in-out)}.outline-none{--tw-outline-style:none;outline-style:none}.select-none{-webkit-user-select:none;user-select:none}.\[animation-delay\:-0\.3s\]{animation-delay:-.3s}.\[animation-delay\:-0\.15s\]{animation-delay:-.15s}.fade-in{--tw-enter-opacity:0}.running{animation-play-state:running}.slide-in-from-bottom-2{--tw-enter-translate-y:calc(2*var(--spacing))}.group-open\:rotate-180:is(:where(.group):is([open],:popover-open,:open) *){rotate:180deg}@media (hover:hover){.group-hover\:bg-primary:is(:where(.group):hover *){background-color:var(--primary)}.group-hover\:opacity-100:is(:where(.group):hover *){opacity:1}.group-hover\:shadow-md:is(:where(.group):hover *){--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px 
var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.group-hover\:shadow-primary\/20:is(:where(.group):hover *){--tw-shadow-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.group-hover\:shadow-primary\/20:is(:where(.group):hover *){--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--primary)20%,transparent)var(--tw-shadow-alpha),transparent)}}}.group-data-\[disabled\=true\]\:pointer-events-none:is(:where(.group)[data-disabled=true] *){pointer-events:none}.group-data-\[disabled\=true\]\:opacity-50:is(:where(.group)[data-disabled=true] *){opacity:.5}.peer-disabled\:cursor-not-allowed:is(:where(.peer):disabled~*){cursor:not-allowed}.peer-disabled\:opacity-50:is(:where(.peer):disabled~*){opacity:.5}.selection\:bg-primary ::selection{background-color:var(--primary)}.selection\:bg-primary::selection{background-color:var(--primary)}.selection\:text-primary-foreground ::selection{color:var(--primary-foreground)}.selection\:text-primary-foreground::selection{color:var(--primary-foreground)}.file\:inline-flex::file-selector-button{display:inline-flex}.file\:h-7::file-selector-button{height:calc(var(--spacing)*7)}.file\:border-0::file-selector-button{border-style:var(--tw-border-style);border-width:0}.file\:bg-transparent::file-selector-button{background-color:#0000}.file\:text-sm::file-selector-button{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.file\:font-medium::file-selector-button{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.file\:text-foreground::file-selector-button{color:var(--foreground)}.placeholder\:text-muted-foreground::placeholder{color:var(--muted-foreground)}@media 
(hover:hover){.hover\:border-border:hover{border-color:var(--border)}.hover\:bg-\[\#643FB2\]\/10:hover{background-color:#643fb21a}.hover\:bg-accent:hover{background-color:var(--accent)}.hover\:bg-amber-100:hover{background-color:var(--color-amber-100)}.hover\:bg-background:hover{background-color:var(--background)}.hover\:bg-blue-500\/10:hover{background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.hover\:bg-blue-500\/10:hover{background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.hover\:bg-destructive\/20:hover{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-destructive\/20:hover{background-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.hover\:bg-destructive\/80:hover{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-destructive\/80:hover{background-color:color-mix(in oklab,var(--destructive)80%,transparent)}}.hover\:bg-destructive\/90:hover{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-destructive\/90:hover{background-color:color-mix(in oklab,var(--destructive)90%,transparent)}}.hover\:bg-green-500\/10:hover{background-color:#00c7581a}@supports (color:color-mix(in lab,red,red)){.hover\:bg-green-500\/10:hover{background-color:color-mix(in oklab,var(--color-green-500)10%,transparent)}}.hover\:bg-muted:hover,.hover\:bg-muted\/30:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/30:hover{background-color:color-mix(in oklab,var(--muted)30%,transparent)}}.hover\:bg-muted\/50:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/50:hover{background-color:color-mix(in oklab,var(--muted)50%,transparent)}}.hover\:bg-primary\/20:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/20:hover{background-color:color-mix(in 
oklab,var(--primary)20%,transparent)}}.hover\:bg-primary\/80:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/80:hover{background-color:color-mix(in oklab,var(--primary)80%,transparent)}}.hover\:bg-primary\/90:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/90:hover{background-color:color-mix(in oklab,var(--primary)90%,transparent)}}.hover\:bg-secondary\/80:hover{background-color:var(--secondary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-secondary\/80:hover{background-color:color-mix(in oklab,var(--secondary)80%,transparent)}}.hover\:bg-white:hover{background-color:var(--color-white)}.hover\:text-accent-foreground:hover{color:var(--accent-foreground)}.hover\:text-foreground:hover{color:var(--foreground)}.hover\:underline:hover{text-decoration-line:underline}.hover\:opacity-70:hover{opacity:.7}.hover\:opacity-100:hover{opacity:1}.hover\:shadow-md:hover{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}}.focus\:bg-accent:focus{background-color:var(--accent)}.focus\:text-accent-foreground:focus{color:var(--accent-foreground)}.focus\:ring-2:focus{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus\:ring-ring:focus{--tw-ring-color:var(--ring)}.focus\:ring-offset-2:focus{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 
var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus\:outline-none:focus{--tw-outline-style:none;outline-style:none}.focus-visible\:border-ring:focus-visible{border-color:var(--ring)}.focus-visible\:ring-2:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-\[3px\]:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(3px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-destructive\/20:focus-visible{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-destructive\/20:focus-visible{--tw-ring-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.focus-visible\:ring-ring:focus-visible,.focus-visible\:ring-ring\/50:focus-visible{--tw-ring-color:var(--ring)}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-ring\/50:focus-visible{--tw-ring-color:color-mix(in oklab,var(--ring)50%,transparent)}}.focus-visible\:ring-offset-2:focus-visible{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus-visible\:outline-none:focus-visible{--tw-outline-style:none;outline-style:none}.disabled\:pointer-events-none:disabled{pointer-events:none}.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}.disabled\:opacity-50:disabled{opacity:.5}.has-data-\[slot\=card-action\]\:grid-cols-\[1fr_auto\]:has([data-slot=card-action]){grid-template-columns:1fr 
auto}.has-\[\>svg\]\:px-2\.5:has(>svg){padding-inline:calc(var(--spacing)*2.5)}.has-\[\>svg\]\:px-3:has(>svg){padding-inline:calc(var(--spacing)*3)}.has-\[\>svg\]\:px-4:has(>svg){padding-inline:calc(var(--spacing)*4)}.aria-invalid\:border-destructive[aria-invalid=true]{border-color:var(--destructive)}.aria-invalid\:ring-destructive\/20[aria-invalid=true]{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.aria-invalid\:ring-destructive\/20[aria-invalid=true]{--tw-ring-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.data-\[disabled\]\:pointer-events-none[data-disabled]{pointer-events:none}.data-\[disabled\]\:opacity-50[data-disabled]{opacity:.5}.data-\[inset\]\:pl-8[data-inset]{padding-left:calc(var(--spacing)*8)}.data-\[placeholder\]\:text-muted-foreground[data-placeholder]{color:var(--muted-foreground)}.data-\[side\=bottom\]\:translate-y-1[data-side=bottom]{--tw-translate-y:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=bottom\]\:slide-in-from-top-2[data-side=bottom]{--tw-enter-translate-y:calc(2*var(--spacing)*-1)}.data-\[side\=left\]\:-translate-x-1[data-side=left]{--tw-translate-x:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=left\]\:slide-in-from-right-2[data-side=left]{--tw-enter-translate-x:calc(2*var(--spacing))}.data-\[side\=right\]\:translate-x-1[data-side=right]{--tw-translate-x:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=right\]\:slide-in-from-left-2[data-side=right]{--tw-enter-translate-x:calc(2*var(--spacing)*-1)}.data-\[side\=top\]\:-translate-y-1[data-side=top]{--tw-translate-y:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=top\]\:slide-in-from-bottom-2[data-side=top]{--tw-enter-translate-y:calc(2*var(--spacing))}.data-\[size\=default\]\:h-9[data-size=default]{height:calc(var(--spacing)*9)}.data-\[size\=sm\]\:h-8[data-size=sm]{height:
calc(var(--spacing)*8)}:is(.\*\:data-\[slot\=select-value\]\:line-clamp-1>*)[data-slot=select-value]{-webkit-line-clamp:1;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}:is(.\*\:data-\[slot\=select-value\]\:flex>*)[data-slot=select-value]{display:flex}:is(.\*\:data-\[slot\=select-value\]\:items-center>*)[data-slot=select-value]{align-items:center}:is(.\*\:data-\[slot\=select-value\]\:gap-2>*)[data-slot=select-value]{gap:calc(var(--spacing)*2)}.data-\[state\=active\]\:bg-background[data-state=active]{background-color:var(--background)}.data-\[state\=active\]\:text-foreground[data-state=active]{color:var(--foreground)}.data-\[state\=active\]\:shadow[data-state=active]{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.data-\[state\=checked\]\:border-primary[data-state=checked]{border-color:var(--primary)}.data-\[state\=checked\]\:bg-primary[data-state=checked]{background-color:var(--primary)}.data-\[state\=checked\]\:text-primary-foreground[data-state=checked]{color:var(--primary-foreground)}.data-\[state\=closed\]\:animate-out[data-state=closed]{animation:exit var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.data-\[state\=closed\]\:fade-out-0[data-state=closed]{--tw-exit-opacity:0}.data-\[state\=closed\]\:zoom-out-95[data-state=closed]{--tw-exit-scale:.95}.data-\[state\=open\]\:animate-in[data-state=open]{animation:enter 
var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.data-\[state\=open\]\:bg-accent[data-state=open]{background-color:var(--accent)}.data-\[state\=open\]\:text-accent-foreground[data-state=open]{color:var(--accent-foreground)}.data-\[state\=open\]\:fade-in-0[data-state=open]{--tw-enter-opacity:0}.data-\[state\=open\]\:zoom-in-95[data-state=open]{--tw-enter-scale:.95}.data-\[variant\=destructive\]\:text-destructive[data-variant=destructive]{color:var(--destructive)}.data-\[variant\=destructive\]\:focus\:bg-destructive\/10[data-variant=destructive]:focus{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.data-\[variant\=destructive\]\:focus\:bg-destructive\/10[data-variant=destructive]:focus{background-color:color-mix(in oklab,var(--destructive)10%,transparent)}}.data-\[variant\=destructive\]\:focus\:text-destructive[data-variant=destructive]:focus{color:var(--destructive)}@media (min-width:40rem){.sm\:col-span-2{grid-column:span 2/span 2}.sm\:w-64{width:calc(var(--spacing)*64)}.sm\:max-w-lg{max-width:var(--container-lg)}.sm\:flex-none{flex:none}.sm\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.sm\:flex-row{flex-direction:row}.sm\:items-center{align-items:center}}@media (min-width:48rem){.md\:col-span-2{grid-column:span 2/span 2}.md\:col-start-2{grid-column-start:2}.md\:inline{display:inline}.md\:max-w-2xl{max-width:var(--container-2xl)}.md\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.md\:gap-8{gap:calc(var(--spacing)*8)}.md\:text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}}@media (min-width:64rem){.lg\:col-span-3{grid-column:span 3/span 
3}.lg\:max-w-4xl{max-width:var(--container-4xl)}.lg\:grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.lg\:flex-row{flex-direction:row}.lg\:items-center{align-items:center}.lg\:justify-between{justify-content:space-between}}@media (min-width:80rem){.xl\:col-span-2{grid-column:span 2/span 2}.xl\:col-span-4{grid-column:span 4/span 4}.xl\:max-w-5xl{max-width:var(--container-5xl)}.xl\:grid-cols-4{grid-template-columns:repeat(4,minmax(0,1fr))}}.dark\:scale-0:is(.dark *){--tw-scale-x:0%;--tw-scale-y:0%;--tw-scale-z:0%;scale:var(--tw-scale-x)var(--tw-scale-y)}.dark\:scale-100:is(.dark *){--tw-scale-x:100%;--tw-scale-y:100%;--tw-scale-z:100%;scale:var(--tw-scale-x)var(--tw-scale-y)}.dark\:-rotate-90:is(.dark *){rotate:-90deg}.dark\:rotate-0:is(.dark *){rotate:none}.dark\:\!border-gray-500:is(.dark *){border-color:var(--color-gray-500)!important}.dark\:\!border-gray-600:is(.dark *){border-color:var(--color-gray-600)!important}.dark\:border-\[\#8B5CF6\]:is(.dark *){border-color:#8b5cf6}.dark\:border-\[\#8B5CF6\]\/30:is(.dark *){border-color:#8b5cf64d}.dark\:border-\[\#8B5CF6\]\/40:is(.dark *){border-color:#8b5cf666}.dark\:border-amber-800:is(.dark *){border-color:var(--color-amber-800)}.dark\:border-amber-900:is(.dark *){border-color:var(--color-amber-900)}.dark\:border-blue-500:is(.dark *){border-color:var(--color-blue-500)}.dark\:border-blue-500\/30:is(.dark *){border-color:#3080ff4d}@supports (color:color-mix(in lab,red,red)){.dark\:border-blue-500\/30:is(.dark *){border-color:color-mix(in oklab,var(--color-blue-500)30%,transparent)}}.dark\:border-blue-500\/40:is(.dark *){border-color:#3080ff66}@supports (color:color-mix(in lab,red,red)){.dark\:border-blue-500\/40:is(.dark *){border-color:color-mix(in oklab,var(--color-blue-500)40%,transparent)}}.dark\:border-blue-600:is(.dark *){border-color:var(--color-blue-600)}.dark\:border-blue-800:is(.dark *){border-color:var(--color-blue-800)}.dark\:border-emerald-600:is(.dark 
*){border-color:var(--color-emerald-600)}.dark\:border-gray-600:is(.dark *){border-color:var(--color-gray-600)}.dark\:border-gray-700:is(.dark *){border-color:var(--color-gray-700)}.dark\:border-green-400:is(.dark *){border-color:var(--color-green-400)}.dark\:border-green-400\/30:is(.dark *){border-color:#05df724d}@supports (color:color-mix(in lab,red,red)){.dark\:border-green-400\/30:is(.dark *){border-color:color-mix(in oklab,var(--color-green-400)30%,transparent)}}.dark\:border-green-400\/40:is(.dark *){border-color:#05df7266}@supports (color:color-mix(in lab,red,red)){.dark\:border-green-400\/40:is(.dark *){border-color:color-mix(in oklab,var(--color-green-400)40%,transparent)}}.dark\:border-green-800:is(.dark *){border-color:var(--color-green-800)}.dark\:border-input:is(.dark *){border-color:var(--input)}.dark\:border-orange-400:is(.dark *){border-color:var(--color-orange-400)}.dark\:border-orange-800:is(.dark *){border-color:var(--color-orange-800)}.dark\:border-red-400:is(.dark *){border-color:var(--color-red-400)}.dark\:border-red-800:is(.dark *){border-color:var(--color-red-800)}.dark\:\!bg-gray-800\/90:is(.dark *){background-color:#1e2939e6!important}@supports (color:color-mix(in lab,red,red)){.dark\:\!bg-gray-800\/90:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-800)90%,transparent)!important}}.dark\:bg-\[\#8B5CF6\]:is(.dark *){background-color:#8b5cf6}.dark\:bg-\[\#8B5CF6\]\/5:is(.dark *){background-color:#8b5cf60d}.dark\:bg-\[\#8B5CF6\]\/10:is(.dark *){background-color:#8b5cf61a}.dark\:bg-amber-950\/20:is(.dark *){background-color:#46190133}@supports (color:color-mix(in lab,red,red)){.dark\:bg-amber-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-amber-950)20%,transparent)}}.dark\:bg-background:is(.dark *){background-color:var(--background)}.dark\:bg-blue-500\/5:is(.dark *){background-color:#3080ff0d}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-500\/5:is(.dark *){background-color:color-mix(in 
oklab,var(--color-blue-500)5%,transparent)}}.dark\:bg-blue-500\/10:is(.dark *){background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.dark\:bg-blue-900:is(.dark *){background-color:var(--color-blue-900)}.dark\:bg-blue-900\/50:is(.dark *){background-color:#1c398e80}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-900\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-900)50%,transparent)}}.dark\:bg-blue-950\/20:is(.dark *){background-color:#16245633}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)20%,transparent)}}.dark\:bg-blue-950\/40:is(.dark *){background-color:#16245666}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/40:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)40%,transparent)}}.dark\:bg-blue-950\/50:is(.dark *){background-color:#16245680}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)50%,transparent)}}.dark\:bg-card:is(.dark *){background-color:var(--card)}.dark\:bg-destructive\/20:is(.dark *){background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-destructive\/20:is(.dark *){background-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.dark\:bg-destructive\/60:is(.dark *){background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-destructive\/60:is(.dark *){background-color:color-mix(in oklab,var(--destructive)60%,transparent)}}.dark\:bg-emerald-900\/50:is(.dark *){background-color:#004e3b80}@supports (color:color-mix(in lab,red,red)){.dark\:bg-emerald-900\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-emerald-900)50%,transparent)}}.dark\:bg-emerald-950\/50:is(.dark 
*){background-color:#002c2280}@supports (color:color-mix(in lab,red,red)){.dark\:bg-emerald-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-emerald-950)50%,transparent)}}.dark\:bg-gray-500:is(.dark *){background-color:var(--color-gray-500)}.dark\:bg-gray-800:is(.dark *){background-color:var(--color-gray-800)}.dark\:bg-gray-800\/90:is(.dark *){background-color:#1e2939e6}@supports (color:color-mix(in lab,red,red)){.dark\:bg-gray-800\/90:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-800)90%,transparent)}}.dark\:bg-gray-900:is(.dark *){background-color:var(--color-gray-900)}.dark\:bg-green-400:is(.dark *){background-color:var(--color-green-400)}.dark\:bg-green-400\/5:is(.dark *){background-color:#05df720d}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-400\/5:is(.dark *){background-color:color-mix(in oklab,var(--color-green-400)5%,transparent)}}.dark\:bg-green-400\/10:is(.dark *){background-color:#05df721a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-400\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-green-400)10%,transparent)}}.dark\:bg-green-900:is(.dark *){background-color:var(--color-green-900)}.dark\:bg-green-950:is(.dark *){background-color:var(--color-green-950)}.dark\:bg-green-950\/20:is(.dark *){background-color:#032e1533}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-green-950)20%,transparent)}}.dark\:bg-green-950\/50:is(.dark *){background-color:#032e1580}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-green-950)50%,transparent)}}.dark\:bg-input\/30:is(.dark *){background-color:var(--input)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-input\/30:is(.dark *){background-color:color-mix(in oklab,var(--input)30%,transparent)}}.dark\:bg-orange-400:is(.dark 
*){background-color:var(--color-orange-400)}.dark\:bg-orange-900:is(.dark *){background-color:var(--color-orange-900)}.dark\:bg-orange-950:is(.dark *){background-color:var(--color-orange-950)}.dark\:bg-orange-950\/50:is(.dark *){background-color:#44130680}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-950)50%,transparent)}}.dark\:bg-purple-900:is(.dark *){background-color:var(--color-purple-900)}.dark\:bg-red-400:is(.dark *){background-color:var(--color-red-400)}.dark\:bg-red-900:is(.dark *){background-color:var(--color-red-900)}.dark\:bg-red-950:is(.dark *){background-color:var(--color-red-950)}.dark\:bg-red-950\/20:is(.dark *){background-color:#46080933}@supports (color:color-mix(in lab,red,red)){.dark\:bg-red-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-red-950)20%,transparent)}}.dark\:bg-red-950\/50:is(.dark *){background-color:#46080980}@supports (color:color-mix(in lab,red,red)){.dark\:bg-red-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-red-950)50%,transparent)}}.dark\:text-\[\#8B5CF6\]:is(.dark *){color:#8b5cf6}.dark\:text-amber-100:is(.dark *){color:var(--color-amber-100)}.dark\:text-amber-300:is(.dark *){color:var(--color-amber-300)}.dark\:text-amber-400:is(.dark *){color:var(--color-amber-400)}.dark\:text-amber-500:is(.dark *){color:var(--color-amber-500)}.dark\:text-blue-200:is(.dark *){color:var(--color-blue-200)}.dark\:text-blue-300:is(.dark *){color:var(--color-blue-300)}.dark\:text-blue-400:is(.dark *){color:var(--color-blue-400)}.dark\:text-blue-500:is(.dark *){color:var(--color-blue-500)}.dark\:text-emerald-200:is(.dark *){color:var(--color-emerald-200)}.dark\:text-emerald-300:is(.dark *){color:var(--color-emerald-300)}.dark\:text-emerald-400:is(.dark *){color:var(--color-emerald-400)}.dark\:text-gray-100:is(.dark *){color:var(--color-gray-100)}.dark\:text-gray-300:is(.dark 
*){color:var(--color-gray-300)}.dark\:text-gray-400:is(.dark *){color:var(--color-gray-400)}.dark\:text-green-200:is(.dark *){color:var(--color-green-200)}.dark\:text-green-300:is(.dark *){color:var(--color-green-300)}.dark\:text-green-400:is(.dark *){color:var(--color-green-400)}.dark\:text-orange-200:is(.dark *){color:var(--color-orange-200)}.dark\:text-orange-400:is(.dark *){color:var(--color-orange-400)}.dark\:text-purple-400:is(.dark *){color:var(--color-purple-400)}.dark\:text-red-200:is(.dark *){color:var(--color-red-200)}.dark\:text-red-400:is(.dark *){color:var(--color-red-400)}.dark\:text-yellow-400:is(.dark *){color:var(--color-yellow-400)}.dark\:opacity-30:is(.dark *){opacity:.3}@media (hover:hover){.dark\:hover\:bg-\[\#8B5CF6\]\/10:is(.dark *):hover{background-color:#8b5cf61a}.dark\:hover\:bg-accent\/50:is(.dark *):hover{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-accent\/50:is(.dark *):hover{background-color:color-mix(in oklab,var(--accent)50%,transparent)}}.dark\:hover\:bg-amber-950\/30:is(.dark *):hover{background-color:#4619014d}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-amber-950\/30:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-amber-950)30%,transparent)}}.dark\:hover\:bg-blue-500\/10:is(.dark *):hover{background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-blue-500\/10:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.dark\:hover\:bg-destructive\/30:is(.dark *):hover{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-destructive\/30:is(.dark *):hover{background-color:color-mix(in oklab,var(--destructive)30%,transparent)}}.dark\:hover\:bg-gray-800:is(.dark *):hover{background-color:var(--color-gray-800)}.dark\:hover\:bg-green-400\/10:is(.dark *):hover{background-color:#05df721a}@supports (color:color-mix(in 
lab,red,red)){.dark\:hover\:bg-green-400\/10:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-green-400)10%,transparent)}}.dark\:hover\:bg-input\/50:is(.dark *):hover{background-color:var(--input)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-input\/50:is(.dark *):hover{background-color:color-mix(in oklab,var(--input)50%,transparent)}}}.dark\:focus-visible\:ring-destructive\/40:is(.dark *):focus-visible{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:focus-visible\:ring-destructive\/40:is(.dark *):focus-visible{--tw-ring-color:color-mix(in oklab,var(--destructive)40%,transparent)}}.dark\:aria-invalid\:ring-destructive\/40:is(.dark *)[aria-invalid=true]{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:aria-invalid\:ring-destructive\/40:is(.dark *)[aria-invalid=true]{--tw-ring-color:color-mix(in oklab,var(--destructive)40%,transparent)}}.dark\:data-\[state\=checked\]\:bg-primary:is(.dark *)[data-state=checked]{background-color:var(--primary)}.dark\:data-\[variant\=destructive\]\:focus\:bg-destructive\/20:is(.dark *)[data-variant=destructive]:focus{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:data-\[variant\=destructive\]\:focus\:bg-destructive\/20:is(.dark *)[data-variant=destructive]:focus{background-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.\[\&_svg\]\:pointer-events-none svg{pointer-events:none}.\[\&_svg\]\:shrink-0 svg{flex-shrink:0}.\[\&_svg\:not\(\[class\*\=\'size-\'\]\)\]\:size-4 svg:not([class*=size-]){width:calc(var(--spacing)*4);height:calc(var(--spacing)*4)}.\[\&_svg\:not\(\[class\*\=\'text-\'\]\)\]\:text-muted-foreground 
svg:not([class*=text-]){color:var(--muted-foreground)}.\[\.border-b\]\:pb-6.border-b{padding-bottom:calc(var(--spacing)*6)}.\[\.border-t\]\:pt-6.border-t{padding-top:calc(var(--spacing)*6)}:is(.\*\:\[span\]\:last\:flex>*):is(span):last-child{display:flex}:is(.\*\:\[span\]\:last\:items-center>*):is(span):last-child{align-items:center}:is(.\*\:\[span\]\:last\:gap-2>*):is(span):last-child{gap:calc(var(--spacing)*2)}:is(.data-\[variant\=destructive\]\:\*\:\[svg\]\:\!text-destructive[data-variant=destructive]>*):is(svg){color:var(--destructive)!important}}@property --tw-animation-delay{syntax:"*";inherits:false;initial-value:0s}@property --tw-animation-direction{syntax:"*";inherits:false;initial-value:normal}@property --tw-animation-duration{syntax:"*";inherits:false}@property --tw-animation-fill-mode{syntax:"*";inherits:false;initial-value:none}@property --tw-animation-iteration-count{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-blur{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-opacity{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-rotate{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-scale{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-blur{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-opacity{syntax:"*";inherits:false;initial-value:1}@property --tw-exit-rotate{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-scale{syntax:"*";inherits:false;initial-value:1}@property --tw-exit-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-translate-y{syntax:"*";inherits:false;initial-value:0}:root{--radius:.625rem;--background:oklch(100% 0 0);--foreground:oklch(14.5% 0 0);--card:oklch(100% 0 0);--card-foreground:oklch(14.5% 0 0);--popover:oklch(100% 0 0);--popover-foreground:oklch(14.5% 0 
0);--primary:oklch(48% .18 290);--primary-foreground:oklch(98.5% 0 0);--secondary:oklch(97% 0 0);--secondary-foreground:oklch(20.5% 0 0);--muted:oklch(97% 0 0);--muted-foreground:oklch(55.6% 0 0);--accent:oklch(97% 0 0);--accent-foreground:oklch(20.5% 0 0);--destructive:oklch(57.7% .245 27.325);--border:oklch(92.2% 0 0);--input:oklch(92.2% 0 0);--ring:oklch(70.8% 0 0);--chart-1:oklch(64.6% .222 41.116);--chart-2:oklch(60% .118 184.704);--chart-3:oklch(39.8% .07 227.392);--chart-4:oklch(82.8% .189 84.429);--chart-5:oklch(76.9% .188 70.08);--sidebar:oklch(98.5% 0 0);--sidebar-foreground:oklch(14.5% 0 0);--sidebar-primary:oklch(20.5% 0 0);--sidebar-primary-foreground:oklch(98.5% 0 0);--sidebar-accent:oklch(97% 0 0);--sidebar-accent-foreground:oklch(20.5% 0 0);--sidebar-border:oklch(92.2% 0 0);--sidebar-ring:oklch(70.8% 0 0)}.dark{--background:oklch(14.5% 0 0);--foreground:oklch(98.5% 0 0);--card:oklch(20.5% 0 0);--card-foreground:oklch(98.5% 0 0);--popover:oklch(20.5% 0 0);--popover-foreground:oklch(98.5% 0 0);--primary:oklch(62% .2 290);--primary-foreground:oklch(98.5% 0 0);--secondary:oklch(26.9% 0 0);--secondary-foreground:oklch(98.5% 0 0);--muted:oklch(26.9% 0 0);--muted-foreground:oklch(70.8% 0 0);--accent:oklch(26.9% 0 0);--accent-foreground:oklch(98.5% 0 0);--destructive:oklch(70.4% .191 22.216);--border:oklch(100% 0 0/.1);--input:oklch(100% 0 0/.15);--ring:oklch(55.6% 0 0);--chart-1:oklch(48.8% .243 264.376);--chart-2:oklch(69.6% .17 162.48);--chart-3:oklch(76.9% .188 70.08);--chart-4:oklch(62.7% .265 303.9);--chart-5:oklch(64.5% .246 16.439);--sidebar:oklch(20.5% 0 0);--sidebar-foreground:oklch(98.5% 0 0);--sidebar-primary:oklch(48.8% .243 264.376);--sidebar-primary-foreground:oklch(98.5% 0 0);--sidebar-accent:oklch(26.9% 0 0);--sidebar-accent-foreground:oklch(98.5% 0 0);--sidebar-border:oklch(100% 0 0/.1);--sidebar-ring:oklch(55.6% 0 0)}.workflow-chat-view .border-green-200{border-color:var(--color-emerald-200)}.workflow-chat-view 
.bg-green-50{background-color:var(--color-emerald-50)}.workflow-chat-view .bg-green-100{background-color:var(--color-emerald-100)}.workflow-chat-view .text-green-600{color:var(--color-emerald-600)}.workflow-chat-view .text-green-700{color:var(--color-emerald-700)}.workflow-chat-view .text-green-800{color:var(--color-emerald-800)}@property --tw-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-z{syntax:"*";inherits:false;initial-value:0}@property --tw-scale-x{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-y{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-z{syntax:"*";inherits:false;initial-value:1}@property --tw-rotate-x{syntax:"*";inherits:false}@property --tw-rotate-y{syntax:"*";inherits:false}@property --tw-rotate-z{syntax:"*";inherits:false}@property --tw-skew-x{syntax:"*";inherits:false}@property --tw-skew-y{syntax:"*";inherits:false}@property --tw-space-y-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-space-x-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-border-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-leading{syntax:"*";inherits:false}@property --tw-font-weight{syntax:"*";inherits:false}@property --tw-tracking{syntax:"*";inherits:false}@property --tw-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-shadow-color{syntax:"*";inherits:false}@property --tw-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-inset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-shadow-color{syntax:"*";inherits:false}@property --tw-inset-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-ring-color{syntax:"*";inherits:false}@property --tw-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-ring-color{syntax:"*";inherits:false}@property 
--tw-inset-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-ring-inset{syntax:"*";inherits:false}@property --tw-ring-offset-width{syntax:"";inherits:false;initial-value:0}@property --tw-ring-offset-color{syntax:"*";inherits:false;initial-value:#fff}@property --tw-ring-offset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-outline-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-blur{syntax:"*";inherits:false}@property --tw-brightness{syntax:"*";inherits:false}@property --tw-contrast{syntax:"*";inherits:false}@property --tw-grayscale{syntax:"*";inherits:false}@property --tw-hue-rotate{syntax:"*";inherits:false}@property --tw-invert{syntax:"*";inherits:false}@property --tw-opacity{syntax:"*";inherits:false}@property --tw-saturate{syntax:"*";inherits:false}@property --tw-sepia{syntax:"*";inherits:false}@property --tw-drop-shadow{syntax:"*";inherits:false}@property --tw-drop-shadow-color{syntax:"*";inherits:false}@property --tw-drop-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-drop-shadow-size{syntax:"*";inherits:false}@property --tw-backdrop-blur{syntax:"*";inherits:false}@property --tw-backdrop-brightness{syntax:"*";inherits:false}@property --tw-backdrop-contrast{syntax:"*";inherits:false}@property --tw-backdrop-grayscale{syntax:"*";inherits:false}@property --tw-backdrop-hue-rotate{syntax:"*";inherits:false}@property --tw-backdrop-invert{syntax:"*";inherits:false}@property --tw-backdrop-opacity{syntax:"*";inherits:false}@property --tw-backdrop-saturate{syntax:"*";inherits:false}@property --tw-backdrop-sepia{syntax:"*";inherits:false}@property --tw-duration{syntax:"*";inherits:false}@property --tw-ease{syntax:"*";inherits:false}@keyframes spin{to{transform:rotate(360deg)}}@keyframes pulse{50%{opacity:.5}}@keyframes 
bounce{0%,to{animation-timing-function:cubic-bezier(.8,0,1,1);transform:translateY(-25%)}50%{animation-timing-function:cubic-bezier(0,0,.2,1);transform:none}}@keyframes enter{0%{opacity:var(--tw-enter-opacity,1);transform:translate3d(var(--tw-enter-translate-x,0),var(--tw-enter-translate-y,0),0)scale3d(var(--tw-enter-scale,1),var(--tw-enter-scale,1),var(--tw-enter-scale,1))rotate(var(--tw-enter-rotate,0));filter:blur(var(--tw-enter-blur,0))}}@keyframes exit{to{opacity:var(--tw-exit-opacity,1);transform:translate3d(var(--tw-exit-translate-x,0),var(--tw-exit-translate-y,0),0)scale3d(var(--tw-exit-scale,1),var(--tw-exit-scale,1),var(--tw-exit-scale,1))rotate(var(--tw-exit-rotate,0));filter:blur(var(--tw-exit-blur,0))}}.react-flow{direction:ltr;--xy-edge-stroke-default: #b1b1b7;--xy-edge-stroke-width-default: 1;--xy-edge-stroke-selected-default: #555;--xy-connectionline-stroke-default: #b1b1b7;--xy-connectionline-stroke-width-default: 1;--xy-attribution-background-color-default: rgba(255, 255, 255, .5);--xy-minimap-background-color-default: #fff;--xy-minimap-mask-background-color-default: rgba(240, 240, 240, .6);--xy-minimap-mask-stroke-color-default: transparent;--xy-minimap-mask-stroke-width-default: 1;--xy-minimap-node-background-color-default: #e2e2e2;--xy-minimap-node-stroke-color-default: transparent;--xy-minimap-node-stroke-width-default: 2;--xy-background-color-default: transparent;--xy-background-pattern-dots-color-default: #91919a;--xy-background-pattern-lines-color-default: #eee;--xy-background-pattern-cross-color-default: #e2e2e2;background-color:var(--xy-background-color, var(--xy-background-color-default));--xy-node-color-default: inherit;--xy-node-border-default: 1px solid #1a192b;--xy-node-background-color-default: #fff;--xy-node-group-background-color-default: rgba(240, 240, 240, .25);--xy-node-boxshadow-hover-default: 0 1px 4px 1px rgba(0, 0, 0, .08);--xy-node-boxshadow-selected-default: 0 0 0 .5px #1a192b;--xy-node-border-radius-default: 
3px;--xy-handle-background-color-default: #1a192b;--xy-handle-border-color-default: #fff;--xy-selection-background-color-default: rgba(0, 89, 220, .08);--xy-selection-border-default: 1px dotted rgba(0, 89, 220, .8);--xy-controls-button-background-color-default: #fefefe;--xy-controls-button-background-color-hover-default: #f4f4f4;--xy-controls-button-color-default: inherit;--xy-controls-button-color-hover-default: inherit;--xy-controls-button-border-color-default: #eee;--xy-controls-box-shadow-default: 0 0 2px 1px rgba(0, 0, 0, .08);--xy-edge-label-background-color-default: #ffffff;--xy-edge-label-color-default: inherit;--xy-resize-background-color-default: #3367d9}.react-flow.dark{--xy-edge-stroke-default: #3e3e3e;--xy-edge-stroke-width-default: 1;--xy-edge-stroke-selected-default: #727272;--xy-connectionline-stroke-default: #b1b1b7;--xy-connectionline-stroke-width-default: 1;--xy-attribution-background-color-default: rgba(150, 150, 150, .25);--xy-minimap-background-color-default: #141414;--xy-minimap-mask-background-color-default: rgba(60, 60, 60, .6);--xy-minimap-mask-stroke-color-default: transparent;--xy-minimap-mask-stroke-width-default: 1;--xy-minimap-node-background-color-default: #2b2b2b;--xy-minimap-node-stroke-color-default: transparent;--xy-minimap-node-stroke-width-default: 2;--xy-background-color-default: #141414;--xy-background-pattern-dots-color-default: #777;--xy-background-pattern-lines-color-default: #777;--xy-background-pattern-cross-color-default: #777;--xy-node-color-default: #f8f8f8;--xy-node-border-default: 1px solid #3c3c3c;--xy-node-background-color-default: #1e1e1e;--xy-node-group-background-color-default: rgba(240, 240, 240, .25);--xy-node-boxshadow-hover-default: 0 1px 4px 1px rgba(255, 255, 255, .08);--xy-node-boxshadow-selected-default: 0 0 0 .5px #999;--xy-handle-background-color-default: #bebebe;--xy-handle-border-color-default: #1e1e1e;--xy-selection-background-color-default: rgba(200, 200, 220, .08);--xy-selection-border-default: 
1px dotted rgba(200, 200, 220, .8);--xy-controls-button-background-color-default: #2b2b2b;--xy-controls-button-background-color-hover-default: #3e3e3e;--xy-controls-button-color-default: #f8f8f8;--xy-controls-button-color-hover-default: #fff;--xy-controls-button-border-color-default: #5b5b5b;--xy-controls-box-shadow-default: 0 0 2px 1px rgba(0, 0, 0, .08);--xy-edge-label-background-color-default: #141414;--xy-edge-label-color-default: #f8f8f8}.react-flow__background{background-color:var(--xy-background-color-props, var(--xy-background-color, var(--xy-background-color-default)));pointer-events:none;z-index:-1}.react-flow__container{position:absolute;width:100%;height:100%;top:0;left:0}.react-flow__pane{z-index:1}.react-flow__pane.draggable{cursor:grab}.react-flow__pane.dragging{cursor:grabbing}.react-flow__pane.selection{cursor:pointer}.react-flow__viewport{transform-origin:0 0;z-index:2;pointer-events:none}.react-flow__renderer{z-index:4}.react-flow__selection{z-index:6}.react-flow__nodesselection-rect:focus,.react-flow__nodesselection-rect:focus-visible{outline:none}.react-flow__edge-path{stroke:var(--xy-edge-stroke, var(--xy-edge-stroke-default));stroke-width:var(--xy-edge-stroke-width, var(--xy-edge-stroke-width-default));fill:none}.react-flow__connection-path{stroke:var(--xy-connectionline-stroke, var(--xy-connectionline-stroke-default));stroke-width:var(--xy-connectionline-stroke-width, var(--xy-connectionline-stroke-width-default));fill:none}.react-flow .react-flow__edges{position:absolute}.react-flow .react-flow__edges svg{overflow:visible;position:absolute;pointer-events:none}.react-flow__edge{pointer-events:visibleStroke}.react-flow__edge.selectable{cursor:pointer}.react-flow__edge.animated path{stroke-dasharray:5;animation:dashdraw .5s linear infinite}.react-flow__edge.animated 
path.react-flow__edge-interaction{stroke-dasharray:none;animation:none}.react-flow__edge.inactive{pointer-events:none}.react-flow__edge.selected,.react-flow__edge:focus,.react-flow__edge:focus-visible{outline:none}.react-flow__edge.selected .react-flow__edge-path,.react-flow__edge.selectable:focus .react-flow__edge-path,.react-flow__edge.selectable:focus-visible .react-flow__edge-path{stroke:var(--xy-edge-stroke-selected, var(--xy-edge-stroke-selected-default))}.react-flow__edge-textwrapper{pointer-events:all}.react-flow__edge .react-flow__edge-text{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__arrowhead polyline{stroke:var(--xy-edge-stroke, var(--xy-edge-stroke-default))}.react-flow__arrowhead polyline.arrowclosed{fill:var(--xy-edge-stroke, var(--xy-edge-stroke-default))}.react-flow__connection{pointer-events:none}.react-flow__connection .animated{stroke-dasharray:5;animation:dashdraw .5s linear infinite}svg.react-flow__connectionline{z-index:1001;overflow:visible;position:absolute}.react-flow__nodes{pointer-events:none;transform-origin:0 0}.react-flow__node{position:absolute;-webkit-user-select:none;-moz-user-select:none;user-select:none;pointer-events:all;transform-origin:0 0;box-sizing:border-box;cursor:default}.react-flow__node.selectable{cursor:pointer}.react-flow__node.draggable{cursor:grab;pointer-events:all}.react-flow__node.draggable.dragging{cursor:grabbing}.react-flow__nodesselection{z-index:3;transform-origin:left top;pointer-events:none}.react-flow__nodesselection-rect{position:absolute;pointer-events:all;cursor:grab}.react-flow__handle{position:absolute;pointer-events:none;min-width:5px;min-height:5px;width:6px;height:6px;background-color:var(--xy-handle-background-color, var(--xy-handle-background-color-default));border:1px solid var(--xy-handle-border-color, 
var(--xy-handle-border-color-default));border-radius:100%}.react-flow__handle.connectingfrom{pointer-events:all}.react-flow__handle.connectionindicator{pointer-events:all;cursor:crosshair}.react-flow__handle-bottom{top:auto;left:50%;bottom:0;transform:translate(-50%,50%)}.react-flow__handle-top{top:0;left:50%;transform:translate(-50%,-50%)}.react-flow__handle-left{top:50%;left:0;transform:translate(-50%,-50%)}.react-flow__handle-right{top:50%;right:0;transform:translate(50%,-50%)}.react-flow__edgeupdater{cursor:move;pointer-events:all}.react-flow__pane.selection .react-flow__panel{pointer-events:none}.react-flow__panel{position:absolute;z-index:5;margin:15px}.react-flow__panel.top{top:0}.react-flow__panel.bottom{bottom:0}.react-flow__panel.top.center,.react-flow__panel.bottom.center{left:50%;transform:translate(-15px) translate(-50%)}.react-flow__panel.left{left:0}.react-flow__panel.right{right:0}.react-flow__panel.left.center,.react-flow__panel.right.center{top:50%;transform:translateY(-15px) translateY(-50%)}.react-flow__attribution{font-size:10px;background:var(--xy-attribution-background-color, var(--xy-attribution-background-color-default));padding:2px 3px;margin:0}.react-flow__attribution a{text-decoration:none;color:#999}@keyframes dashdraw{0%{stroke-dashoffset:10}}.react-flow__edgelabel-renderer{position:absolute;width:100%;height:100%;pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none;left:0;top:0}.react-flow__viewport-portal{position:absolute;width:100%;height:100%;left:0;top:0;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__minimap{background:var( --xy-minimap-background-color-props, var(--xy-minimap-background-color, var(--xy-minimap-background-color-default)) )}.react-flow__minimap-svg{display:block}.react-flow__minimap-mask{fill:var( --xy-minimap-mask-background-color-props, var(--xy-minimap-mask-background-color, var(--xy-minimap-mask-background-color-default)) );stroke:var( 
--xy-minimap-mask-stroke-color-props, var(--xy-minimap-mask-stroke-color, var(--xy-minimap-mask-stroke-color-default)) );stroke-width:var( --xy-minimap-mask-stroke-width-props, var(--xy-minimap-mask-stroke-width, var(--xy-minimap-mask-stroke-width-default)) )}.react-flow__minimap-node{fill:var( --xy-minimap-node-background-color-props, var(--xy-minimap-node-background-color, var(--xy-minimap-node-background-color-default)) );stroke:var( --xy-minimap-node-stroke-color-props, var(--xy-minimap-node-stroke-color, var(--xy-minimap-node-stroke-color-default)) );stroke-width:var( --xy-minimap-node-stroke-width-props, var(--xy-minimap-node-stroke-width, var(--xy-minimap-node-stroke-width-default)) )}.react-flow__background-pattern.dots{fill:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-dots-color-default)) )}.react-flow__background-pattern.lines{stroke:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-lines-color-default)) )}.react-flow__background-pattern.cross{stroke:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-cross-color-default)) )}.react-flow__controls{display:flex;flex-direction:column;box-shadow:var(--xy-controls-box-shadow, var(--xy-controls-box-shadow-default))}.react-flow__controls.horizontal{flex-direction:row}.react-flow__controls-button{display:flex;justify-content:center;align-items:center;height:26px;width:26px;padding:4px;border:none;background:var(--xy-controls-button-background-color, var(--xy-controls-button-background-color-default));border-bottom:1px solid var( --xy-controls-button-border-color-props, var(--xy-controls-button-border-color, var(--xy-controls-button-border-color-default)) );color:var( --xy-controls-button-color-props, var(--xy-controls-button-color, var(--xy-controls-button-color-default)) 
);cursor:pointer;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__controls-button svg{width:100%;max-width:12px;max-height:12px;fill:currentColor}.react-flow__edge.updating .react-flow__edge-path{stroke:#777}.react-flow__edge-text{font-size:10px}.react-flow__node.selectable:focus,.react-flow__node.selectable:focus-visible{outline:none}.react-flow__node-input,.react-flow__node-default,.react-flow__node-output,.react-flow__node-group{padding:10px;border-radius:var(--xy-node-border-radius, var(--xy-node-border-radius-default));width:150px;font-size:12px;color:var(--xy-node-color, var(--xy-node-color-default));text-align:center;border:var(--xy-node-border, var(--xy-node-border-default));background-color:var(--xy-node-background-color, var(--xy-node-background-color-default))}.react-flow__node-input.selectable:hover,.react-flow__node-default.selectable:hover,.react-flow__node-output.selectable:hover,.react-flow__node-group.selectable:hover{box-shadow:var(--xy-node-boxshadow-hover, var(--xy-node-boxshadow-hover-default))}.react-flow__node-input.selectable.selected,.react-flow__node-input.selectable:focus,.react-flow__node-input.selectable:focus-visible,.react-flow__node-default.selectable.selected,.react-flow__node-default.selectable:focus,.react-flow__node-default.selectable:focus-visible,.react-flow__node-output.selectable.selected,.react-flow__node-output.selectable:focus,.react-flow__node-output.selectable:focus-visible,.react-flow__node-group.selectable.selected,.react-flow__node-group.selectable:focus,.react-flow__node-group.selectable:focus-visible{box-shadow:var(--xy-node-boxshadow-selected, var(--xy-node-boxshadow-selected-default))}.react-flow__node-group{background-color:var(--xy-node-group-background-color, var(--xy-node-group-background-color-default))}.react-flow__nodesselection-rect,.react-flow__selection{background:var(--xy-selection-background-color, 
var(--xy-selection-background-color-default));border:var(--xy-selection-border, var(--xy-selection-border-default))}.react-flow__nodesselection-rect:focus,.react-flow__nodesselection-rect:focus-visible,.react-flow__selection:focus,.react-flow__selection:focus-visible{outline:none}.react-flow__controls-button:hover{background:var( --xy-controls-button-background-color-hover-props, var(--xy-controls-button-background-color-hover, var(--xy-controls-button-background-color-hover-default)) );color:var( --xy-controls-button-color-hover-props, var(--xy-controls-button-color-hover, var(--xy-controls-button-color-hover-default)) )}.react-flow__controls-button:disabled{pointer-events:none}.react-flow__controls-button:disabled svg{fill-opacity:.4}.react-flow__controls-button:last-child{border-bottom:none}.react-flow__controls.horizontal .react-flow__controls-button{border-bottom:none;border-right:1px solid var( --xy-controls-button-border-color-props, var(--xy-controls-button-border-color, var(--xy-controls-button-border-color-default)) )}.react-flow__controls.horizontal .react-flow__controls-button:last-child{border-right:none}.react-flow__resize-control{position:absolute}.react-flow__resize-control.left,.react-flow__resize-control.right{cursor:ew-resize}.react-flow__resize-control.top,.react-flow__resize-control.bottom{cursor:ns-resize}.react-flow__resize-control.top.left,.react-flow__resize-control.bottom.right{cursor:nwse-resize}.react-flow__resize-control.bottom.left,.react-flow__resize-control.top.right{cursor:nesw-resize}.react-flow__resize-control.handle{width:5px;height:5px;border:1px solid #fff;border-radius:1px;background-color:var(--xy-resize-background-color, var(--xy-resize-background-color-default));translate:-50% 
-50%}.react-flow__resize-control.handle.left{left:0;top:50%}.react-flow__resize-control.handle.right{left:100%;top:50%}.react-flow__resize-control.handle.top{left:50%;top:0}.react-flow__resize-control.handle.bottom{left:50%;top:100%}.react-flow__resize-control.handle.top.left,.react-flow__resize-control.handle.bottom.left{left:0}.react-flow__resize-control.handle.top.right,.react-flow__resize-control.handle.bottom.right{left:100%}.react-flow__resize-control.line{border-color:var(--xy-resize-background-color, var(--xy-resize-background-color-default));border-width:0;border-style:solid}.react-flow__resize-control.line.left,.react-flow__resize-control.line.right{width:1px;transform:translate(-50%);top:0;height:100%}.react-flow__resize-control.line.left{left:0;border-left-width:1px}.react-flow__resize-control.line.right{left:100%;border-right-width:1px}.react-flow__resize-control.line.top,.react-flow__resize-control.line.bottom{height:1px;transform:translateY(-50%);left:0;width:100%}.react-flow__resize-control.line.top{top:0;border-top-width:1px}.react-flow__resize-control.line.bottom{border-bottom-width:1px;top:100%}.react-flow__edge-textbg{fill:var(--xy-edge-label-background-color, var(--xy-edge-label-background-color-default))}.react-flow__edge-text{fill:var(--xy-edge-label-color, var(--xy-edge-label-color-default))} diff --git a/python/packages/devui/agent_framework_devui/ui/assets/index-D0SfShuZ.js b/python/packages/devui/agent_framework_devui/ui/assets/index-D0SfShuZ.js deleted file mode 100644 index ba82315af1..0000000000 --- a/python/packages/devui/agent_framework_devui/ui/assets/index-D0SfShuZ.js +++ /dev/null @@ -1,445 +0,0 @@ -function v_(e,r){for(var o=0;os[i]})}}}return Object.freeze(Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}))}(function(){const r=document.createElement("link").relList;if(r&&r.supports&&r.supports("modulepreload"))return;for(const i of document.querySelectorAll('link[rel="modulepreload"]'))s(i);new 
MutationObserver(i=>{for(const u of i)if(u.type==="childList")for(const d of u.addedNodes)d.tagName==="LINK"&&d.rel==="modulepreload"&&s(d)}).observe(document,{childList:!0,subtree:!0});function o(i){const u={};return i.integrity&&(u.integrity=i.integrity),i.referrerPolicy&&(u.referrerPolicy=i.referrerPolicy),i.crossOrigin==="use-credentials"?u.credentials="include":i.crossOrigin==="anonymous"?u.credentials="omit":u.credentials="same-origin",u}function s(i){if(i.ep)return;i.ep=!0;const u=o(i);fetch(i.href,u)}})();function Rm(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var ph={exports:{}},ui={};/** - * @license React - * react-jsx-runtime.production.js - * - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */var cy;function b_(){if(cy)return ui;cy=1;var e=Symbol.for("react.transitional.element"),r=Symbol.for("react.fragment");function o(s,i,u){var d=null;if(u!==void 0&&(d=""+u),i.key!==void 0&&(d=""+i.key),"key"in i){u={};for(var f in i)f!=="key"&&(u[f]=i[f])}else u=i;return i=u.ref,{$$typeof:e,type:s,key:d,ref:i!==void 0?i:null,props:u}}return ui.Fragment=r,ui.jsx=o,ui.jsxs=o,ui}var uy;function w_(){return uy||(uy=1,ph.exports=b_()),ph.exports}var c=w_(),gh={exports:{}},Re={};/** - * @license React - * react.production.js - * - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var dy;function S_(){if(dy)return Re;dy=1;var e=Symbol.for("react.transitional.element"),r=Symbol.for("react.portal"),o=Symbol.for("react.fragment"),s=Symbol.for("react.strict_mode"),i=Symbol.for("react.profiler"),u=Symbol.for("react.consumer"),d=Symbol.for("react.context"),f=Symbol.for("react.forward_ref"),m=Symbol.for("react.suspense"),p=Symbol.for("react.memo"),g=Symbol.for("react.lazy"),v=Symbol.iterator;function y(k){return k===null||typeof k!="object"?null:(k=v&&k[v]||k["@@iterator"],typeof k=="function"?k:null)}var w={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},N=Object.assign,S={};function _(k,B,K){this.props=k,this.context=B,this.refs=S,this.updater=K||w}_.prototype.isReactComponent={},_.prototype.setState=function(k,B){if(typeof k!="object"&&typeof k!="function"&&k!=null)throw Error("takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,k,B,"setState")},_.prototype.forceUpdate=function(k){this.updater.enqueueForceUpdate(this,k,"forceUpdate")};function A(){}A.prototype=_.prototype;function R(k,B,K){this.props=k,this.context=B,this.refs=S,this.updater=K||w}var E=R.prototype=new A;E.constructor=R,N(E,_.prototype),E.isPureReactComponent=!0;var M=Array.isArray,O={H:null,A:null,T:null,S:null,V:null},H=Object.prototype.hasOwnProperty;function I(k,B,K,Z,te,he){return K=he.ref,{$$typeof:e,type:k,key:B,ref:K!==void 0?K:null,props:he}}function U(k,B){return I(k.type,B,void 0,void 0,void 0,k.props)}function Y(k){return typeof k=="object"&&k!==null&&k.$$typeof===e}function Q(k){var B={"=":"=0",":":"=2"};return"$"+k.replace(/[=:]/g,function(K){return B[K]})}var ee=/\/+/g;function q(k,B){return typeof k=="object"&&k!==null&&k.key!=null?Q(""+k.key):B.toString(36)}function X(){}function z(k){switch(k.status){case"fulfilled":return k.value;case"rejected":throw k.reason;default:switch(typeof 
k.status=="string"?k.then(X,X):(k.status="pending",k.then(function(B){k.status==="pending"&&(k.status="fulfilled",k.value=B)},function(B){k.status==="pending"&&(k.status="rejected",k.reason=B)})),k.status){case"fulfilled":return k.value;case"rejected":throw k.reason}}throw k}function $(k,B,K,Z,te){var he=typeof k;(he==="undefined"||he==="boolean")&&(k=null);var fe=!1;if(k===null)fe=!0;else switch(he){case"bigint":case"string":case"number":fe=!0;break;case"object":switch(k.$$typeof){case e:case r:fe=!0;break;case g:return fe=k._init,$(fe(k._payload),B,K,Z,te)}}if(fe)return te=te(k),fe=Z===""?"."+q(k,0):Z,M(te)?(K="",fe!=null&&(K=fe.replace(ee,"$&/")+"/"),$(te,B,K,"",function(me){return me})):te!=null&&(Y(te)&&(te=U(te,K+(te.key==null||k&&k.key===te.key?"":(""+te.key).replace(ee,"$&/")+"/")+fe)),B.push(te)),1;fe=0;var ne=Z===""?".":Z+":";if(M(k))for(var ae=0;ae>>1,k=C[P];if(0>>1;Pi(Z,L))tei(he,Z)?(C[P]=he,C[te]=L,P=te):(C[P]=Z,C[K]=L,P=K);else if(tei(he,L))C[P]=he,C[te]=L,P=te;else break e}}return D}function i(C,D){var L=C.sortIndex-D.sortIndex;return L!==0?L:C.id-D.id}if(e.unstable_now=void 0,typeof performance=="object"&&typeof performance.now=="function"){var u=performance;e.unstable_now=function(){return u.now()}}else{var d=Date,f=d.now();e.unstable_now=function(){return d.now()-f}}var m=[],p=[],g=1,v=null,y=3,w=!1,N=!1,S=!1,_=!1,A=typeof setTimeout=="function"?setTimeout:null,R=typeof clearTimeout=="function"?clearTimeout:null,E=typeof setImmediate<"u"?setImmediate:null;function M(C){for(var D=o(p);D!==null;){if(D.callback===null)s(p);else if(D.startTime<=C)s(p),D.sortIndex=D.expirationTime,r(m,D);else break;D=o(p)}}function O(C){if(S=!1,M(C),!N)if(o(m)!==null)N=!0,H||(H=!0,q());else{var D=o(p);D!==null&&$(O,D.startTime-C)}}var H=!1,I=-1,U=5,Y=-1;function Q(){return _?!0:!(e.unstable_now()-YC&&Q());){var P=v.callback;if(typeof P=="function"){v.callback=null,y=v.priorityLevel;var k=P(v.expirationTime<=C);if(C=e.unstable_now(),typeof 
k=="function"){v.callback=k,M(C),D=!0;break t}v===o(m)&&s(m),M(C)}else s(m);v=o(m)}if(v!==null)D=!0;else{var B=o(p);B!==null&&$(O,B.startTime-C),D=!1}}break e}finally{v=null,y=L,w=!1}D=void 0}}finally{D?q():H=!1}}}var q;if(typeof E=="function")q=function(){E(ee)};else if(typeof MessageChannel<"u"){var X=new MessageChannel,z=X.port2;X.port1.onmessage=ee,q=function(){z.postMessage(null)}}else q=function(){A(ee,0)};function $(C,D){I=A(function(){C(e.unstable_now())},D)}e.unstable_IdlePriority=5,e.unstable_ImmediatePriority=1,e.unstable_LowPriority=4,e.unstable_NormalPriority=3,e.unstable_Profiling=null,e.unstable_UserBlockingPriority=2,e.unstable_cancelCallback=function(C){C.callback=null},e.unstable_forceFrameRate=function(C){0>C||125P?(C.sortIndex=L,r(p,C),o(m)===null&&C===o(p)&&(S?(R(I),I=-1):S=!0,$(O,L-P))):(C.sortIndex=k,r(m,C),N||w||(N=!0,H||(H=!0,q()))),C},e.unstable_shouldYield=Q,e.unstable_wrapCallback=function(C){var D=y;return function(){var L=y;y=D;try{return C.apply(this,arguments)}finally{y=L}}}})(vh)),vh}var my;function E_(){return my||(my=1,yh.exports=N_()),yh.exports}var bh={exports:{}},kt={};/** - * @license React - * react-dom.production.js - * - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */var py;function __(){if(py)return kt;py=1;var e=Bi();function r(m){var p="https://react.dev/errors/"+m;if(1"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(r){console.error(r)}}return e(),bh.exports=__(),bh.exports}/** - * @license React - * react-dom-client.production.js - * - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var xy;function j_(){if(xy)return di;xy=1;var e=E_(),r=Bi(),o=fb();function s(t){var n="https://react.dev/errors/"+t;if(1k||(t.current=P[k],P[k]=null,k--)}function Z(t,n){k++,P[k]=t.current,t.current=n}var te=B(null),he=B(null),fe=B(null),ne=B(null);function ae(t,n){switch(Z(fe,n),Z(he,t),Z(te,null),n.nodeType){case 9:case 11:t=(t=n.documentElement)&&(t=t.namespaceURI)?H0(t):0;break;default:if(t=n.tagName,n=n.namespaceURI)n=H0(n),t=I0(n,t);else switch(t){case"svg":t=1;break;case"math":t=2;break;default:t=0}}K(te),Z(te,t)}function me(){K(te),K(he),K(fe)}function ge(t){t.memoizedState!==null&&Z(ne,t);var n=te.current,a=I0(n,t.type);n!==a&&(Z(he,t),Z(te,a))}function se(t){he.current===t&&(K(te),K(he)),ne.current===t&&(K(ne),ai._currentValue=L)}var ie=Object.prototype.hasOwnProperty,de=e.unstable_scheduleCallback,pe=e.unstable_cancelCallback,we=e.unstable_shouldYield,Ne=e.unstable_requestPaint,Se=e.unstable_now,Ie=e.unstable_getCurrentPriorityLevel,St=e.unstable_ImmediatePriority,lt=e.unstable_UserBlockingPriority,ke=e.unstable_NormalPriority,Qe=e.unstable_LowPriority,xt=e.unstable_IdlePriority,hn=e.log,tn=e.unstable_setDisableYieldValue,nn=null,ht=null;function mn(t){if(typeof hn=="function"&&tn(t),ht&&typeof ht.setStrictMode=="function")try{ht.setStrictMode(nn,t)}catch{}}var Tt=Math.clz32?Math.clz32:nd,as=Math.log,td=Math.LN2;function nd(t){return t>>>=0,t===0?32:31-(as(t)/td|0)|0}var Yo=256,qo=4194304;function Gn(t){var n=t&42;if(n!==0)return n;switch(t&-t){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 16:return 16;case 32:return 32;case 64:return 64;case 128:return 128;case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return t&4194048;case 4194304:case 8388608:case 16777216:case 33554432:return t&62914560;case 67108864:return 67108864;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 
1073741824:return 0;default:return t}}function Go(t,n,a){var l=t.pendingLanes;if(l===0)return 0;var h=0,x=t.suspendedLanes,j=t.pingedLanes;t=t.warmLanes;var T=l&134217727;return T!==0?(l=T&~x,l!==0?h=Gn(l):(j&=T,j!==0?h=Gn(j):a||(a=T&~t,a!==0&&(h=Gn(a))))):(T=l&~x,T!==0?h=Gn(T):j!==0?h=Gn(j):a||(a=l&~t,a!==0&&(h=Gn(a)))),h===0?0:n!==0&&n!==h&&(n&x)===0&&(x=h&-h,a=n&-n,x>=a||x===32&&(a&4194048)!==0)?n:h}function io(t,n){return(t.pendingLanes&~(t.suspendedLanes&~t.pingedLanes)&n)===0}function rd(t,n){switch(t){case 1:case 2:case 4:case 8:case 64:return n+250;case 16:case 32:case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return n+5e3;case 4194304:case 8388608:case 16777216:case 33554432:return-1;case 67108864:case 134217728:case 268435456:case 536870912:case 1073741824:return-1;default:return-1}}function al(){var t=Yo;return Yo<<=1,(Yo&4194048)===0&&(Yo=256),t}function sl(){var t=qo;return qo<<=1,(qo&62914560)===0&&(qo=4194304),t}function ss(t){for(var n=[],a=0;31>a;a++)n.push(t);return n}function lo(t,n){t.pendingLanes|=n,n!==268435456&&(t.suspendedLanes=0,t.pingedLanes=0,t.warmLanes=0)}function od(t,n,a,l,h,x){var j=t.pendingLanes;t.pendingLanes=a,t.suspendedLanes=0,t.pingedLanes=0,t.warmLanes=0,t.expiredLanes&=a,t.entangledLanes&=a,t.errorRecoveryDisabledLanes&=a,t.shellSuspendCounter=0;var T=t.entanglements,V=t.expirationTimes,J=t.hiddenUpdates;for(a=j&~a;0)":-1h||V[l]!==J[h]){var le=` -`+V[l].replace(" at new "," at ");return t.displayName&&le.includes("")&&(le=le.replace("",t.displayName)),le}while(1<=l&&0<=h);break}}}finally{ms=!1,Error.prepareStackTrace=a}return(a=t?t.displayName||t.name:"")?Kn(a):""}function ud(t){switch(t.tag){case 26:case 27:case 5:return Kn(t.type);case 16:return Kn("Lazy");case 13:return Kn("Suspense");case 19:return Kn("SuspenseList");case 0:case 15:return ps(t.type,!1);case 11:return ps(t.type.render,!1);case 1:return 
ps(t.type,!0);case 31:return Kn("Activity");default:return""}}function ml(t){try{var n="";do n+=ud(t),t=t.return;while(t);return n}catch(a){return` -Error generating stack: `+a.message+` -`+a.stack}}function zt(t){switch(typeof t){case"bigint":case"boolean":case"number":case"string":case"undefined":return t;case"object":return t;default:return""}}function pl(t){var n=t.type;return(t=t.nodeName)&&t.toLowerCase()==="input"&&(n==="checkbox"||n==="radio")}function dd(t){var n=pl(t)?"checked":"value",a=Object.getOwnPropertyDescriptor(t.constructor.prototype,n),l=""+t[n];if(!t.hasOwnProperty(n)&&typeof a<"u"&&typeof a.get=="function"&&typeof a.set=="function"){var h=a.get,x=a.set;return Object.defineProperty(t,n,{configurable:!0,get:function(){return h.call(this)},set:function(j){l=""+j,x.call(this,j)}}),Object.defineProperty(t,n,{enumerable:a.enumerable}),{getValue:function(){return l},setValue:function(j){l=""+j},stopTracking:function(){t._valueTracker=null,delete t[n]}}}}function Zo(t){t._valueTracker||(t._valueTracker=dd(t))}function gs(t){if(!t)return!1;var n=t._valueTracker;if(!n)return!0;var a=n.getValue(),l="";return t&&(l=pl(t)?t.checked?"true":"false":t.value),t=l,t!==a?(n.setValue(t),!0):!1}function Ko(t){if(t=t||(typeof document<"u"?document:void 0),typeof t>"u")return null;try{return t.activeElement||t.body}catch{return t.body}}var fd=/[\n"\\]/g;function Lt(t){return t.replace(fd,function(n){return"\\"+n.charCodeAt(0).toString(16)+" "})}function uo(t,n,a,l,h,x,j,T){t.name="",j!=null&&typeof j!="function"&&typeof j!="symbol"&&typeof j!="boolean"?t.type=j:t.removeAttribute("type"),n!=null?j==="number"?(n===0&&t.value===""||t.value!=n)&&(t.value=""+zt(n)):t.value!==""+zt(n)&&(t.value=""+zt(n)):j!=="submit"&&j!=="reset"||t.removeAttribute("value"),n!=null?xs(t,j,zt(n)):a!=null?xs(t,j,zt(a)):l!=null&&t.removeAttribute("value"),h==null&&x!=null&&(t.defaultChecked=!!x),h!=null&&(t.checked=h&&typeof h!="function"&&typeof h!="symbol"),T!=null&&typeof 
T!="function"&&typeof T!="symbol"&&typeof T!="boolean"?t.name=""+zt(T):t.removeAttribute("name")}function gl(t,n,a,l,h,x,j,T){if(x!=null&&typeof x!="function"&&typeof x!="symbol"&&typeof x!="boolean"&&(t.type=x),n!=null||a!=null){if(!(x!=="submit"&&x!=="reset"||n!=null))return;a=a!=null?""+zt(a):"",n=n!=null?""+zt(n):a,T||n===t.value||(t.value=n),t.defaultValue=n}l=l??h,l=typeof l!="function"&&typeof l!="symbol"&&!!l,t.checked=T?t.checked:!!l,t.defaultChecked=!!l,j!=null&&typeof j!="function"&&typeof j!="symbol"&&typeof j!="boolean"&&(t.name=j)}function xs(t,n,a){n==="number"&&Ko(t.ownerDocument)===t||t.defaultValue===""+a||(t.defaultValue=""+a)}function Wn(t,n,a,l){if(t=t.options,n){n={};for(var h=0;h"u"||typeof window.document>"u"||typeof window.document.createElement>"u"),xd=!1;if(Qn)try{var vs={};Object.defineProperty(vs,"passive",{get:function(){xd=!0}}),window.addEventListener("test",vs,vs),window.removeEventListener("test",vs,vs)}catch{xd=!1}var jr=null,yd=null,yl=null;function Pp(){if(yl)return yl;var t,n=yd,a=n.length,l,h="value"in jr?jr.value:jr.textContent,x=h.length;for(t=0;t=Ss),Xp=" ",Fp=!1;function Zp(t,n){switch(t){case"keyup":return VN.indexOf(n.keyCode)!==-1;case"keydown":return n.keyCode!==229;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function Kp(t){return t=t.detail,typeof t=="object"&&"data"in t?t.data:null}var ea=!1;function qN(t,n){switch(t){case"compositionend":return Kp(n);case"keypress":return n.which!==32?null:(Fp=!0,Xp);case"textInput":return t=n.data,t===Xp&&Fp?null:t;default:return null}}function GN(t,n){if(ea)return t==="compositionend"||!Nd&&Zp(t,n)?(t=Pp(),yl=yd=jr=null,ea=!1,t):null;switch(t){case"paste":return null;case"keypress":if(!(n.ctrlKey||n.altKey||n.metaKey)||n.ctrlKey&&n.altKey){if(n.char&&1=n)return{node:a,offset:n-t};t=l}e:{for(;a;){if(a.nextSibling){a=a.nextSibling;break e}a=a.parentNode}a=void 0}a=og(a)}}function sg(t,n){return 
t&&n?t===n?!0:t&&t.nodeType===3?!1:n&&n.nodeType===3?sg(t,n.parentNode):"contains"in t?t.contains(n):t.compareDocumentPosition?!!(t.compareDocumentPosition(n)&16):!1:!1}function ig(t){t=t!=null&&t.ownerDocument!=null&&t.ownerDocument.defaultView!=null?t.ownerDocument.defaultView:window;for(var n=Ko(t.document);n instanceof t.HTMLIFrameElement;){try{var a=typeof n.contentWindow.location.href=="string"}catch{a=!1}if(a)t=n.contentWindow;else break;n=Ko(t.document)}return n}function jd(t){var n=t&&t.nodeName&&t.nodeName.toLowerCase();return n&&(n==="input"&&(t.type==="text"||t.type==="search"||t.type==="tel"||t.type==="url"||t.type==="password")||n==="textarea"||t.contentEditable==="true")}var eE=Qn&&"documentMode"in document&&11>=document.documentMode,ta=null,Cd=null,js=null,Ad=!1;function lg(t,n,a){var l=a.window===a?a.document:a.nodeType===9?a:a.ownerDocument;Ad||ta==null||ta!==Ko(l)||(l=ta,"selectionStart"in l&&jd(l)?l={start:l.selectionStart,end:l.selectionEnd}:(l=(l.ownerDocument&&l.ownerDocument.defaultView||window).getSelection(),l={anchorNode:l.anchorNode,anchorOffset:l.anchorOffset,focusNode:l.focusNode,focusOffset:l.focusOffset}),js&&_s(js,l)||(js=l,l=lc(Cd,"onSelect"),0>=j,h-=j,er=1<<32-Tt(n)+h|a<x?x:8;var j=C.T,T={};C.T=T,mf(t,!1,n,a);try{var V=h(),J=C.S;if(J!==null&&J(T,V),V!==null&&typeof V=="object"&&typeof V.then=="function"){var le=cE(V,l);Ps(t,n,le,Zt(t))}else Ps(t,n,l,Zt(t))}catch(ue){Ps(t,n,{then:function(){},status:"rejected",reason:ue},Zt())}finally{D.p=x,C.T=j}}function mE(){}function ff(t,n,a,l){if(t.tag!==5)throw Error(s(476));var h=cx(t).queue;lx(t,h,n,L,a===null?mE:function(){return ux(t),a(l)})}function cx(t){var n=t.memoizedState;if(n!==null)return n;n={memoizedState:L,baseState:L,baseQueue:null,queue:{pending:null,lanes:0,dispatch:null,lastRenderedReducer:or,lastRenderedState:L},next:null};var a={};return 
n.next={memoizedState:a,baseState:a,baseQueue:null,queue:{pending:null,lanes:0,dispatch:null,lastRenderedReducer:or,lastRenderedState:a},next:null},t.memoizedState=n,t=t.alternate,t!==null&&(t.memoizedState=n),n}function ux(t){var n=cx(t).next.queue;Ps(t,n,{},Zt())}function hf(){return Mt(ai)}function dx(){return dt().memoizedState}function fx(){return dt().memoizedState}function pE(t){for(var n=t.return;n!==null;){switch(n.tag){case 24:case 3:var a=Zt();t=Mr(a);var l=kr(n,t,a);l!==null&&(Kt(l,n,a),zs(l,n,a)),n={cache:$d()},t.payload=n;return}n=n.return}}function gE(t,n,a){var l=Zt();a={lane:l,revertLane:0,action:a,hasEagerState:!1,eagerState:null,next:null},$l(t)?mx(n,a):(a=Rd(t,n,a,l),a!==null&&(Kt(a,t,l),px(a,n,l)))}function hx(t,n,a){var l=Zt();Ps(t,n,a,l)}function Ps(t,n,a,l){var h={lane:l,revertLane:0,action:a,hasEagerState:!1,eagerState:null,next:null};if($l(t))mx(n,h);else{var x=t.alternate;if(t.lanes===0&&(x===null||x.lanes===0)&&(x=n.lastRenderedReducer,x!==null))try{var j=n.lastRenderedState,T=x(j,a);if(h.hasEagerState=!0,h.eagerState=T,Yt(T,j))return _l(t,n,h,0),Je===null&&El(),!1}catch{}finally{}if(a=Rd(t,n,h,l),a!==null)return Kt(a,t,l),px(a,n,l),!0}return!1}function mf(t,n,a,l){if(l={lane:2,revertLane:Gf(),action:l,hasEagerState:!1,eagerState:null,next:null},$l(t)){if(n)throw Error(s(479))}else n=Rd(t,a,l,2),n!==null&&Kt(n,t,2)}function $l(t){var n=t.alternate;return t===De||n!==null&&n===De}function mx(t,n){da=Ll=!0;var a=t.pending;a===null?n.next=n:(n.next=a.next,a.next=n),t.pending=n}function px(t,n,a){if((a&4194048)!==0){var l=n.lanes;l&=t.pendingLanes,a|=l,n.lanes=a,is(t,a)}}var 
Vl={readContext:Mt,use:Il,useCallback:st,useContext:st,useEffect:st,useImperativeHandle:st,useLayoutEffect:st,useInsertionEffect:st,useMemo:st,useReducer:st,useRef:st,useState:st,useDebugValue:st,useDeferredValue:st,useTransition:st,useSyncExternalStore:st,useId:st,useHostTransitionStatus:st,useFormState:st,useActionState:st,useOptimistic:st,useMemoCache:st,useCacheRefresh:st},gx={readContext:Mt,use:Il,useCallback:function(t,n){return It().memoizedState=[t,n===void 0?null:n],t},useContext:Mt,useEffect:Jg,useImperativeHandle:function(t,n,a){a=a!=null?a.concat([t]):null,Pl(4194308,4,rx.bind(null,n,t),a)},useLayoutEffect:function(t,n){return Pl(4194308,4,t,n)},useInsertionEffect:function(t,n){Pl(4,2,t,n)},useMemo:function(t,n){var a=It();n=n===void 0?null:n;var l=t();if(No){mn(!0);try{t()}finally{mn(!1)}}return a.memoizedState=[l,n],l},useReducer:function(t,n,a){var l=It();if(a!==void 0){var h=a(n);if(No){mn(!0);try{a(n)}finally{mn(!1)}}}else h=n;return l.memoizedState=l.baseState=h,t={pending:null,lanes:0,dispatch:null,lastRenderedReducer:t,lastRenderedState:h},l.queue=t,t=t.dispatch=gE.bind(null,De,t),[l.memoizedState,t]},useRef:function(t){var n=It();return t={current:t},n.memoizedState=t},useState:function(t){t=lf(t);var n=t.queue,a=hx.bind(null,De,n);return n.dispatch=a,[t.memoizedState,a]},useDebugValue:uf,useDeferredValue:function(t,n){var a=It();return df(a,t,n)},useTransition:function(){var t=lf(!1);return t=lx.bind(null,De,t.queue,!0,!1),It().memoizedState=t,[!1,t]},useSyncExternalStore:function(t,n,a){var l=De,h=It();if(Ye){if(a===void 0)throw Error(s(407));a=a()}else{if(a=n(),Je===null)throw Error(s(349));(Ue&124)!==0||Hg(l,n,a)}h.memoizedState=a;var x={value:a,getSnapshot:n};return h.queue=x,Jg(Bg.bind(null,l,x,t),[t]),l.flags|=2048,ha(9,Ul(),Ig.bind(null,l,x,a,n),null),a},useId:function(){var t=It(),n=Je.identifierPrefix;if(Ye){var a=tr,l=er;a=(l&~(1<<32-Tt(l)-1)).toString(32)+a,n="«"+n+"R"+a,a=Hl++,0Ae?(wt=Ee,Ee=null):wt=Ee.sibling;var 
$e=re(F,Ee,W[Ae],ce);if($e===null){Ee===null&&(Ee=wt);break}t&&Ee&&$e.alternate===null&&n(F,Ee),G=x($e,G,Ae),ze===null?xe=$e:ze.sibling=$e,ze=$e,Ee=wt}if(Ae===W.length)return a(F,Ee),Ye&&xo(F,Ae),xe;if(Ee===null){for(;AeAe?(wt=Ee,Ee=null):wt=Ee.sibling;var Xr=re(F,Ee,$e.value,ce);if(Xr===null){Ee===null&&(Ee=wt);break}t&&Ee&&Xr.alternate===null&&n(F,Ee),G=x(Xr,G,Ae),ze===null?xe=Xr:ze.sibling=Xr,ze=Xr,Ee=wt}if($e.done)return a(F,Ee),Ye&&xo(F,Ae),xe;if(Ee===null){for(;!$e.done;Ae++,$e=W.next())$e=ue(F,$e.value,ce),$e!==null&&(G=x($e,G,Ae),ze===null?xe=$e:ze.sibling=$e,ze=$e);return Ye&&xo(F,Ae),xe}for(Ee=l(Ee);!$e.done;Ae++,$e=W.next())$e=oe(Ee,F,Ae,$e.value,ce),$e!==null&&(t&&$e.alternate!==null&&Ee.delete($e.key===null?Ae:$e.key),G=x($e,G,Ae),ze===null?xe=$e:ze.sibling=$e,ze=$e);return t&&Ee.forEach(function(y_){return n(F,y_)}),Ye&&xo(F,Ae),xe}function Ke(F,G,W,ce){if(typeof W=="object"&&W!==null&&W.type===N&&W.key===null&&(W=W.props.children),typeof W=="object"&&W!==null){switch(W.$$typeof){case y:e:{for(var xe=W.key;G!==null;){if(G.key===xe){if(xe=W.type,xe===N){if(G.tag===7){a(F,G.sibling),ce=h(G,W.props.children),ce.return=F,F=ce;break e}}else if(G.elementType===xe||typeof xe=="object"&&xe!==null&&xe.$$typeof===U&&yx(xe)===G.type){a(F,G.sibling),ce=h(G,W.props),Vs(ce,W),ce.return=F,F=ce;break e}a(F,G);break}else n(F,G);G=G.sibling}W.type===N?(ce=po(W.props.children,F.mode,ce,W.key),ce.return=F,F=ce):(ce=Cl(W.type,W.key,W.props,null,F.mode,ce),Vs(ce,W),ce.return=F,F=ce)}return j(F);case w:e:{for(xe=W.key;G!==null;){if(G.key===xe)if(G.tag===4&&G.stateNode.containerInfo===W.containerInfo&&G.stateNode.implementation===W.implementation){a(F,G.sibling),ce=h(G,W.children||[]),ce.return=F,F=ce;break e}else{a(F,G);break}else n(F,G);G=G.sibling}ce=zd(W,F.mode,ce),ce.return=F,F=ce}return j(F);case U:return xe=W._init,W=xe(W._payload),Ke(F,G,W,ce)}if($(W))return Me(F,G,W,ce);if(q(W)){if(xe=q(W),typeof xe!="function")throw Error(s(150));return 
W=xe.call(W),Ce(F,G,W,ce)}if(typeof W.then=="function")return Ke(F,G,Yl(W),ce);if(W.$$typeof===E)return Ke(F,G,Tl(F,W),ce);ql(F,W)}return typeof W=="string"&&W!==""||typeof W=="number"||typeof W=="bigint"?(W=""+W,G!==null&&G.tag===6?(a(F,G.sibling),ce=h(G,W),ce.return=F,F=ce):(a(F,G),ce=Od(W,F.mode,ce),ce.return=F,F=ce),j(F)):a(F,G)}return function(F,G,W,ce){try{$s=0;var xe=Ke(F,G,W,ce);return ma=null,xe}catch(Ee){if(Ee===Ds||Ee===Dl)throw Ee;var ze=qt(29,Ee,null,F.mode);return ze.lanes=ce,ze.return=F,ze}finally{}}}var pa=vx(!0),bx=vx(!1),ln=B(null),Mn=null;function Rr(t){var n=t.alternate;Z(pt,pt.current&1),Z(ln,t),Mn===null&&(n===null||ua.current!==null||n.memoizedState!==null)&&(Mn=t)}function wx(t){if(t.tag===22){if(Z(pt,pt.current),Z(ln,t),Mn===null){var n=t.alternate;n!==null&&n.memoizedState!==null&&(Mn=t)}}else Dr()}function Dr(){Z(pt,pt.current),Z(ln,ln.current)}function ar(t){K(ln),Mn===t&&(Mn=null),K(pt)}var pt=B(0);function Gl(t){for(var n=t;n!==null;){if(n.tag===13){var a=n.memoizedState;if(a!==null&&(a=a.dehydrated,a===null||a.data==="$?"||oh(a)))return n}else if(n.tag===19&&n.memoizedProps.revealOrder!==void 0){if((n.flags&128)!==0)return n}else if(n.child!==null){n.child.return=n,n=n.child;continue}if(n===t)break;for(;n.sibling===null;){if(n.return===null||n.return===t)return null;n=n.return}n.sibling.return=n.return,n=n.sibling}return null}function pf(t,n,a,l){n=t.memoizedState,a=a(l,n),a=a==null?n:g({},n,a),t.memoizedState=a,t.lanes===0&&(t.updateQueue.baseState=a)}var gf={enqueueSetState:function(t,n,a){t=t._reactInternals;var l=Zt(),h=Mr(l);h.payload=n,a!=null&&(h.callback=a),n=kr(t,h,l),n!==null&&(Kt(n,t,l),zs(n,t,l))},enqueueReplaceState:function(t,n,a){t=t._reactInternals;var l=Zt(),h=Mr(l);h.tag=1,h.payload=n,a!=null&&(h.callback=a),n=kr(t,h,l),n!==null&&(Kt(n,t,l),zs(n,t,l))},enqueueForceUpdate:function(t,n){t=t._reactInternals;var a=Zt(),l=Mr(a);l.tag=2,n!=null&&(l.callback=n),n=kr(t,l,a),n!==null&&(Kt(n,t,a),zs(n,t,a))}};function 
Sx(t,n,a,l,h,x,j){return t=t.stateNode,typeof t.shouldComponentUpdate=="function"?t.shouldComponentUpdate(l,x,j):n.prototype&&n.prototype.isPureReactComponent?!_s(a,l)||!_s(h,x):!0}function Nx(t,n,a,l){t=n.state,typeof n.componentWillReceiveProps=="function"&&n.componentWillReceiveProps(a,l),typeof n.UNSAFE_componentWillReceiveProps=="function"&&n.UNSAFE_componentWillReceiveProps(a,l),n.state!==t&&gf.enqueueReplaceState(n,n.state,null)}function Eo(t,n){var a=n;if("ref"in n){a={};for(var l in n)l!=="ref"&&(a[l]=n[l])}if(t=t.defaultProps){a===n&&(a=g({},a));for(var h in t)a[h]===void 0&&(a[h]=t[h])}return a}var Xl=typeof reportError=="function"?reportError:function(t){if(typeof window=="object"&&typeof window.ErrorEvent=="function"){var n=new window.ErrorEvent("error",{bubbles:!0,cancelable:!0,message:typeof t=="object"&&t!==null&&typeof t.message=="string"?String(t.message):String(t),error:t});if(!window.dispatchEvent(n))return}else if(typeof process=="object"&&typeof process.emit=="function"){process.emit("uncaughtException",t);return}console.error(t)};function Ex(t){Xl(t)}function _x(t){console.error(t)}function jx(t){Xl(t)}function Fl(t,n){try{var a=t.onUncaughtError;a(n.value,{componentStack:n.stack})}catch(l){setTimeout(function(){throw l})}}function Cx(t,n,a){try{var l=t.onCaughtError;l(a.value,{componentStack:a.stack,errorBoundary:n.tag===1?n.stateNode:null})}catch(h){setTimeout(function(){throw h})}}function xf(t,n,a){return a=Mr(a),a.tag=3,a.payload={element:null},a.callback=function(){Fl(t,n)},a}function Ax(t){return t=Mr(t),t.tag=3,t}function Mx(t,n,a,l){var h=a.type.getDerivedStateFromError;if(typeof h=="function"){var x=l.value;t.payload=function(){return h(x)},t.callback=function(){Cx(n,a,l)}}var j=a.stateNode;j!==null&&typeof j.componentDidCatch=="function"&&(t.callback=function(){Cx(n,a,l),typeof h!="function"&&(Br===null?Br=new Set([this]):Br.add(this));var T=l.stack;this.componentDidCatch(l.value,{componentStack:T!==null?T:""})})}function 
yE(t,n,a,l,h){if(a.flags|=32768,l!==null&&typeof l=="object"&&typeof l.then=="function"){if(n=a.alternate,n!==null&&ks(n,a,h,!0),a=ln.current,a!==null){switch(a.tag){case 13:return Mn===null?Pf():a.alternate===null&&at===0&&(at=3),a.flags&=-257,a.flags|=65536,a.lanes=h,l===qd?a.flags|=16384:(n=a.updateQueue,n===null?a.updateQueue=new Set([l]):n.add(l),Vf(t,l,h)),!1;case 22:return a.flags|=65536,l===qd?a.flags|=16384:(n=a.updateQueue,n===null?(n={transitions:null,markerInstances:null,retryQueue:new Set([l])},a.updateQueue=n):(a=n.retryQueue,a===null?n.retryQueue=new Set([l]):a.add(l)),Vf(t,l,h)),!1}throw Error(s(435,a.tag))}return Vf(t,l,h),Pf(),!1}if(Ye)return n=ln.current,n!==null?((n.flags&65536)===0&&(n.flags|=256),n.flags|=65536,n.lanes=h,l!==Id&&(t=Error(s(422),{cause:l}),Ms(rn(t,a)))):(l!==Id&&(n=Error(s(423),{cause:l}),Ms(rn(n,a))),t=t.current.alternate,t.flags|=65536,h&=-h,t.lanes|=h,l=rn(l,a),h=xf(t.stateNode,l,h),Fd(t,h),at!==4&&(at=2)),!1;var x=Error(s(520),{cause:l});if(x=rn(x,a),Ks===null?Ks=[x]:Ks.push(x),at!==4&&(at=2),n===null)return!0;l=rn(l,a),a=n;do{switch(a.tag){case 3:return a.flags|=65536,t=h&-h,a.lanes|=t,t=xf(a.stateNode,l,t),Fd(a,t),!1;case 1:if(n=a.type,x=a.stateNode,(a.flags&128)===0&&(typeof n.getDerivedStateFromError=="function"||x!==null&&typeof x.componentDidCatch=="function"&&(Br===null||!Br.has(x))))return a.flags|=65536,h&=-h,a.lanes|=h,h=Ax(h),Mx(h,t,a,l),Fd(a,h),!1}a=a.return}while(a!==null);return!1}var kx=Error(s(461)),vt=!1;function Nt(t,n,a,l){n.child=t===null?bx(n,null,a,l):pa(n,t.child,a,l)}function Tx(t,n,a,l,h){a=a.render;var x=n.ref;if("ref"in l){var j={};for(var T in l)T!=="ref"&&(j[T]=l[T])}else j=l;return wo(n),l=Jd(t,n,a,j,x,h),T=ef(),t!==null&&!vt?(tf(t,n,h),sr(t,n,h)):(Ye&&T&&Ld(n),n.flags|=1,Nt(t,n,l,h),n.child)}function Rx(t,n,a,l,h){if(t===null){var x=a.type;return typeof x=="function"&&!Dd(x)&&x.defaultProps===void 
0&&a.compare===null?(n.tag=15,n.type=x,Dx(t,n,x,l,h)):(t=Cl(a.type,null,l,n,n.mode,h),t.ref=n.ref,t.return=n,n.child=t)}if(x=t.child,!_f(t,h)){var j=x.memoizedProps;if(a=a.compare,a=a!==null?a:_s,a(j,l)&&t.ref===n.ref)return sr(t,n,h)}return n.flags|=1,t=Jn(x,l),t.ref=n.ref,t.return=n,n.child=t}function Dx(t,n,a,l,h){if(t!==null){var x=t.memoizedProps;if(_s(x,l)&&t.ref===n.ref)if(vt=!1,n.pendingProps=l=x,_f(t,h))(t.flags&131072)!==0&&(vt=!0);else return n.lanes=t.lanes,sr(t,n,h)}return yf(t,n,a,l,h)}function Ox(t,n,a){var l=n.pendingProps,h=l.children,x=t!==null?t.memoizedState:null;if(l.mode==="hidden"){if((n.flags&128)!==0){if(l=x!==null?x.baseLanes|a:a,t!==null){for(h=n.child=t.child,x=0;h!==null;)x=x|h.lanes|h.childLanes,h=h.sibling;n.childLanes=x&~l}else n.childLanes=0,n.child=null;return zx(t,n,l,a)}if((a&536870912)!==0)n.memoizedState={baseLanes:0,cachePool:null},t!==null&&Rl(n,x!==null?x.cachePool:null),x!==null?Dg(n,x):Kd(),wx(n);else return n.lanes=n.childLanes=536870912,zx(t,n,x!==null?x.baseLanes|a:a,a)}else x!==null?(Rl(n,x.cachePool),Dg(n,x),Dr(),n.memoizedState=null):(t!==null&&Rl(n,null),Kd(),Dr());return Nt(t,n,h,a),n.child}function zx(t,n,a,l){var h=Yd();return h=h===null?null:{parent:mt._currentValue,pool:h},n.memoizedState={baseLanes:a,cachePool:h},t!==null&&Rl(n,null),Kd(),wx(n),t!==null&&ks(t,n,l,!0),null}function Zl(t,n){var a=n.ref;if(a===null)t!==null&&t.ref!==null&&(n.flags|=4194816);else{if(typeof a!="function"&&typeof a!="object")throw Error(s(284));(t===null||t.ref!==a)&&(n.flags|=4194816)}}function yf(t,n,a,l,h){return wo(n),a=Jd(t,n,a,l,void 0,h),l=ef(),t!==null&&!vt?(tf(t,n,h),sr(t,n,h)):(Ye&&l&&Ld(n),n.flags|=1,Nt(t,n,a,h),n.child)}function Lx(t,n,a,l,h,x){return wo(n),n.updateQueue=null,a=zg(n,l,a,h),Og(t),l=ef(),t!==null&&!vt?(tf(t,n,x),sr(t,n,x)):(Ye&&l&&Ld(n),n.flags|=1,Nt(t,n,a,x),n.child)}function Hx(t,n,a,l,h){if(wo(n),n.stateNode===null){var x=aa,j=a.contextType;typeof j=="object"&&j!==null&&(x=Mt(j)),x=new 
a(l,x),n.memoizedState=x.state!==null&&x.state!==void 0?x.state:null,x.updater=gf,n.stateNode=x,x._reactInternals=n,x=n.stateNode,x.props=l,x.state=n.memoizedState,x.refs={},Gd(n),j=a.contextType,x.context=typeof j=="object"&&j!==null?Mt(j):aa,x.state=n.memoizedState,j=a.getDerivedStateFromProps,typeof j=="function"&&(pf(n,a,j,l),x.state=n.memoizedState),typeof a.getDerivedStateFromProps=="function"||typeof x.getSnapshotBeforeUpdate=="function"||typeof x.UNSAFE_componentWillMount!="function"&&typeof x.componentWillMount!="function"||(j=x.state,typeof x.componentWillMount=="function"&&x.componentWillMount(),typeof x.UNSAFE_componentWillMount=="function"&&x.UNSAFE_componentWillMount(),j!==x.state&&gf.enqueueReplaceState(x,x.state,null),Hs(n,l,x,h),Ls(),x.state=n.memoizedState),typeof x.componentDidMount=="function"&&(n.flags|=4194308),l=!0}else if(t===null){x=n.stateNode;var T=n.memoizedProps,V=Eo(a,T);x.props=V;var J=x.context,le=a.contextType;j=aa,typeof le=="object"&&le!==null&&(j=Mt(le));var ue=a.getDerivedStateFromProps;le=typeof ue=="function"||typeof x.getSnapshotBeforeUpdate=="function",T=n.pendingProps!==T,le||typeof x.UNSAFE_componentWillReceiveProps!="function"&&typeof x.componentWillReceiveProps!="function"||(T||J!==j)&&Nx(n,x,l,j),Ar=!1;var re=n.memoizedState;x.state=re,Hs(n,l,x,h),Ls(),J=n.memoizedState,T||re!==J||Ar?(typeof ue=="function"&&(pf(n,a,ue,l),J=n.memoizedState),(V=Ar||Sx(n,a,V,l,re,J,j))?(le||typeof x.UNSAFE_componentWillMount!="function"&&typeof x.componentWillMount!="function"||(typeof x.componentWillMount=="function"&&x.componentWillMount(),typeof x.UNSAFE_componentWillMount=="function"&&x.UNSAFE_componentWillMount()),typeof x.componentDidMount=="function"&&(n.flags|=4194308)):(typeof x.componentDidMount=="function"&&(n.flags|=4194308),n.memoizedProps=l,n.memoizedState=J),x.props=l,x.state=J,x.context=j,l=V):(typeof 
x.componentDidMount=="function"&&(n.flags|=4194308),l=!1)}else{x=n.stateNode,Xd(t,n),j=n.memoizedProps,le=Eo(a,j),x.props=le,ue=n.pendingProps,re=x.context,J=a.contextType,V=aa,typeof J=="object"&&J!==null&&(V=Mt(J)),T=a.getDerivedStateFromProps,(J=typeof T=="function"||typeof x.getSnapshotBeforeUpdate=="function")||typeof x.UNSAFE_componentWillReceiveProps!="function"&&typeof x.componentWillReceiveProps!="function"||(j!==ue||re!==V)&&Nx(n,x,l,V),Ar=!1,re=n.memoizedState,x.state=re,Hs(n,l,x,h),Ls();var oe=n.memoizedState;j!==ue||re!==oe||Ar||t!==null&&t.dependencies!==null&&kl(t.dependencies)?(typeof T=="function"&&(pf(n,a,T,l),oe=n.memoizedState),(le=Ar||Sx(n,a,le,l,re,oe,V)||t!==null&&t.dependencies!==null&&kl(t.dependencies))?(J||typeof x.UNSAFE_componentWillUpdate!="function"&&typeof x.componentWillUpdate!="function"||(typeof x.componentWillUpdate=="function"&&x.componentWillUpdate(l,oe,V),typeof x.UNSAFE_componentWillUpdate=="function"&&x.UNSAFE_componentWillUpdate(l,oe,V)),typeof x.componentDidUpdate=="function"&&(n.flags|=4),typeof x.getSnapshotBeforeUpdate=="function"&&(n.flags|=1024)):(typeof x.componentDidUpdate!="function"||j===t.memoizedProps&&re===t.memoizedState||(n.flags|=4),typeof x.getSnapshotBeforeUpdate!="function"||j===t.memoizedProps&&re===t.memoizedState||(n.flags|=1024),n.memoizedProps=l,n.memoizedState=oe),x.props=l,x.state=oe,x.context=V,l=le):(typeof x.componentDidUpdate!="function"||j===t.memoizedProps&&re===t.memoizedState||(n.flags|=4),typeof x.getSnapshotBeforeUpdate!="function"||j===t.memoizedProps&&re===t.memoizedState||(n.flags|=1024),l=!1)}return x=l,Zl(t,n),l=(n.flags&128)!==0,x||l?(x=n.stateNode,a=l&&typeof a.getDerivedStateFromError!="function"?null:x.render(),n.flags|=1,t!==null&&l?(n.child=pa(n,t.child,null,h),n.child=pa(n,null,a,h)):Nt(t,n,a,h),n.memoizedState=x.state,t=n.child):t=sr(t,n,h),t}function Ix(t,n,a,l){return As(),n.flags|=256,Nt(t,n,a,l),n.child}var 
vf={dehydrated:null,treeContext:null,retryLane:0,hydrationErrors:null};function bf(t){return{baseLanes:t,cachePool:_g()}}function wf(t,n,a){return t=t!==null?t.childLanes&~a:0,n&&(t|=cn),t}function Bx(t,n,a){var l=n.pendingProps,h=!1,x=(n.flags&128)!==0,j;if((j=x)||(j=t!==null&&t.memoizedState===null?!1:(pt.current&2)!==0),j&&(h=!0,n.flags&=-129),j=(n.flags&32)!==0,n.flags&=-33,t===null){if(Ye){if(h?Rr(n):Dr(),Ye){var T=ot,V;if(V=T){e:{for(V=T,T=An;V.nodeType!==8;){if(!T){T=null;break e}if(V=yn(V.nextSibling),V===null){T=null;break e}}T=V}T!==null?(n.memoizedState={dehydrated:T,treeContext:go!==null?{id:er,overflow:tr}:null,retryLane:536870912,hydrationErrors:null},V=qt(18,null,null,0),V.stateNode=T,V.return=n,n.child=V,Rt=n,ot=null,V=!0):V=!1}V||vo(n)}if(T=n.memoizedState,T!==null&&(T=T.dehydrated,T!==null))return oh(T)?n.lanes=32:n.lanes=536870912,null;ar(n)}return T=l.children,l=l.fallback,h?(Dr(),h=n.mode,T=Kl({mode:"hidden",children:T},h),l=po(l,h,a,null),T.return=n,l.return=n,T.sibling=l,n.child=T,h=n.child,h.memoizedState=bf(a),h.childLanes=wf(t,j,a),n.memoizedState=vf,l):(Rr(n),Sf(n,T))}if(V=t.memoizedState,V!==null&&(T=V.dehydrated,T!==null)){if(x)n.flags&256?(Rr(n),n.flags&=-257,n=Nf(t,n,a)):n.memoizedState!==null?(Dr(),n.child=t.child,n.flags|=128,n=null):(Dr(),h=l.fallback,T=n.mode,l=Kl({mode:"visible",children:l.children},T),h=po(h,T,a,null),h.flags|=2,l.return=n,h.return=n,l.sibling=h,n.child=l,pa(n,t.child,null,a),l=n.child,l.memoizedState=bf(a),l.childLanes=wf(t,j,a),n.memoizedState=vf,n=h);else if(Rr(n),oh(T)){if(j=T.nextSibling&&T.nextSibling.dataset,j)var J=j.dgst;j=J,l=Error(s(419)),l.stack="",l.digest=j,Ms({value:l,source:null,stack:null}),n=Nf(t,n,a)}else if(vt||ks(t,n,a,!1),j=(a&t.childLanes)!==0,vt||j){if(j=Je,j!==null&&(l=a&-a,l=(l&42)!==0?1:ls(l),l=(l&(j.suspendedLanes|a))!==0?0:l,l!==0&&l!==V.retryLane))throw V.retryLane=l,oa(t,l),Kt(j,t,l),kx;T.data==="$?"||Pf(),n=Nf(t,n,a)}else 
T.data==="$?"?(n.flags|=192,n.child=t.child,n=null):(t=V.treeContext,ot=yn(T.nextSibling),Rt=n,Ye=!0,yo=null,An=!1,t!==null&&(an[sn++]=er,an[sn++]=tr,an[sn++]=go,er=t.id,tr=t.overflow,go=n),n=Sf(n,l.children),n.flags|=4096);return n}return h?(Dr(),h=l.fallback,T=n.mode,V=t.child,J=V.sibling,l=Jn(V,{mode:"hidden",children:l.children}),l.subtreeFlags=V.subtreeFlags&65011712,J!==null?h=Jn(J,h):(h=po(h,T,a,null),h.flags|=2),h.return=n,l.return=n,l.sibling=h,n.child=l,l=h,h=n.child,T=t.child.memoizedState,T===null?T=bf(a):(V=T.cachePool,V!==null?(J=mt._currentValue,V=V.parent!==J?{parent:J,pool:J}:V):V=_g(),T={baseLanes:T.baseLanes|a,cachePool:V}),h.memoizedState=T,h.childLanes=wf(t,j,a),n.memoizedState=vf,l):(Rr(n),a=t.child,t=a.sibling,a=Jn(a,{mode:"visible",children:l.children}),a.return=n,a.sibling=null,t!==null&&(j=n.deletions,j===null?(n.deletions=[t],n.flags|=16):j.push(t)),n.child=a,n.memoizedState=null,a)}function Sf(t,n){return n=Kl({mode:"visible",children:n},t.mode),n.return=t,t.child=n}function Kl(t,n){return t=qt(22,t,null,n),t.lanes=0,t.stateNode={_visibility:1,_pendingMarkers:null,_retryCache:null,_transitions:null},t}function Nf(t,n,a){return pa(n,t.child,null,a),t=Sf(n,n.pendingProps.children),t.flags|=2,n.memoizedState=null,t}function Ux(t,n,a){t.lanes|=n;var l=t.alternate;l!==null&&(l.lanes|=n),Ud(t.return,n,a)}function Ef(t,n,a,l,h){var x=t.memoizedState;x===null?t.memoizedState={isBackwards:n,rendering:null,renderingStartTime:0,last:l,tail:a,tailMode:h}:(x.isBackwards=n,x.rendering=null,x.renderingStartTime=0,x.last=l,x.tail=a,x.tailMode=h)}function Px(t,n,a){var l=n.pendingProps,h=l.revealOrder,x=l.tail;if(Nt(t,n,l.children,a),l=pt.current,(l&2)!==0)l=l&1|2,n.flags|=128;else{if(t!==null&&(t.flags&128)!==0)e:for(t=n.child;t!==null;){if(t.tag===13)t.memoizedState!==null&&Ux(t,a,n);else if(t.tag===19)Ux(t,a,n);else if(t.child!==null){t.child.return=t,t=t.child;continue}if(t===n)break e;for(;t.sibling===null;){if(t.return===null||t.return===n)break 
e;t=t.return}t.sibling.return=t.return,t=t.sibling}l&=1}switch(Z(pt,l),h){case"forwards":for(a=n.child,h=null;a!==null;)t=a.alternate,t!==null&&Gl(t)===null&&(h=a),a=a.sibling;a=h,a===null?(h=n.child,n.child=null):(h=a.sibling,a.sibling=null),Ef(n,!1,h,a,x);break;case"backwards":for(a=null,h=n.child,n.child=null;h!==null;){if(t=h.alternate,t!==null&&Gl(t)===null){n.child=h;break}t=h.sibling,h.sibling=a,a=h,h=t}Ef(n,!0,a,null,x);break;case"together":Ef(n,!1,null,null,void 0);break;default:n.memoizedState=null}return n.child}function sr(t,n,a){if(t!==null&&(n.dependencies=t.dependencies),Ir|=n.lanes,(a&n.childLanes)===0)if(t!==null){if(ks(t,n,a,!1),(a&n.childLanes)===0)return null}else return null;if(t!==null&&n.child!==t.child)throw Error(s(153));if(n.child!==null){for(t=n.child,a=Jn(t,t.pendingProps),n.child=a,a.return=n;t.sibling!==null;)t=t.sibling,a=a.sibling=Jn(t,t.pendingProps),a.return=n;a.sibling=null}return n.child}function _f(t,n){return(t.lanes&n)!==0?!0:(t=t.dependencies,!!(t!==null&&kl(t)))}function vE(t,n,a){switch(n.tag){case 3:ae(n,n.stateNode.containerInfo),Cr(n,mt,t.memoizedState.cache),As();break;case 27:case 5:ge(n);break;case 4:ae(n,n.stateNode.containerInfo);break;case 10:Cr(n,n.type,n.memoizedProps.value);break;case 13:var l=n.memoizedState;if(l!==null)return l.dehydrated!==null?(Rr(n),n.flags|=128,null):(a&n.child.childLanes)!==0?Bx(t,n,a):(Rr(n),t=sr(t,n,a),t!==null?t.sibling:null);Rr(n);break;case 19:var h=(t.flags&128)!==0;if(l=(a&n.childLanes)!==0,l||(ks(t,n,a,!1),l=(a&n.childLanes)!==0),h){if(l)return Px(t,n,a);n.flags|=128}if(h=n.memoizedState,h!==null&&(h.rendering=null,h.tail=null,h.lastEffect=null),Z(pt,pt.current),l)break;return null;case 22:case 23:return n.lanes=0,Ox(t,n,a);case 24:Cr(n,mt,t.memoizedState.cache)}return sr(t,n,a)}function $x(t,n,a){if(t!==null)if(t.memoizedProps!==n.pendingProps)vt=!0;else{if(!_f(t,a)&&(n.flags&128)===0)return vt=!1,vE(t,n,a);vt=(t.flags&131072)!==0}else 
vt=!1,Ye&&(n.flags&1048576)!==0&&yg(n,Ml,n.index);switch(n.lanes=0,n.tag){case 16:e:{t=n.pendingProps;var l=n.elementType,h=l._init;if(l=h(l._payload),n.type=l,typeof l=="function")Dd(l)?(t=Eo(l,t),n.tag=1,n=Hx(null,n,l,t,a)):(n.tag=0,n=yf(null,n,l,t,a));else{if(l!=null){if(h=l.$$typeof,h===M){n.tag=11,n=Tx(null,n,l,t,a);break e}else if(h===I){n.tag=14,n=Rx(null,n,l,t,a);break e}}throw n=z(l)||l,Error(s(306,n,""))}}return n;case 0:return yf(t,n,n.type,n.pendingProps,a);case 1:return l=n.type,h=Eo(l,n.pendingProps),Hx(t,n,l,h,a);case 3:e:{if(ae(n,n.stateNode.containerInfo),t===null)throw Error(s(387));l=n.pendingProps;var x=n.memoizedState;h=x.element,Xd(t,n),Hs(n,l,null,a);var j=n.memoizedState;if(l=j.cache,Cr(n,mt,l),l!==x.cache&&Pd(n,[mt],a,!0),Ls(),l=j.element,x.isDehydrated)if(x={element:l,isDehydrated:!1,cache:j.cache},n.updateQueue.baseState=x,n.memoizedState=x,n.flags&256){n=Ix(t,n,l,a);break e}else if(l!==h){h=rn(Error(s(424)),n),Ms(h),n=Ix(t,n,l,a);break e}else{switch(t=n.stateNode.containerInfo,t.nodeType){case 9:t=t.body;break;default:t=t.nodeName==="HTML"?t.ownerDocument.body:t}for(ot=yn(t.firstChild),Rt=n,Ye=!0,yo=null,An=!0,a=bx(n,null,l,a),n.child=a;a;)a.flags=a.flags&-3|4096,a=a.sibling}else{if(As(),l===h){n=sr(t,n,a);break e}Nt(t,n,l,a)}n=n.child}return n;case 26:return Zl(t,n),t===null?(a=G0(n.type,null,n.pendingProps,null))?n.memoizedState=a:Ye||(a=n.type,t=n.pendingProps,l=uc(fe.current).createElement(a),l[yt]=n,l[At]=t,_t(l,a,t),ct(l),n.stateNode=l):n.memoizedState=G0(n.type,t.memoizedProps,n.pendingProps,t.memoizedState),null;case 27:return ge(n),t===null&&Ye&&(l=n.stateNode=V0(n.type,n.pendingProps,fe.current),Rt=n,An=!0,h=ot,$r(n.type)?(ah=h,ot=yn(l.firstChild)):ot=h),Nt(t,n,n.pendingProps.children,a),Zl(t,n),t===null&&(n.flags|=4194304),n.child;case 5:return 
t===null&&Ye&&((h=l=ot)&&(l=XE(l,n.type,n.pendingProps,An),l!==null?(n.stateNode=l,Rt=n,ot=yn(l.firstChild),An=!1,h=!0):h=!1),h||vo(n)),ge(n),h=n.type,x=n.pendingProps,j=t!==null?t.memoizedProps:null,l=x.children,th(h,x)?l=null:j!==null&&th(h,j)&&(n.flags|=32),n.memoizedState!==null&&(h=Jd(t,n,dE,null,null,a),ai._currentValue=h),Zl(t,n),Nt(t,n,l,a),n.child;case 6:return t===null&&Ye&&((t=a=ot)&&(a=FE(a,n.pendingProps,An),a!==null?(n.stateNode=a,Rt=n,ot=null,t=!0):t=!1),t||vo(n)),null;case 13:return Bx(t,n,a);case 4:return ae(n,n.stateNode.containerInfo),l=n.pendingProps,t===null?n.child=pa(n,null,l,a):Nt(t,n,l,a),n.child;case 11:return Tx(t,n,n.type,n.pendingProps,a);case 7:return Nt(t,n,n.pendingProps,a),n.child;case 8:return Nt(t,n,n.pendingProps.children,a),n.child;case 12:return Nt(t,n,n.pendingProps.children,a),n.child;case 10:return l=n.pendingProps,Cr(n,n.type,l.value),Nt(t,n,l.children,a),n.child;case 9:return h=n.type._context,l=n.pendingProps.children,wo(n),h=Mt(h),l=l(h),n.flags|=1,Nt(t,n,l,a),n.child;case 14:return Rx(t,n,n.type,n.pendingProps,a);case 15:return Dx(t,n,n.type,n.pendingProps,a);case 19:return Px(t,n,a);case 31:return l=n.pendingProps,a=n.mode,l={mode:l.mode,children:l.children},t===null?(a=Kl(l,a),a.ref=n.ref,n.child=a,a.return=n,n=a):(a=Jn(t.child,l),a.ref=n.ref,n.child=a,a.return=n,n=a),n;case 22:return Ox(t,n,a);case 24:return wo(n),l=Mt(mt),t===null?(h=Yd(),h===null&&(h=Je,x=$d(),h.pooledCache=x,x.refCount++,x!==null&&(h.pooledCacheLanes|=a),h=x),n.memoizedState={parent:l,cache:h},Gd(n),Cr(n,mt,h)):((t.lanes&a)!==0&&(Xd(t,n),Hs(n,null,null,a),Ls()),h=t.memoizedState,x=n.memoizedState,h.parent!==l?(h={parent:l,cache:l},n.memoizedState=h,n.lanes===0&&(n.memoizedState=n.updateQueue.baseState=h),Cr(n,mt,l)):(l=x.cache,Cr(n,mt,l),l!==h.cache&&Pd(n,[mt],a,!0))),Nt(t,n,n.pendingProps.children,a),n.child;case 29:throw n.pendingProps}throw Error(s(156,n.tag))}function ir(t){t.flags|=4}function 
Vx(t,n){if(n.type!=="stylesheet"||(n.state.loading&4)!==0)t.flags&=-16777217;else if(t.flags|=16777216,!W0(n)){if(n=ln.current,n!==null&&((Ue&4194048)===Ue?Mn!==null:(Ue&62914560)!==Ue&&(Ue&536870912)===0||n!==Mn))throw Os=qd,jg;t.flags|=8192}}function Wl(t,n){n!==null&&(t.flags|=4),t.flags&16384&&(n=t.tag!==22?sl():536870912,t.lanes|=n,va|=n)}function Ys(t,n){if(!Ye)switch(t.tailMode){case"hidden":n=t.tail;for(var a=null;n!==null;)n.alternate!==null&&(a=n),n=n.sibling;a===null?t.tail=null:a.sibling=null;break;case"collapsed":a=t.tail;for(var l=null;a!==null;)a.alternate!==null&&(l=a),a=a.sibling;l===null?n||t.tail===null?t.tail=null:t.tail.sibling=null:l.sibling=null}}function nt(t){var n=t.alternate!==null&&t.alternate.child===t.child,a=0,l=0;if(n)for(var h=t.child;h!==null;)a|=h.lanes|h.childLanes,l|=h.subtreeFlags&65011712,l|=h.flags&65011712,h.return=t,h=h.sibling;else for(h=t.child;h!==null;)a|=h.lanes|h.childLanes,l|=h.subtreeFlags,l|=h.flags,h.return=t,h=h.sibling;return t.subtreeFlags|=l,t.childLanes=a,n}function bE(t,n,a){var l=n.pendingProps;switch(Hd(n),n.tag){case 31:case 16:case 15:case 0:case 11:case 7:case 8:case 12:case 9:case 14:return nt(n),null;case 1:return nt(n),null;case 3:return a=n.stateNode,l=null,t!==null&&(l=t.memoizedState.cache),n.memoizedState.cache!==l&&(n.flags|=2048),rr(mt),me(),a.pendingContext&&(a.context=a.pendingContext,a.pendingContext=null),(t===null||t.child===null)&&(Cs(n)?ir(n):t===null||t.memoizedState.isDehydrated&&(n.flags&256)===0||(n.flags|=1024,wg())),nt(n),null;case 26:return a=n.memoizedState,t===null?(ir(n),a!==null?(nt(n),Vx(n,a)):(nt(n),n.flags&=-16777217)):a?a!==t.memoizedState?(ir(n),nt(n),Vx(n,a)):(nt(n),n.flags&=-16777217):(t.memoizedProps!==l&&ir(n),nt(n),n.flags&=-16777217),null;case 27:se(n),a=fe.current;var h=n.type;if(t!==null&&n.stateNode!=null)t.memoizedProps!==l&&ir(n);else{if(!l){if(n.stateNode===null)throw Error(s(166));return 
nt(n),null}t=te.current,Cs(n)?vg(n):(t=V0(h,l,a),n.stateNode=t,ir(n))}return nt(n),null;case 5:if(se(n),a=n.type,t!==null&&n.stateNode!=null)t.memoizedProps!==l&&ir(n);else{if(!l){if(n.stateNode===null)throw Error(s(166));return nt(n),null}if(t=te.current,Cs(n))vg(n);else{switch(h=uc(fe.current),t){case 1:t=h.createElementNS("http://www.w3.org/2000/svg",a);break;case 2:t=h.createElementNS("http://www.w3.org/1998/Math/MathML",a);break;default:switch(a){case"svg":t=h.createElementNS("http://www.w3.org/2000/svg",a);break;case"math":t=h.createElementNS("http://www.w3.org/1998/Math/MathML",a);break;case"script":t=h.createElement("div"),t.innerHTML=" - + +
diff --git a/python/packages/devui/dev.md b/python/packages/devui/dev.md index 3d74f19970..a41b2d8aac 100644 --- a/python/packages/devui/dev.md +++ b/python/packages/devui/dev.md @@ -9,8 +9,6 @@ git clone https://github.com/microsoft/agent-framework.git cd agent-framework ``` -(or use the latest main branch if merged) - ## 2. Setup Environment Navigate to the Python directory and install dependencies: @@ -47,7 +45,7 @@ AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="your-deployment-name" **Option A: In-Memory Mode (Recommended for quick testing)** ```bash -cd packages/devui/samples +cd samples/getting_started/devui python in_memory_mode.py ``` @@ -56,7 +54,7 @@ This runs a simple example with predefined agents and opens your browser automat **Option B: Directory-Based Discovery** ```bash -cd packages/devui/samples +cd samples/getting_started/devui devui ``` @@ -72,57 +70,91 @@ This launches the UI with all example agents/workflows at http://localhost:8080 You can also test via API calls: +### Single Request + ```bash curl -X POST http://localhost:8080/v1/responses \ -H "Content-Type: application/json" \ -d '{ - "model": "agent-framework", + "model": "weather_agent", + "input": "What is the weather in Seattle?" 
+ }' +``` + +### Multi-turn Conversations + +```bash +# Create a conversation +curl -X POST http://localhost:8080/v1/conversations \ + -H "Content-Type: application/json" \ + -d '{"metadata": {"agent_id": "weather_agent"}}' + +# Returns: {"id": "conv_abc123", ...} + +# Use conversation ID in requests +curl -X POST http://localhost:8080/v1/responses \ + -H "Content-Type: application/json" \ + -d '{ + "model": "weather_agent", "input": "What is the weather in Seattle?", - "extra_body": {"entity_id": "weather_agent"} + "conversation": "conv_abc123" + }' + +# Continue the conversation +curl -X POST http://localhost:8080/v1/responses \ + -H "Content-Type: application/json" \ + -d '{ + "model": "weather_agent", + "input": "How about tomorrow?", + "conversation": "conv_abc123" }' ``` ## API Mapping -Messages and events from agents/workflows are mapped to OpenAI response types in `agent_framework_devui/_mapper.py`. See the mapping table below: - -| Agent Framework Content | OpenAI Event | Type | -| --------------------------------- | ----------------------------------------- | -------- | -| `TextContent` | `ResponseTextDeltaEvent` | Official | -| `TextReasoningContent` | `ResponseReasoningTextDeltaEvent` | Official | -| `FunctionCallContent` | `ResponseFunctionCallArgumentsDeltaEvent` | Official | -| `FunctionResultContent` | `ResponseFunctionResultComplete` | Custom | -| `ErrorContent` | `ResponseErrorEvent` | Official | -| `UsageContent` | `ResponseUsageEventComplete` | Custom | -| `DataContent` | `ResponseTraceEventComplete` | Custom | -| `UriContent` | `ResponseTraceEventComplete` | Custom | -| `HostedFileContent` | `ResponseTraceEventComplete` | Custom | -| `HostedVectorStoreContent` | `ResponseTraceEventComplete` | Custom | -| `FunctionApprovalRequestContent` | Custom event | Custom | -| `FunctionApprovalResponseContent` | Custom event | Custom | -| `WorkflowEvent` | `ResponseWorkflowEventComplete` | Custom | +Agent Framework content types → OpenAI Responses API 
events (in `_mapper.py`): -## Frontend Development +| Agent Framework Content | OpenAI Event | Status | +| ------------------------------- | ---------------------------------------- | -------- | +| `TextContent` | `response.output_text.delta` | Standard | +| `TextReasoningContent` | `response.reasoning.delta` | Standard | +| `FunctionCallContent` (initial) | `response.output_item.added` | Standard | +| `FunctionCallContent` (args) | `response.function_call_arguments.delta` | Standard | +| `FunctionResultContent` | `response.function_result.complete` | Standard | +| `ErrorContent` | `response.error` | Standard | +| `UsageContent` | `response.usage.complete` | Extended | +| `WorkflowEvent` | `response.workflow.event` | DevUI | +| `DataContent`, `UriContent` | `response.trace.complete` | DevUI | -To build the frontend: +- **Standard** = OpenAI spec, **Extended** = OpenAI + extra fields, **DevUI** = DevUI-specific + +## Frontend Development ```bash -cd frontend +cd python/packages/devui/frontend yarn install -# Create .env.local with backend URL -echo 'VITE_API_BASE_URL=http://localhost:8000' > .env.local - -# Create .env.production (empty for relative URLs) -echo '' > .env.production - -# Development +# Development (hot reload) yarn dev -# Build (copies to backend) +# Build (copies to backend ui/) yarn build ``` +## Running Tests + +```bash +cd python/packages/devui + +# All tests +pytest tests/ -v + +# Specific suites +pytest tests/test_conversations.py -v # Conversation store +pytest tests/test_server.py -v # API endpoints +pytest tests/test_mapper.py -v # Event mapping +``` + ## Troubleshooting - **Missing API key**: Make sure your `.env` file is in the `python/` directory with valid credentials. Or set environment variables directly in your shell before running DevUI. 
diff --git a/python/packages/devui/frontend/src/App.tsx b/python/packages/devui/frontend/src/App.tsx index e606b7335b..fdb6c0d52e 100644 --- a/python/packages/devui/frontend/src/App.tsx +++ b/python/packages/devui/frontend/src/App.tsx @@ -4,12 +4,10 @@ */ import { useState, useEffect, useCallback } from "react"; -import { AppHeader } from "@/components/shared/app-header"; -import { DebugPanel } from "@/components/shared/debug-panel"; -import { SettingsModal } from "@/components/shared/settings-modal"; -import { GalleryView } from "@/components/gallery"; -import { AgentView } from "@/components/agent/agent-view"; -import { WorkflowView } from "@/components/workflow/workflow-view"; +import { AppHeader, DebugPanel, SettingsModal } from "@/components/layout"; +import { GalleryView } from "@/components/features/gallery"; +import { AgentView } from "@/components/features/agent"; +import { WorkflowView } from "@/components/features/workflow"; import { LoadingState } from "@/components/ui/loading-state"; import { Toast } from "@/components/ui/toast"; import { apiClient } from "@/services/api"; diff --git a/python/packages/devui/frontend/src/components/shared/agent-details-modal.tsx b/python/packages/devui/frontend/src/components/features/agent/agent-details-modal.tsx similarity index 100% rename from python/packages/devui/frontend/src/components/shared/agent-details-modal.tsx rename to python/packages/devui/frontend/src/components/features/agent/agent-details-modal.tsx diff --git a/python/packages/devui/frontend/src/components/agent/agent-view.tsx b/python/packages/devui/frontend/src/components/features/agent/agent-view.tsx similarity index 52% rename from python/packages/devui/frontend/src/components/agent/agent-view.tsx rename to python/packages/devui/frontend/src/components/features/agent/agent-view.tsx index 7b0bcfb2bb..ff9a908895 100644 --- a/python/packages/devui/frontend/src/components/agent/agent-view.tsx +++ 
b/python/packages/devui/frontend/src/components/features/agent/agent-view.tsx @@ -1,6 +1,6 @@ /** * AgentView - Complete agent interaction interface - * Features: Chat interface, message streaming, thread management + * Features: Chat interface, message streaming, conversation management */ import { useState, useCallback, useRef, useEffect } from "react"; @@ -12,7 +12,7 @@ import { AttachmentGallery, type AttachmentItem, } from "@/components/ui/attachment-gallery"; -import { MessageRenderer } from "@/components/message_renderer"; +import { OpenAIMessageRenderer } from "./message-renderers/OpenAIMessageRenderer"; import { LoadingSpinner } from "@/components/ui/loading-spinner"; import { Select, @@ -21,7 +21,7 @@ import { SelectTrigger, SelectValue, } from "@/components/ui/select"; -import { AgentDetailsModal } from "@/components/shared/agent-details-modal"; +import { AgentDetailsModal } from "./agent-details-modal"; import { SendHorizontal, User, @@ -32,18 +32,20 @@ import { Info, Trash2, FileText, + Check, + X, } from "lucide-react"; import { apiClient } from "@/services/api"; import type { AgentInfo, - ChatMessage, RunAgentRequest, - ThreadInfo, + Conversation, ExtendedResponseStreamEvent, + PendingApproval, } from "@/types"; interface ChatState { - messages: ChatMessage[]; + items: import("@/types/openai").ConversationItem[]; // Pure OpenAI types - no legacy ChatMessage isStreaming: boolean; } @@ -54,103 +56,89 @@ interface AgentViewProps { onDebugEvent: DebugEventHandler; } -interface MessageBubbleProps { - message: ChatMessage; +interface ConversationItemBubbleProps { + item: import("@/types/openai").ConversationItem; } -function MessageBubble({ message }: MessageBubbleProps) { - const isUser = message.role === "user"; - const isError = message.error; - const Icon = isUser ? User : isError ? 
AlertCircle : Bot; +function ConversationItemBubble({ item }: ConversationItemBubbleProps) { + // Handle different item types + if (item.type === "message") { + const isUser = item.role === "user"; + const isError = item.status === "incomplete"; + const Icon = isUser ? User : isError ? AlertCircle : Bot; - return ( -
-
- -
- -
+ return ( +
- {isError && ( -
- - - Unable to process request - + +
+ +
+
+ {isError && ( +
+ + + Unable to process request + +
+ )} +
+
- )} -
-
-
-
- {new Date(message.timestamp).toLocaleTimeString()} - {!isUser && message.usage && ( - <> - - - {message.usage.total_tokens >= 1000 - ? `${(message.usage.total_tokens / 1000).toFixed(2)}k` - : message.usage.total_tokens}{" "} - tokens - {message.usage.prompt_tokens > 0 && ( - - {" "} - ( - {message.usage.prompt_tokens >= 1000 - ? `${(message.usage.prompt_tokens / 1000).toFixed(1)}k` - : message.usage.prompt_tokens}{" "} - in,{" "} - {message.usage.completion_tokens >= 1000 - ? `${(message.usage.completion_tokens / 1000).toFixed(1)}k` - : message.usage.completion_tokens}{" "} - out) +
+ {new Date().toLocaleTimeString()} + {!isUser && item.usage && ( + <> + + + + ↓{item.usage.input_tokens} - )} - - - )} + + ↑{item.usage.output_tokens} + + ({item.usage.total_tokens} tokens) + + + )} +
-
- ); -} + ); + } -function TypingIndicator() { + // Function calls and results - render with neutral styling return (
-
-
-
-
-
+
+
+
@@ -159,27 +147,32 @@ function TypingIndicator() { export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { const [chatState, setChatState] = useState({ - messages: [], + items: [], isStreaming: false, }); - const [currentThread, setCurrentThread] = useState( - undefined - ); - const [availableThreads, setAvailableThreads] = useState([]); + const [currentConversation, setCurrentConversation] = useState< + Conversation | undefined + >(undefined); + const [availableConversations, setAvailableConversations] = useState< + Conversation[] + >([]); const [inputValue, setInputValue] = useState(""); const [isSubmitting, setIsSubmitting] = useState(false); const [attachments, setAttachments] = useState([]); - const [loadingThreads, setLoadingThreads] = useState(false); + const [loadingConversations, setLoadingConversations] = useState(false); const [isDragOver, setIsDragOver] = useState(false); const [dragCounter, setDragCounter] = useState(0); const [pasteNotification, setPasteNotification] = useState( null ); const [detailsModalOpen, setDetailsModalOpen] = useState(false); - const [threadUsage, setThreadUsage] = useState<{ + const [conversationUsage, setConversationUsage] = useState<{ total_tokens: number; message_count: number; }>({ total_tokens: 0, message_count: 0 }); + const [pendingApprovals, setPendingApprovals] = useState( + [] + ); const scrollAreaRef = useRef(null); const messagesEndRef = useRef(null); @@ -187,64 +180,118 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { const textareaRef = useRef(null); const currentMessageUsage = useRef<{ total_tokens: number; - prompt_tokens: number; - completion_tokens: number; + input_tokens: number; + output_tokens: number; } | null>(null); - // Auto-scroll to bottom when new messages arrive + // Auto-scroll to bottom when new items arrive useEffect(() => { messagesEndRef.current?.scrollIntoView({ behavior: "smooth" }); - }, [chatState.messages, chatState.isStreaming]); + }, 
[chatState.items, chatState.isStreaming]); + + // Return focus to input after streaming completes + useEffect(() => { + if (!chatState.isStreaming && !isSubmitting) { + textareaRef.current?.focus(); + } + }, [chatState.isStreaming, isSubmitting]); - // Load threads when agent changes + // Load conversations when agent changes useEffect(() => { - const loadThreads = async () => { + const loadConversations = async () => { if (!selectedAgent) return; - setLoadingThreads(true); + setLoadingConversations(true); try { - const threads = await apiClient.getThreads(selectedAgent.id); - setAvailableThreads(threads); + // Step 1: Try to list conversations from backend (DevUI extension) + // This works with DevUI backend but fails with OpenAI/Azure (they don't have list endpoint) + try { + const { data: conversations } = await apiClient.listConversations( + selectedAgent.id + ); - // Auto-select the most recent thread if available - if (threads.length > 0) { - const mostRecentThread = threads[0]; // Assuming threads are sorted by creation date (newest first) - setCurrentThread(mostRecentThread); + if (conversations.length > 0) { + // Found conversations on backend - use most recent + const mostRecent = conversations[0]; + setAvailableConversations(conversations); + setCurrentConversation(mostRecent); - // Load messages for the selected thread - try { - const threadMessages = await apiClient.getThreadMessages( - mostRecentThread.id + // Load conversation items from backend + try { + const { data: items } = await apiClient.listConversationItems( + mostRecent.id + ); + + // Use OpenAI ConversationItems directly (no conversion!) 
+ setChatState({ + items: items as import("@/types/openai").ConversationItem[], + isStreaming: false + }); + } catch { + setChatState({ items: [], isStreaming: false }); + } + + // Cache to localStorage for faster future loads + localStorage.setItem( + `devui_convs_${selectedAgent.id}`, + JSON.stringify(conversations) ); - setChatState({ - messages: threadMessages, - isStreaming: false, - }); - } catch (error) { - console.error("Failed to load thread messages:", error); - setChatState({ - messages: [], - isStreaming: false, - }); + return; } + } catch { + // Backend doesn't support list endpoint (OpenAI, Azure, etc.) + // This is expected - fall through to localStorage } - } catch (error) { - console.error("Failed to load threads:", error); - setAvailableThreads([]); + + // Step 2: Try localStorage (works with all backends) + const cachedKey = `devui_convs_${selectedAgent.id}`; + const cached = localStorage.getItem(cachedKey); + + if (cached) { + try { + const convs = JSON.parse(cached) as Conversation[]; + + if (convs.length > 0) { + // Use most recent cached conversation + setAvailableConversations(convs); + setCurrentConversation(convs[0]); + setChatState({ items: [], isStreaming: false }); + return; + } + } catch { + // Invalid cache - clear it + localStorage.removeItem(cachedKey); + } + } + + // Step 3: No conversations found - create new + const newConversation = await apiClient.createConversation({ + agent_id: selectedAgent.id, + }); + + setCurrentConversation(newConversation); + setAvailableConversations([newConversation]); + setChatState({ items: [], isStreaming: false }); + + // Save to localStorage + localStorage.setItem(cachedKey, JSON.stringify([newConversation])); + } catch { + setAvailableConversations([]); + setChatState({ items: [], isStreaming: false }); } finally { - setLoadingThreads(false); + setLoadingConversations(false); } }; // Clear chat when agent changes setChatState({ - messages: [], + items: [], isStreaming: false, }); - 
setCurrentThread(undefined); + setCurrentConversation(undefined); accumulatedText.current = ""; - loadThreads(); + loadConversations(); }, [selectedAgent]); // Handle file uploads @@ -369,12 +416,14 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { const start = textarea.selectionStart; const end = textarea.selectionEnd; const currentValue = textarea.value; - const newValue = currentValue.slice(0, start) + text + currentValue.slice(end); + const newValue = + currentValue.slice(0, start) + text + currentValue.slice(end); setInputValue(newValue); // Restore cursor position after the inserted text setTimeout(() => { - textarea.selectionStart = textarea.selectionEnd = start + text.length; + textarea.selectionStart = textarea.selectionEnd = + start + text.length; textarea.focus(); }, 0); } @@ -405,7 +454,7 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { // Detect file extension from content const detectFileExtension = (text: string): string => { const trimmed = text.trim(); - const lines = trimmed.split('\n'); + const lines = trimmed.split("\n"); // JSON detection if (/^{[\s\S]*}$|^\[[\s\S]*\]$/.test(trimmed)) return ".json"; @@ -421,18 +470,26 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { // CSV detection (more strict) - need multiple lines with consistent comma patterns if (lines.length > 2) { - const commaLines = lines.filter(line => line.includes(',')); - const semicolonLines = lines.filter(line => line.includes(';')); + const commaLines = lines.filter((line) => line.includes(",")); + const semicolonLines = lines.filter((line) => line.includes(";")); // If >50% of lines have commas and it looks tabular if (commaLines.length > lines.length * 0.5) { - const avgCommas = commaLines.reduce((sum, line) => sum + (line.match(/,/g) || []).length, 0) / commaLines.length; + const avgCommas = + commaLines.reduce( + (sum, line) => sum + (line.match(/,/g) || []).length, + 0 + ) / 
commaLines.length; if (avgCommas >= 2) return ".csv"; } // If >50% of lines have semicolons and it looks tabular if (semicolonLines.length > lines.length * 0.5) { - const avgSemicolons = semicolonLines.reduce((sum, line) => sum + (line.match(/;/g) || []).length, 0) / semicolonLines.length; + const avgSemicolons = + semicolonLines.reduce( + (sum, line) => sum + (line.match(/;/g) || []).length, + 0 + ) / semicolonLines.length; if (avgSemicolons >= 2) return ".csv"; } } @@ -457,28 +514,35 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { }); }; - // Handle new thread creation - const handleNewThread = useCallback(async () => { + // Handle new conversation creation + const handleNewConversation = useCallback(async () => { if (!selectedAgent) return; try { - const newThread = await apiClient.createThread(selectedAgent.id); - setCurrentThread(newThread); - setAvailableThreads((prev) => [newThread, ...prev]); + const newConversation = await apiClient.createConversation({ + agent_id: selectedAgent.id, + }); + setCurrentConversation(newConversation); + setAvailableConversations((prev) => [newConversation, ...prev]); setChatState({ - messages: [], + items: [], isStreaming: false, }); - setThreadUsage({ total_tokens: 0, message_count: 0 }); + setConversationUsage({ total_tokens: 0, message_count: 0 }); accumulatedText.current = ""; - } catch (error) { - console.error("Failed to create thread:", error); + + // Update localStorage cache with new conversation + const cachedKey = `devui_convs_${selectedAgent.id}`; + const updated = [newConversation, ...availableConversations]; + localStorage.setItem(cachedKey, JSON.stringify(updated)); + } catch { + // Failed to create conversation } - }, [selectedAgent]); + }, [selectedAgent, availableConversations]); - // Handle thread deletion - const handleDeleteThread = useCallback( - async (threadId: string, e?: React.MouseEvent) => { + // Handle conversation deletion + const handleDeleteConversation = 
useCallback( + async (conversationId: string, e?: React.MouseEvent) => { // Prevent event from bubbling to SelectItem if (e) { e.preventDefault(); @@ -486,46 +550,46 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { } // Confirm deletion - if (!confirm("Delete this thread? This cannot be undone.")) { + if (!confirm("Delete this conversation? This cannot be undone.")) { return; } try { - const success = await apiClient.deleteThread(threadId); + const success = await apiClient.deleteConversation(conversationId); if (success) { - // Remove thread from available threads - const updatedThreads = availableThreads.filter((t) => t.id !== threadId); - setAvailableThreads(updatedThreads); - - // If deleted thread was selected, switch to another thread or clear chat - if (currentThread?.id === threadId) { - if (updatedThreads.length > 0) { - // Select the most recent remaining thread - const nextThread = updatedThreads[0]; - setCurrentThread(nextThread); - - // Load messages for the next thread - try { - const threadMessages = await apiClient.getThreadMessages(nextThread.id); - setChatState({ - messages: threadMessages, - isStreaming: false, - }); - } catch (error) { - console.error("Failed to load thread messages:", error); - setChatState({ - messages: [], - isStreaming: false, - }); - } + // Remove conversation from available conversations + const updatedConversations = availableConversations.filter( + (c) => c.id !== conversationId + ); + setAvailableConversations(updatedConversations); + + // Update localStorage cache + if (selectedAgent) { + const cachedKey = `devui_convs_${selectedAgent.id}`; + localStorage.setItem( + cachedKey, + JSON.stringify(updatedConversations) + ); + } + + // If deleted conversation was selected, switch to another conversation or clear chat + if (currentConversation?.id === conversationId) { + if (updatedConversations.length > 0) { + // Select the most recent remaining conversation + const nextConversation = 
updatedConversations[0]; + setCurrentConversation(nextConversation); + setChatState({ + items: [], + isStreaming: false, + }); } else { - // No threads left, clear everything - setCurrentThread(undefined); + // No conversations left, clear everything + setCurrentConversation(undefined); setChatState({ - messages: [], + items: [], isStreaming: false, }); - setThreadUsage({ total_tokens: 0, message_count: 0 }); + setConversationUsage({ total_tokens: 0, message_count: 0 }); accumulatedText.current = ""; } } @@ -533,163 +597,176 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { // Clear debug panel onDebugEvent("clear"); } - } catch (error) { - console.error("Failed to delete thread:", error); - alert("Failed to delete thread. Please try again."); + } catch { + alert("Failed to delete conversation. Please try again."); } }, - [availableThreads, currentThread, onDebugEvent] + [availableConversations, currentConversation, selectedAgent, onDebugEvent] ); - // Handle thread selection - const handleThreadSelect = useCallback( - async (threadId: string) => { - const thread = availableThreads.find((t) => t.id === threadId); - if (!thread) return; + // Handle conversation selection + const handleConversationSelect = useCallback( + async (conversationId: string) => { + const conversation = availableConversations.find( + (c) => c.id === conversationId + ); + if (!conversation) return; - setCurrentThread(thread); + setCurrentConversation(conversation); - // Clear debug panel when switching threads + // Clear debug panel when switching conversations onDebugEvent("clear"); try { - // Load thread messages from backend - const threadMessages = await apiClient.getThreadMessages(threadId); + // Load conversation history from backend + const result = await apiClient.listConversationItems(conversationId); + + // Use OpenAI ConversationItems directly (no conversion!) 
+ const items = result.data as import("@/types/openai").ConversationItem[]; setChatState({ - messages: threadMessages, + items, isStreaming: false, }); - // Calculate cumulative usage for this thread - const totalTokens = threadMessages.reduce( - (sum, msg) => sum + (msg.usage?.total_tokens || 0), - 0 - ); - const messageCount = threadMessages.filter( - (msg) => msg.role === "assistant" && msg.usage - ).length; - setThreadUsage({ total_tokens: totalTokens, message_count: messageCount }); - - console.log( - `Restored ${threadMessages.length} messages for thread ${threadId}` - ); - } catch (error) { - console.error("Failed to load thread messages:", error); - // Fallback to clearing messages + // Calculate usage from loaded items + setConversationUsage({ + total_tokens: 0, // We don't have usage info in stored items + message_count: items.length, + }); + } catch { + // Fallback to clearing items setChatState({ - messages: [], + items: [], isStreaming: false, }); + setConversationUsage({ total_tokens: 0, message_count: 0 }); } accumulatedText.current = ""; }, - [availableThreads] + [availableConversations, onDebugEvent] ); + // Handle function approval responses + const handleApproval = async (request_id: string, approved: boolean) => { + const approval = pendingApprovals.find((a) => a.request_id === request_id); + if (!approval) return; + + // Create approval response in OpenAI-compatible format + const approvalInput: import("@/types/agent-framework").ResponseInputParam = [ + { + type: "message", // CRITICAL: Must set type for backend to recognize it + role: "user", + content: [ + { + type: "function_approval_response", + request_id: request_id, + approved: approved, + function_call: approval.function_call, + } as import("@/types/openai").MessageFunctionApprovalResponseContent, + ], + }, + ]; + + // Send approval response through the conversation + // We'll call handleSendMessage directly when invoked (it's defined below) + const request: RunAgentRequest = { + input: 
approvalInput, + conversation_id: currentConversation?.id, + }; + + // Remove from pending immediately (will be confirmed by backend event) + setPendingApprovals((prev) => + prev.filter((a) => a.request_id !== request_id) + ); + + // Trigger send (we'll call this from the UI button handler) + return request; + }; + // Handle message sending const handleSendMessage = useCallback( async (request: RunAgentRequest) => { if (!selectedAgent) return; - // Extract text and attachments from OpenAI format for UI display - let displayText = ""; - const attachmentContents: import("@/types/agent-framework").Contents[] = - []; + // Extract content from OpenAI format to create ConversationMessage + const messageContent: import("@/types/openai").MessageContent[] = []; - // Parse OpenAI ResponseInputParam to extract display content + // Parse OpenAI ResponseInputParam to extract content for (const inputItem of request.input) { if (inputItem.type === "message" && Array.isArray(inputItem.content)) { for (const contentItem of inputItem.content) { if (contentItem.type === "input_text") { - displayText += contentItem.text + " "; + messageContent.push({ + type: "text", + text: contentItem.text, + }); } else if (contentItem.type === "input_image") { - attachmentContents.push({ - type: "data", - uri: contentItem.image_url || "", - media_type: "image/png", // Default, should extract from data URI - } as import("@/types/agent-framework").DataContent); + messageContent.push({ + type: "input_image", + image_url: contentItem.image_url || "", + detail: "auto", + }); } else if (contentItem.type === "input_file") { - const dataUri = `data:application/octet-stream;base64,${contentItem.file_data}`; - // Determine media type from filename - const filename = (contentItem as import("@/types/agent-framework").ResponseInputFileParam).filename || ""; - let mediaType = "application/octet-stream"; - - if (filename.endsWith(".pdf")) mediaType = "application/pdf"; - else if (filename.endsWith(".txt")) 
mediaType = "text/plain"; - else if (filename.endsWith(".json")) mediaType = "application/json"; - else if (filename.endsWith(".csv")) mediaType = "text/csv"; - else if (filename.endsWith(".html")) mediaType = "text/html"; - else if (filename.endsWith(".md")) mediaType = "text/markdown"; - - attachmentContents.push({ - type: "data", - uri: dataUri, - media_type: mediaType, - } as import("@/types/agent-framework").DataContent); + const fileItem = contentItem as import("@/types/agent-framework").ResponseInputFileParam; + messageContent.push({ + type: "input_file", + file_data: fileItem.file_data, + filename: fileItem.filename, + }); } } } } - const userMessageContents: import("@/types/agent-framework").Contents[] = - [ - ...(displayText.trim() - ? [ - { - type: "text", - text: displayText.trim(), - } as import("@/types/agent-framework").TextContent, - ] - : []), - ...attachmentContents, - ]; - - // Add user message to UI state - const userMessage: ChatMessage = { + // Add user message to UI state (OpenAI ConversationMessage) + const userMessage: import("@/types/openai").ConversationMessage = { id: `user-${Date.now()}`, + type: "message", role: "user", - contents: userMessageContents, - timestamp: new Date().toISOString(), + content: messageContent, + status: "completed", }; setChatState((prev) => ({ ...prev, - messages: [...prev.messages, userMessage], + items: [...prev.items, userMessage], isStreaming: true, })); // Create assistant message placeholder - const assistantMessage: ChatMessage = { + const assistantMessage: import("@/types/openai").ConversationMessage = { id: `assistant-${Date.now()}`, + type: "message", role: "assistant", - contents: [], - timestamp: new Date().toISOString(), - streaming: true, + content: [], // Will be filled during streaming + status: "in_progress", }; setChatState((prev) => ({ ...prev, - messages: [...prev.messages, assistantMessage], + items: [...prev.items, assistantMessage], })); try { - // If no thread selected, create one 
automatically - let threadToUse = currentThread; - if (!threadToUse) { + // If no conversation selected, create one automatically + let conversationToUse = currentConversation; + if (!conversationToUse) { try { - threadToUse = await apiClient.createThread(selectedAgent.id); - setCurrentThread(threadToUse); - setAvailableThreads((prev) => [threadToUse!, ...prev]); - } catch (error) { - console.error("Failed to create thread:", error); + conversationToUse = await apiClient.createConversation({ + agent_id: selectedAgent.id, + }); + setCurrentConversation(conversationToUse); + setAvailableConversations((prev) => [conversationToUse!, ...prev]); + } catch { + // Failed to create conversation } } const apiRequest = { input: request.input, - thread_id: threadToUse?.id, + conversation_id: conversationToUse?.id, }; // Clear text accumulator for new response @@ -708,18 +785,45 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { // Pass all events to debug panel onDebugEvent(openAIEvent); - // Handle usage events - if (openAIEvent.type === "response.usage.complete") { - const usageEvent = openAIEvent as import("@/types").ResponseUsageEventComplete; - console.log("📊 Usage event received:", usageEvent.data); - if (usageEvent.data) { + // Handle response.completed event (OpenAI standard) + if (openAIEvent.type === "response.completed") { + const completedEvent = openAIEvent as import("@/types/openai").ResponseCompletedEvent; + const usage = completedEvent.response?.usage; + + if (usage) { currentMessageUsage.current = { - total_tokens: usageEvent.data.total_tokens || 0, - prompt_tokens: usageEvent.data.prompt_tokens || 0, - completion_tokens: usageEvent.data.completion_tokens || 0, + input_tokens: usage.input_tokens, + output_tokens: usage.output_tokens, + total_tokens: usage.total_tokens, }; - console.log("📊 Set usage:", currentMessageUsage.current); } + continue; // Continue processing other events + } + + // Handle function approval request events + 
if (openAIEvent.type === "response.function_approval.requested") { + const approvalEvent = openAIEvent as import("@/types/openai").ResponseFunctionApprovalRequestedEvent; + + // Add to pending approvals + setPendingApprovals((prev) => [ + ...prev, + { + request_id: approvalEvent.request_id, + function_call: approvalEvent.function_call, + }, + ]); + continue; // Don't add approval requests to chat UI + } + + // Handle function approval response events + if (openAIEvent.type === "response.function_approval.responded") { + const responseEvent = openAIEvent as import("@/types/openai").ResponseFunctionApprovalRespondedEvent; + + // Remove from pending approvals + setPendingApprovals((prev) => + prev.filter((a) => a.request_id !== responseEvent.request_id) + ); + continue; } // Handle error events from the stream @@ -733,20 +837,19 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { setChatState((prev) => ({ ...prev, isStreaming: false, - messages: prev.messages.map((msg) => - msg.id === assistantMessage.id + items: prev.items.map((item) => + item.id === assistantMessage.id && item.type === "message" ? { - ...msg, - contents: [ + ...item, + content: [ { type: "text", text: errorMessage, - }, + } as import("@/types/openai").MessageTextContent, ], - streaming: false, - error: true, // Add error flag for styling + status: "incomplete" as const, } - : msg + : item ), })); return; // Exit stream processing early on error @@ -763,18 +866,19 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { // Update assistant message with accumulated content setChatState((prev) => ({ ...prev, - messages: prev.messages.map((msg) => - msg.id === assistantMessage.id + items: prev.items.map((item) => + item.id === assistantMessage.id && item.type === "message" ? 
{ - ...msg, - contents: [ + ...item, + content: [ { type: "text", text: accumulatedText.current, - }, + } as import("@/types/openai").MessageTextContent, ], + status: "in_progress" as const, } - : msg + : item ), })); } @@ -783,45 +887,43 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { // (Server will close the stream when done, so we'll exit the loop naturally) } - // Stream ended - mark as complete and attach usage + // Stream ended - mark as complete + // Usage is provided via response.completed event (OpenAI standard) const finalUsage = currentMessageUsage.current; - console.log("📊 Stream ended, attaching usage to message:", finalUsage); setChatState((prev) => ({ ...prev, isStreaming: false, - messages: prev.messages.map((msg) => - msg.id === assistantMessage.id + items: prev.items.map((item) => + item.id === assistantMessage.id && item.type === "message" ? { - ...msg, - streaming: false, + ...item, + status: "completed" as const, usage: finalUsage || undefined, } - : msg + : item ), })); - // Update thread-level usage stats + // Update conversation-level usage stats if (finalUsage) { - setThreadUsage((prev) => ({ + setConversationUsage((prev) => ({ total_tokens: prev.total_tokens + finalUsage.total_tokens, message_count: prev.message_count + 1, })); - console.log("📊 Updated thread usage"); } // Reset usage for next message currentMessageUsage.current = null; } catch (error) { - console.error("Streaming error:", error); setChatState((prev) => ({ ...prev, isStreaming: false, - messages: prev.messages.map((msg) => - msg.id === assistantMessage.id + items: prev.items.map((item) => + item.id === assistantMessage.id && item.type === "message" ? { - ...msg, - contents: [ + ...item, + content: [ { type: "text", text: `Error: ${ @@ -829,16 +931,16 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { ? 
error.message : "Failed to get response" }`, - }, + } as import("@/types/openai").MessageTextContent, ], - streaming: false, + status: "incomplete" as const, } - : msg + : item ), })); } }, - [selectedAgent, currentThread, onDebugEvent] + [selectedAgent, currentConversation, onDebugEvent] ); const handleSubmit = async (e: React.FormEvent) => { @@ -883,12 +985,12 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { } else if ( attachment.file.type === "text/plain" && (attachment.file.name.includes("pasted-text-") || - attachment.file.name.endsWith(".txt") || - attachment.file.name.endsWith(".csv") || - attachment.file.name.endsWith(".json") || - attachment.file.name.endsWith(".html") || - attachment.file.name.endsWith(".md") || - attachment.file.name.endsWith(".tsv")) + attachment.file.name.endsWith(".txt") || + attachment.file.name.endsWith(".csv") || + attachment.file.name.endsWith(".json") || + attachment.file.name.endsWith(".html") || + attachment.file.name.endsWith(".md") || + attachment.file.name.endsWith(".tsv")) ) { // Convert all text files (from pasted large text) back to input_text const text = await attachment.file.text(); @@ -920,7 +1022,7 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { // Use pure OpenAI format await handleSendMessage({ input: openaiInput, - thread_id: currentThread?.id, + conversation_id: currentConversation?.id, }); } else { // Simple text message using OpenAI format @@ -940,7 +1042,7 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) { await handleSendMessage({ input: openaiInput, - thread_id: currentThread?.id, + conversation_id: currentConversation?.id, }); } @@ -966,7 +1068,9 @@ export function AgentView({ selectedAgent, onDebugEvent }: AgentViewProps) {

- Chat with {selectedAgent.name || selectedAgent.id} + + Chat with {selectedAgent.name || selectedAgent.id} +

- {/* Thread Controls */} + {/* Conversation Controls */}