diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000..29aae92645 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: true +contact_links: + - name: Documentation + url: https://aka.ms/agent-framework + about: Check out the official documentation for guides and API reference. + - name: Discussions + url: https://github.com/microsoft/agent-framework/discussions + about: Ask questions about Agent Framework. diff --git a/.github/ISSUE_TEMPLATE/dotnet-issue.yml b/.github/ISSUE_TEMPLATE/dotnet-issue.yml new file mode 100644 index 0000000000..3e02fd9e60 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/dotnet-issue.yml @@ -0,0 +1,70 @@ +name: .NET Bug Report +description: Report a bug in the Agent Framework .NET SDK +title: ".NET: [Bug]: " +labels: ["bug", ".NET"] +type: bug +body: + - type: textarea + id: description + attributes: + label: Description + description: Please provide a clear and detailed description of the bug. + placeholder: | + - What happened? + - What did you expect to happen? + - Steps to reproduce the issue + validations: + required: true + + - type: textarea + id: code-sample + attributes: + label: Code Sample + description: If applicable, provide a minimal code sample that demonstrates the issue. + placeholder: | + ```csharp + // Your code here + ``` + render: markdown + validations: + required: false + + - type: textarea + id: error-messages + attributes: + label: Error Messages / Stack Traces + description: Include any error messages or stack traces you received. 
+ placeholder: | + ``` + Paste error messages or stack traces here + ``` + render: markdown + validations: + required: false + + - type: input + id: dotnet-packages + attributes: + label: Package Versions + description: List the Microsoft.Agents.* packages and versions you are using + placeholder: "e.g., Microsoft.Agents.AI.Abstractions: 1.0.0, Microsoft.Agents.AI.OpenAI: 1.0.0" + validations: + required: true + + - type: input + id: dotnet-version + attributes: + label: .NET Version + description: What version of .NET are you using? + placeholder: "e.g., .NET 8.0" + validations: + required: false + + - type: textarea + id: additional-context + attributes: + label: Additional Context + description: Add any other context or screenshots that might be helpful. + placeholder: "Any additional information..." + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml new file mode 100644 index 0000000000..1dc13189e7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -0,0 +1,51 @@ +name: Feature Request +description: Request a new feature for Microsoft Agent Framework +title: "[Feature]: " +type: feature +body: + + - type: textarea + id: description + attributes: + label: Description + description: Please describe the feature you'd like and why it would be useful. + placeholder: | + Describe the feature you're requesting: + - What problem does it solve? + - What would the expected behavior be? + - Are there any alternatives you've considered? + validations: + required: true + + - type: textarea + id: code-sample + attributes: + label: Code Sample + description: If applicable, provide a code sample showing how you'd like to use this feature. 
+ placeholder: | + ```python + # Your code here + ``` + + or + + ```csharp + // Your code here + ``` + render: markdown + validations: + required: false + + - type: dropdown + id: language + attributes: + label: Language/SDK + description: Which language/SDK does this feature apply to? + options: + - Both + - .NET + - Python + - Other / Not Applicable + default: 0 + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/python-issue.yml b/.github/ISSUE_TEMPLATE/python-issue.yml new file mode 100644 index 0000000000..3a506c66fe --- /dev/null +++ b/.github/ISSUE_TEMPLATE/python-issue.yml @@ -0,0 +1,70 @@ +name: Python Bug Report +description: Report a bug in the Agent Framework Python SDK +title: "Python: [Bug]: " +labels: ["bug", "Python"] +type: bug +body: + - type: textarea + id: description + attributes: + label: Description + description: Please provide a clear and detailed description of the bug. + placeholder: | + - What happened? + - What did you expect to happen? + - Steps to reproduce the issue + validations: + required: true + + - type: textarea + id: code-sample + attributes: + label: Code Sample + description: If applicable, provide a minimal code sample that demonstrates the issue. + placeholder: | + ```python + # Your code here + ``` + render: markdown + validations: + required: false + + - type: textarea + id: error-messages + attributes: + label: Error Messages / Stack Traces + description: Include any error messages or stack traces you received. 
+ placeholder: | + ``` + Paste error messages or stack traces here + ``` + render: markdown + validations: + required: false + + - type: input + id: python-packages + attributes: + label: Package Versions + description: List the agent-framework-* packages and versions you are using + placeholder: "e.g., agent-framework-core: 1.0.0, agent-framework-azure-ai: 1.0.0" + validations: + required: true + + - type: input + id: python-version + attributes: + label: Python Version + description: What version of Python are you using? + placeholder: "e.g., Python 3.11" + validations: + required: false + + - type: textarea + id: additional-context + attributes: + label: Additional Context + description: Add any other context or screenshots that might be helpful. + placeholder: "Any additional information..." + validations: + required: false diff --git a/.github/workflows/label-issues.yml b/.github/workflows/label-issues.yml index 231ee6833d..111c63ef13 100644 --- a/.github/workflows/label-issues.yml +++ b/.github/workflows/label-issues.yml @@ -45,19 +45,58 @@ jobs: labels.push("triage") } - // Check if the body or the title contains the word 'python' (case-insensitive) - if ((body != null && body.match(/python/i)) || (title != null && title.match(/python/i))) { - // Add the 'python' label to the array - labels.push("python") + // Helper function to extract field value from issue form body + // Issue forms format fields as: ### Field Name\n\nValue + function getFormFieldValue(body, fieldName) { + if (!body) return null + const regex = new RegExp(`###\\s*${fieldName}\\s*\\n\\n([^\\n#]+)`, 'i') + const match = body.match(regex) + return match ? 
match[1].trim() : null } - // Check if the body or the title contains the words 'dotnet', '.net', 'c#' or 'csharp' (case-insensitive) - if ((body != null && body.match(/.net/i)) || (title != null && title.match(/.net/i)) || - (body != null && body.match(/dotnet/i)) || (title != null && title.match(/dotnet/i)) || - (body != null && body.match(/C#/i)) || (title != null && title.match(/C#/i)) || - (body != null && body.match(/csharp/i)) || (title != null && title.match(/csharp/i))) { - // Add the '.NET' label to the array - labels.push(".NET") + // Check for language from issue form dropdown first + const languageField = getFormFieldValue(body, 'Language') + let languageLabelAdded = false + + if (languageField) { + if (languageField === 'Python') { + labels.push("python") + languageLabelAdded = true + } else if (languageField === '.NET') { + labels.push(".NET") + languageLabelAdded = true + } + // 'None / Not Applicable' - don't add any language label + } + + // Fallback: Check if the body or the title contains the word 'python' (case-insensitive) + // Only if language wasn't already determined from the form field + if (!languageLabelAdded) { + if ((body != null && body.match(/python/i)) || (title != null && title.match(/python/i))) { + // Add the 'python' label to the array + labels.push("python") + } + + // Check if the body or the title contains the words 'dotnet', '.net', 'c#' or 'csharp' (case-insensitive) + if ((body != null && body.match(/\.net/i)) || (title != null && title.match(/\.net/i)) || + (body != null && body.match(/dotnet/i)) || (title != null && title.match(/dotnet/i)) || + (body != null && body.match(/C#/i)) || (title != null && title.match(/C#/i)) || + (body != null && body.match(/csharp/i)) || (title != null && title.match(/csharp/i))) { + // Add the '.NET' label to the array + labels.push(".NET") + } + } + + // Check for issue type from issue form dropdown + const issueTypeField = getFormFieldValue(body, 'Type of Issue') + if (issueTypeField) { + 
if (issueTypeField === 'Bug') { + labels.push("bug") + } else if (issueTypeField === 'Feature Request') { + labels.push("enhancement") + } else if (issueTypeField === 'Question') { + labels.push("question") + } } // Add the labels to the issue (only if there are labels to add) diff --git a/docs/decisions/0001-agent-run-response.md b/docs/decisions/0001-agent-run-response.md index b60878adff..9f13af787c 100644 --- a/docs/decisions/0001-agent-run-response.md +++ b/docs/decisions/0001-agent-run-response.md @@ -64,7 +64,7 @@ Approaches observed from the compared SDKs: | AutoGen | **Approach 1** Separates messages into Agent-Agent (maps to Primary) and Internal (maps to Secondary) and these are returned as separate properties on the agent response object. See [types of messages](https://microsoft.github.io/autogen/stable/user-guide/agentchat-user-guide/tutorial/messages.html#types-of-messages) and [Response](https://microsoft.github.io/autogen/stable/reference/python/autogen_agentchat.base.html#autogen_agentchat.base.Response) | **Approach 2** Returns a stream of internal events and the last item is a Response object. See [ChatAgent.on_messages_stream](https://microsoft.github.io/autogen/stable/reference/python/autogen_agentchat.base.html#autogen_agentchat.base.ChatAgent.on_messages_stream) | | OpenAI Agent SDK | **Approach 1** Separates new_items (Primary+Secondary) from final output (Primary) as separate properties on the [RunResult](https://github.com/openai/openai-agents-python/blob/main/src/agents/result.py#L39) | **Approach 1** Similar to non-streaming, has a way of streaming updates via a method on the response object which includes all data, and then a separate final output property on the response object which is populated only when the run is complete. 
See [RunResultStreaming](https://github.com/openai/openai-agents-python/blob/main/src/agents/result.py#L136) | | Google ADK | **Approach 2** [Emits events](https://google.github.io/adk-docs/runtime/#step-by-step-breakdown) with [FinalResponse](https://github.com/google/adk-java/blob/main/core/src/main/java/com/google/adk/events/Event.java#L232) true (Primary) / false (Secondary) and callers have to filter out those with false to get just the final response message | **Approach 2** Similar to non-streaming except [events](https://google.github.io/adk-docs/runtime/#streaming-vs-non-streaming-output-partialtrue) are emitted with [Partial](https://github.com/google/adk-java/blob/main/core/src/main/java/com/google/adk/events/Event.java#L133) true to indicate that they are streaming messages. A final non partial event is also emitted. | -| AWS (Strands) | **Approach 3** Returns an [AgentResult](https://strandsagents.com/latest/api-reference/agent/#strands.agent.agent_result.AgentResult) (Primary) with messages and a reason for the run's completion. | **Approach 2** [Streams events](https://strandsagents.com/latest/api-reference/agent/#strands.agent.agent.Agent.stream_async) (Primary+Secondary) including, response text, current_tool_use, even data from "callbacks" (strands plugins) | +| AWS (Strands) | **Approach 3** Returns an [AgentResult](https://strandsagents.com/latest/documentation/docs/api-reference/python/agent/agent_result/) (Primary) with messages and a reason for the run's completion. 
| **Approach 2** [Streams events](https://strandsagents.com/latest/documentation/docs/api-reference/python/agent/agent/#strands.agent.agent.Agent.stream_async) (Primary+Secondary) including, response text, current_tool_use, even data from "callbacks" (strands plugins) | | LangGraph | **Approach 2** A mixed list of all [messages](https://langchain-ai.github.io/langgraph/agents/run_agents/#output-format) | **Approach 2** A mixed list of all [messages](https://langchain-ai.github.io/langgraph/agents/run_agents/#output-format) | | Agno | **Combination of various approaches** Returns a [RunResponse](https://docs.agno.com/reference/agents/run-response) object with text content, messages (essentially chat history including inputs and instructions), reasoning and thinking text properties. Secondary events could potentially be extracted from messages. | **Approach 2** Returns [RunResponseEvent](https://docs.agno.com/reference/agents/run-response#runresponseevent-types-and-attributes) objects including tool call, memory update, etc, information, where the [RunResponseCompletedEvent](https://docs.agno.com/reference/agents/run-response#runresponsecompletedevent) has similar properties to RunResponse| | A2A | **Approach 3** Returns a [Task or Message](https://a2aproject.github.io/A2A/latest/specification/#71-messagesend) where the message is the final result (Primary) and task is a reference to a long running process. | **Approach 2** Returns a [stream](https://a2aproject.github.io/A2A/latest/specification/#72-messagestream) that contains task updates (Secondary) and a final message (Primary) | @@ -495,8 +495,8 @@ We need to decide what AIContent types, each agent response type will be mapped | SDK | Structured Outputs support | |-|-| | AutoGen | **Approach 1** Supports [configuring an agent](https://microsoft.github.io/autogen/stable/user-guide/agentchat-user-guide/tutorial/agents.html#structured-output) at agent creation. 
| -| Google ADK | **Approach 1** Both [input and output shemas can be specified for LLM Agents](https://google.github.io/adk-docs/agents/llm-agents/#structuring-data-input_schema-output_schema-output_key) at construction time. This option is specific to this agent type and other agent types do not necessarily support | -| AWS (Strands) | **Approach 2** Supports a special invocation method called [structured_output](https://strandsagents.com/latest/api-reference/agent/#strands.agent.agent.Agent.structured_output) | +| Google ADK | **Approach 1** Both [input and output schemas can be specified for LLM Agents](https://google.github.io/adk-docs/agents/llm-agents/#structuring-data-input_schema-output_schema-output_key) at construction time. This option is specific to this agent type and other agent types do not necessarily support | +| AWS (Strands) | **Approach 2** Supports a special invocation method called [structured_output](https://strandsagents.com/latest/documentation/docs/api-reference/python/agent/agent/#strands.agent.agent.Agent.structured_output) | | LangGraph | **Approach 1** Supports [configuring an agent](https://langchain-ai.github.io/langgraph/agents/agents/?h=structured#6-configure-structured-output) at agent construction time, and a [structured response](https://langchain-ai.github.io/langgraph/agents/run_agents/#output-format) can be retrieved as a special property on the agent response | | Agno | **Approach 1** Supports [configuring an agent](https://docs.agno.com/examples/getting-started/structured-output) at agent construction time | | A2A | **Informal Approach 2** Doesn't formally support schema negotiation, but [hints can be provided via metadata](https://a2a-protocol.org/latest/specification/#97-structured-data-exchange-requesting-and-providing-json) at invocation time | @@ -508,7 +508,7 @@ We need to decide what AIContent types, each agent response type will be mapped |-|-| | AutoGen | Supports a [stop 
reason](https://microsoft.github.io/autogen/stable/reference/python/autogen_agentchat.base.html#autogen_agentchat.base.TaskResult.stop_reason) which is a freeform text string | | Google ADK | [No equivalent present](https://github.com/google/adk-python/blob/main/src/google/adk/events/event.py) | -| AWS (Strands) | Exposes a [stop_reason](https://strandsagents.com/latest/api-reference/types/#strands.types.event_loop.StopReason) property on the [AgentResult](https://strandsagents.com/latest/api-reference/agent/#strands.agent.agent_result.AgentResult) class with options that are tied closely to LLM operations. | +| AWS (Strands) | Exposes a [stop_reason](https://strandsagents.com/latest/documentation/docs/api-reference/python/types/event_loop/#strands.types.event_loop.StopReason) property on the [AgentResult](https://strandsagents.com/latest/documentation/docs/api-reference/python/agent/agent_result/) class with options that are tied closely to LLM operations. | | LangGraph | No equivalent present, output contains only [messages](https://langchain-ai.github.io/langgraph/agents/run_agents/#output-format) | | Agno | [No equivalent present](https://docs.agno.com/reference/agents/run-response) | | A2A | No equivalent present, response only contains a [message](https://a2a-protocol.org/latest/specification/#64-message-object) or [task](https://a2a-protocol.org/latest/specification/#61-task-object). 
| diff --git a/dotnet/Directory.Packages.props b/dotnet/Directory.Packages.props index 9202b36f2a..5ee419114b 100644 --- a/dotnet/Directory.Packages.props +++ b/dotnet/Directory.Packages.props @@ -11,7 +11,7 @@ - + @@ -26,7 +26,7 @@ - + @@ -100,7 +100,7 @@ - + diff --git a/dotnet/nuget/nuget-package.props b/dotnet/nuget/nuget-package.props index 3fc7c3974b..4982c91ce5 100644 --- a/dotnet/nuget/nuget-package.props +++ b/dotnet/nuget/nuget-package.props @@ -2,9 +2,9 @@ 1.0.0 - $(VersionPrefix)-$(VersionSuffix).251219.1 - $(VersionPrefix)-preview.251219.1 - 1.0.0-preview.251219.1 + $(VersionPrefix)-$(VersionSuffix).260108.1 + $(VersionPrefix)-preview.260108.1 + 1.0.0-preview.260108.1 Debug;Release;Publish true diff --git a/dotnet/samples/AzureFunctions/08_ReliableStreaming/FunctionTriggers.cs b/dotnet/samples/AzureFunctions/08_ReliableStreaming/FunctionTriggers.cs index e642b64337..a6d3e9db55 100644 --- a/dotnet/samples/AzureFunctions/08_ReliableStreaming/FunctionTriggers.cs +++ b/dotnet/samples/AzureFunctions/08_ReliableStreaming/FunctionTriggers.cs @@ -96,16 +96,15 @@ public async Task CreateAsync( // Create a new agent thread AgentThread thread = agentProxy.GetNewThread(); - AgentThreadMetadata metadata = thread.GetService() - ?? 
throw new InvalidOperationException("Failed to get AgentThreadMetadata from new thread."); + string agentSessionId = thread.GetService().ToString(); - this._logger.LogInformation("Creating new agent session: {ConversationId}", metadata.ConversationId); + this._logger.LogInformation("Creating new agent session: {AgentSessionId}", agentSessionId); // Run the agent in the background (fire-and-forget) DurableAgentRunOptions options = new() { IsFireAndForget = true }; await agentProxy.RunAsync(prompt, thread, options, cancellationToken); - this._logger.LogInformation("Agent run started for session: {ConversationId}", metadata.ConversationId); + this._logger.LogInformation("Agent run started for session: {AgentSessionId}", agentSessionId); // Check Accept header to determine response format // text/plain = raw text output (ideal for terminals) @@ -114,7 +113,7 @@ public async Task CreateAsync( bool useSseFormat = acceptHeader?.Contains("text/plain", StringComparison.OrdinalIgnoreCase) != true; return await this.StreamToClientAsync( - conversationId: metadata.ConversationId!, cursor: null, useSseFormat, request.HttpContext, cancellationToken); + conversationId: agentSessionId, cursor: null, useSseFormat, request.HttpContext, cancellationToken); } /// diff --git a/dotnet/samples/AzureFunctions/08_ReliableStreaming/RedisStreamResponseHandler.cs b/dotnet/samples/AzureFunctions/08_ReliableStreaming/RedisStreamResponseHandler.cs index b0a95f49f6..21f944338a 100644 --- a/dotnet/samples/AzureFunctions/08_ReliableStreaming/RedisStreamResponseHandler.cs +++ b/dotnet/samples/AzureFunctions/08_ReliableStreaming/RedisStreamResponseHandler.cs @@ -65,11 +65,10 @@ public async ValueTask OnStreamingResponseUpdateAsync( "DurableAgentContext.Current is not set. 
This handler must be used within a durable agent context."); } - // Get conversation ID from the current thread context, which is only available in the context of + // Get session ID from the current thread context, which is only available in the context of // a durable agent execution. - string conversationId = context.CurrentThread.GetService()?.ConversationId - ?? throw new InvalidOperationException("Unable to determine conversation ID from the current thread."); - string streamKey = GetStreamKey(conversationId); + string agentSessionId = context.CurrentThread.GetService().ToString(); + string streamKey = GetStreamKey(agentSessionId); IDatabase db = this._redis.GetDatabase(); int sequenceNumber = 0; diff --git a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_CustomImplementation/Program.cs b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_CustomImplementation/Program.cs index a4e588f347..6beef64405 100644 --- a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_CustomImplementation/Program.cs +++ b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_CustomImplementation/Program.cs @@ -44,11 +44,19 @@ protected override async Task RunCoreAsync(IEnumerable responseMessages = CloneAndToUpperCase(messages, this.Name).ToList(); // Notify the thread of the input and output messages. 
- await typedThread.MessageStore.AddMessagesAsync(messages.Concat(responseMessages), cancellationToken); + var invokedContext = new ChatMessageStore.InvokedContext(messages, storeMessages) + { + ResponseMessages = responseMessages + }; + await typedThread.MessageStore.InvokedAsync(invokedContext, cancellationToken); return new AgentRunResponse { @@ -68,11 +76,19 @@ protected override async IAsyncEnumerable RunCoreStreami throw new ArgumentException($"The provided thread is not of type {nameof(CustomAgentThread)}.", nameof(thread)); } + // Get existing messages from the store + var invokingContext = new ChatMessageStore.InvokingContext(messages); + var storeMessages = await typedThread.MessageStore.InvokingAsync(invokingContext, cancellationToken); + // Clone the input messages and turn them into response messages with upper case text. List responseMessages = CloneAndToUpperCase(messages, this.Name).ToList(); // Notify the thread of the input and output messages. - await typedThread.MessageStore.AddMessagesAsync(messages.Concat(responseMessages), cancellationToken); + var invokedContext = new ChatMessageStore.InvokedContext(messages, storeMessages) + { + ResponseMessages = responseMessages + }; + await typedThread.MessageStore.InvokedAsync(invokedContext, cancellationToken); foreach (var message in responseMessages) { diff --git a/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step01_BasicTextRAG/Program.cs b/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step01_BasicTextRAG/Program.cs index 42015d87cd..9207a08182 100644 --- a/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step01_BasicTextRAG/Program.cs +++ b/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step01_BasicTextRAG/Program.cs @@ -62,7 +62,12 @@ .CreateAIAgent(new ChatClientAgentOptions { ChatOptions = new() { Instructions = "You are a helpful support specialist for Contoso Outdoors. Answer questions using the provided context and cite the source document when available." 
}, - AIContextProviderFactory = ctx => new TextSearchProvider(SearchAdapter, ctx.SerializedState, ctx.JsonSerializerOptions, textSearchOptions) + AIContextProviderFactory = ctx => new TextSearchProvider(SearchAdapter, ctx.SerializedState, ctx.JsonSerializerOptions, textSearchOptions), + // Since we are using ChatCompletion which stores chat history locally, we can also add a message removal policy + // that removes messages produced by the TextSearchProvider before they are added to the chat history, so that + // we don't bloat chat history with all the search result messages. + ChatMessageStoreFactory = ctx => new InMemoryChatMessageStore(ctx.SerializedState, ctx.JsonSerializerOptions) + .WithAIContextProviderMessageRemoval(), }); AgentThread thread = agent.GetNewThread(); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Program.cs index e9794e871a..280c84dc0d 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Program.cs @@ -89,24 +89,7 @@ public VectorChatMessageStore(VectorStore vectorStore, JsonElement serializedSto public string? 
ThreadDbKey { get; private set; } - public override async Task AddMessagesAsync(IEnumerable messages, CancellationToken cancellationToken = default) - { - this.ThreadDbKey ??= Guid.NewGuid().ToString("N"); - - var collection = this._vectorStore.GetCollection("ChatHistory"); - await collection.EnsureCollectionExistsAsync(cancellationToken); - - await collection.UpsertAsync(messages.Select(x => new ChatHistoryItem() - { - Key = this.ThreadDbKey + x.MessageId, - Timestamp = DateTimeOffset.UtcNow, - ThreadId = this.ThreadDbKey, - SerializedMessage = JsonSerializer.Serialize(x), - MessageText = x.Text - }), cancellationToken); - } - - public override async Task> GetMessagesAsync(CancellationToken cancellationToken = default) + public override async ValueTask> InvokingAsync(InvokingContext context, CancellationToken cancellationToken = default) { var collection = this._vectorStore.GetCollection("ChatHistory"); await collection.EnsureCollectionExistsAsync(cancellationToken); @@ -124,6 +107,33 @@ public override async Task> GetMessagesAsync(Cancellati return messages; } + public override async ValueTask InvokedAsync(InvokedContext context, CancellationToken cancellationToken = default) + { + // Don't store messages if the request failed. + if (context.InvokeException is not null) + { + return; + } + + this.ThreadDbKey ??= Guid.NewGuid().ToString("N"); + + var collection = this._vectorStore.GetCollection("ChatHistory"); + await collection.EnsureCollectionExistsAsync(cancellationToken); + + // Add both request and response messages to the store + // Optionally messages produced by the AIContextProvider can also be persisted (not shown). + var allNewMessages = context.RequestMessages.Concat(context.AIContextProviderMessages ?? []).Concat(context.ResponseMessages ?? 
[]); + + await collection.UpsertAsync(allNewMessages.Select(x => new ChatHistoryItem() + { + Key = this.ThreadDbKey + x.MessageId, + Timestamp = DateTimeOffset.UtcNow, + ThreadId = this.ThreadDbKey, + SerializedMessage = JsonSerializer.Serialize(x), + MessageText = x.Text + }), cancellationToken); + } + public override JsonElement Serialize(JsonSerializerOptions? jsonSerializerOptions = null) => // We have to serialize the thread id, so that on deserialization we can retrieve the messages using the same thread id. JsonSerializer.SerializeToElement(this.ThreadDbKey); diff --git a/dotnet/samples/HostedAgents/AgentWithHostedMCP/AgentWithHostedMCP.csproj b/dotnet/samples/HostedAgents/AgentWithHostedMCP/AgentWithHostedMCP.csproj index d2c0ea70f8..1244b81542 100644 --- a/dotnet/samples/HostedAgents/AgentWithHostedMCP/AgentWithHostedMCP.csproj +++ b/dotnet/samples/HostedAgents/AgentWithHostedMCP/AgentWithHostedMCP.csproj @@ -39,8 +39,8 @@ - - + + diff --git a/dotnet/samples/HostedAgents/AgentWithHostedMCP/Program.cs b/dotnet/samples/HostedAgents/AgentWithHostedMCP/Program.cs index 9cbea8b73a..4dffdf92bd 100644 --- a/dotnet/samples/HostedAgents/AgentWithHostedMCP/Program.cs +++ b/dotnet/samples/HostedAgents/AgentWithHostedMCP/Program.cs @@ -9,7 +9,7 @@ using Azure.Identity; using Microsoft.Agents.AI; using Microsoft.Extensions.AI; -using OpenAI; +using OpenAI.Responses; var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? 
"gpt-4o-mini"; @@ -25,7 +25,7 @@ AIAgent agent = new AzureOpenAIClient( new Uri(endpoint), new DefaultAzureCredential()) - .GetOpenAIResponseClient(deploymentName) + .GetResponsesClient(deploymentName) .CreateAIAgent( instructions: "You answer questions by searching the Microsoft Learn content only.", name: "MicrosoftLearnAgent", diff --git a/dotnet/samples/HostedAgents/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj b/dotnet/samples/HostedAgents/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj index 54791c1992..03ffaf1824 100644 --- a/dotnet/samples/HostedAgents/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj +++ b/dotnet/samples/HostedAgents/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj @@ -38,8 +38,8 @@ - - + + diff --git a/dotnet/samples/HostedAgents/AgentWithTextSearchRag/Program.cs b/dotnet/samples/HostedAgents/AgentWithTextSearchRag/Program.cs index d8be12c5b5..b197ffeefc 100644 --- a/dotnet/samples/HostedAgents/AgentWithTextSearchRag/Program.cs +++ b/dotnet/samples/HostedAgents/AgentWithTextSearchRag/Program.cs @@ -8,9 +8,8 @@ using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; -using Microsoft.Agents.AI.Data; using Microsoft.Extensions.AI; -using OpenAI; +using OpenAI.Chat; var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; @@ -28,7 +27,10 @@ .GetChatClient(deploymentName) .CreateAIAgent(new ChatClientAgentOptions { - Instructions = "You are a helpful support specialist for Contoso Outdoors. Answer questions using the provided context and cite the source document when available.", + ChatOptions = new ChatOptions + { + Instructions = "You are a helpful support specialist for Contoso Outdoors. 
Answer questions using the provided context and cite the source document when available.", + }, AIContextProviderFactory = ctx => new TextSearchProvider(MockSearchAsync, ctx.SerializedState, ctx.JsonSerializerOptions, textSearchOptions) }); diff --git a/dotnet/samples/HostedAgents/AgentsInWorkflows/AgentsInWorkflows.csproj b/dotnet/samples/HostedAgents/AgentsInWorkflows/AgentsInWorkflows.csproj index ad23b11b17..a434e07d33 100644 --- a/dotnet/samples/HostedAgents/AgentsInWorkflows/AgentsInWorkflows.csproj +++ b/dotnet/samples/HostedAgents/AgentsInWorkflows/AgentsInWorkflows.csproj @@ -38,7 +38,7 @@ - + diff --git a/dotnet/src/Microsoft.Agents.AI.A2A/A2AAgent.cs b/dotnet/src/Microsoft.Agents.AI.A2A/A2AAgent.cs index cf88a89177..96a8856dea 100644 --- a/dotnet/src/Microsoft.Agents.AI.A2A/A2AAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI.A2A/A2AAgent.cs @@ -24,7 +24,7 @@ namespace Microsoft.Agents.AI.A2A; /// Support for tasks will be added later as part of the long-running /// executions work. /// -internal sealed class A2AAgent : AIAgent +public sealed class A2AAgent : AIAgent { private readonly A2AClient _a2aClient; private readonly string? 
_id; @@ -84,9 +84,13 @@ protected override async Task RunCoreAsync(IEnumerable RunCoreStreami // a2aSseEvents = this._a2aClient.SubscribeToTaskAsync(token.TaskId, cancellationToken).ConfigureAwait(false); } - var a2aMessage = CreateA2AMessage(typedThread, messages); + MessageSendParams sendParams = new() + { + Message = CreateA2AMessage(typedThread, messages), + Metadata = options?.AdditionalProperties?.ToA2AMetadata() + }; - a2aSseEvents = this._a2aClient.SendMessageStreamingAsync(new MessageSendParams { Message = a2aMessage }, cancellationToken).ConfigureAwait(false); + a2aSseEvents = this._a2aClient.SendMessageStreamingAsync(sendParams, cancellationToken).ConfigureAwait(false); this._logger.LogAgentChatClientInvokedAgent(nameof(RunStreamingAsync), this.Id, this.Name); @@ -198,10 +206,10 @@ protected override async IAsyncEnumerable RunCoreStreami protected override string? IdCore => this._id; /// - public override string? Name => this._name ?? base.Name; + public override string? Name => this._name; /// - public override string? Description => this._description ?? base.Description; + public override string? Description => this._description; private A2AAgentThread GetA2AThread(AgentThread? thread, AgentRunOptions? options) { diff --git a/dotnet/src/Microsoft.Agents.AI.A2A/Extensions/A2AMetadataExtensions.cs b/dotnet/src/Microsoft.Agents.AI.A2A/Extensions/A2AMetadataExtensions.cs index c0dedbd541..3c81c6abe8 100644 --- a/dotnet/src/Microsoft.Agents.AI.A2A/Extensions/A2AMetadataExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.A2A/Extensions/A2AMetadataExtensions.cs @@ -14,6 +14,9 @@ internal static class A2AMetadataExtensions /// /// Converts a dictionary of metadata to an . /// + /// + /// This method can be replaced by the one from A2A SDK once it is public. + /// /// The metadata dictionary to convert. /// The converted , or null if the input is null or empty. internal static AdditionalPropertiesDictionary? ToAdditionalProperties(this Dictionary? 
metadata) diff --git a/dotnet/src/Microsoft.Agents.AI.A2A/Extensions/AdditionalPropertiesDictionaryExtensions.cs b/dotnet/src/Microsoft.Agents.AI.A2A/Extensions/AdditionalPropertiesDictionaryExtensions.cs new file mode 100644 index 0000000000..a3340d2ca8 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.A2A/Extensions/AdditionalPropertiesDictionaryExtensions.cs @@ -0,0 +1,44 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Generic; +using System.Text.Json; +using Microsoft.Agents.AI; + +namespace Microsoft.Extensions.AI; + +/// +/// Extension methods for AdditionalPropertiesDictionary. +/// +internal static class AdditionalPropertiesDictionaryExtensions +{ + /// + /// Converts an to a dictionary of values suitable for A2A metadata. + /// + /// + /// This method can be replaced by the one from A2A SDK once it is available. + /// + /// The additional properties dictionary to convert, or null. + /// A dictionary of JSON elements representing the metadata, or null if the input is null or empty. + internal static Dictionary? ToA2AMetadata(this AdditionalPropertiesDictionary? 
additionalProperties) + { + if (additionalProperties is not { Count: > 0 }) + { + return null; + } + + var metadata = new Dictionary(); + + foreach (var kvp in additionalProperties) + { + if (kvp.Value is JsonElement) + { + metadata[kvp.Key] = (JsonElement)kvp.Value!; + continue; + } + + metadata[kvp.Key] = JsonSerializer.SerializeToElement(kvp.Value, A2AJsonUtilities.DefaultOptions.GetTypeInfo(typeof(object))); + } + + return metadata; + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentRunResponse.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentRunResponse.cs index 001cfd9469..7828b5c62d 100644 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentRunResponse.cs +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentRunResponse.cs @@ -291,6 +291,15 @@ public AgentRunResponseUpdate[] ToAgentRunResponseUpdates() return updates; } + /// + /// Deserializes the response text into the given type. + /// + /// The output type to deserialize into. + /// The result as the requested type. + /// The result is not parsable into the requested type. + public T Deserialize() => + this.Deserialize(AgentAbstractionsJsonUtilities.DefaultOptions); + /// /// Deserializes the response text into the given type using the specified serializer options. /// @@ -311,6 +320,15 @@ public T Deserialize(JsonSerializerOptions serializerOptions) }; } + /// + /// Tries to deserialize response text into the given type. + /// + /// The output type to deserialize into. + /// The parsed structured output. + /// if parsing was successful; otherwise, . + public bool TryDeserialize([NotNullWhen(true)] out T? structuredOutput) => + this.TryDeserialize(AgentAbstractionsJsonUtilities.DefaultOptions, out structuredOutput); + /// /// Tries to deserialize response text into the given type using the specified serializer options. 
/// diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentThread.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentThread.cs index 4794457f41..0a3301d05f 100644 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentThread.cs +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentThread.cs @@ -68,7 +68,7 @@ public virtual JsonElement Serialize(JsonSerializerOptions? jsonSerializerOption /// is . /// /// The purpose of this method is to allow for the retrieval of strongly-typed services that might be provided by the , - /// including itself or any services it might be wrapping. For example, to access the for the instance, + /// including itself or any services it might be wrapping. For example, to access a if available for the instance, /// may be used to request it. /// public virtual object? GetService(Type serviceType, object? serviceKey = null) diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentThreadMetadata.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentThreadMetadata.cs deleted file mode 100644 index 3a2d506745..0000000000 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentThreadMetadata.cs +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. - -using System.Diagnostics; - -namespace Microsoft.Agents.AI; - -/// -/// Provides metadata information about an instance. -/// -[DebuggerDisplay("ConversationId = {ConversationId}")] -public class AgentThreadMetadata -{ - /// - /// Initializes a new instance of the class. - /// - /// The unique identifier for the conversation, if available. - public AgentThreadMetadata(string? conversationId) - { - this.ConversationId = conversationId; - } - - /// - /// Gets the unique identifier for the conversation, if available. - /// - /// - /// The meaning of this ID may vary depending on the agent implementation. - /// - public string? 
ConversationId { get; } -} diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStore.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStore.cs index 9f89031464..d28cd191b7 100644 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStore.cs +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStore.cs @@ -32,8 +32,9 @@ namespace Microsoft.Agents.AI; public abstract class ChatMessageStore { /// - /// Asynchronously retrieves all messages from the store that should be provided as context for the next agent invocation. + /// Called at the start of agent invocation to retrieve all messages from the store that should be provided as context for the next agent invocation. /// + /// Contains the request context including the caller provided messages that will be used by the agent for this invocation. /// The to monitor for cancellation requests. The default is . /// /// A task that represents the asynchronous operation. The task result contains a collection of @@ -59,20 +60,19 @@ public abstract class ChatMessageStore /// and context management. /// /// - public abstract Task> GetMessagesAsync(CancellationToken cancellationToken = default); + public abstract ValueTask> InvokingAsync(InvokingContext context, CancellationToken cancellationToken = default); /// - /// Asynchronously adds new messages to the store. + /// Called at the end of the agent invocation to add new messages to the store. /// - /// The collection of chat messages to add to the store. + /// Contains the invocation context including request messages, response messages, and any exception that occurred. /// The to monitor for cancellation requests. The default is . /// A task that represents the asynchronous add operation. - /// is . /// /// /// Messages should be added in the order they were generated to maintain proper chronological sequence. 
/// The store is responsible for preserving message ordering and ensuring that subsequent calls to - /// return messages in the correct chronological order. + /// return messages in the correct chronological order. /// /// /// Implementations may perform additional processing during message addition, such as: @@ -83,8 +83,12 @@ public abstract class ChatMessageStore /// Updating indices or search capabilities /// /// + /// + /// This method is called regardless of whether the invocation succeeded or failed. + /// To check if the invocation was successful, inspect the property. + /// /// - public abstract Task AddMessagesAsync(IEnumerable messages, CancellationToken cancellationToken = default); + public abstract ValueTask InvokedAsync(InvokedContext context, CancellationToken cancellationToken = default); /// /// Serializes the current object's state to a using the specified serialization options. @@ -121,4 +125,100 @@ public abstract class ChatMessageStore /// public TService? GetService(object? serviceKey = null) => this.GetService(typeof(TService), serviceKey) is TService service ? service : default; + + /// + /// Contains the context information provided to . + /// + /// + /// This class provides context about the invocation before the messages are retrieved from the store, + /// including the new messages that will be used. Stores can use this information to determine what + /// messages should be retrieved for the invocation. + /// + public sealed class InvokingContext + { + /// + /// Initializes a new instance of the class with the specified request messages. + /// + /// The new messages to be used by the agent for this invocation. + /// is . + public InvokingContext(IEnumerable requestMessages) + { + this.RequestMessages = requestMessages ?? throw new ArgumentNullException(nameof(requestMessages)); + } + + /// + /// Gets the caller provided messages that will be used by the agent for this invocation. 
+ /// + /// + /// A collection of instances representing new messages that were provided by the caller. + /// + public IEnumerable RequestMessages { get; } + } + + /// + /// Contains the context information provided to . + /// + /// + /// This class provides context about a completed agent invocation, including both the + /// request messages that were used and the response messages that were generated. It also indicates + /// whether the invocation succeeded or failed. + /// + public sealed class InvokedContext + { + /// + /// Initializes a new instance of the class with the specified request messages. + /// + /// The caller provided messages that were used by the agent for this invocation. + /// The messages retrieved from the for this invocation. + /// is . + public InvokedContext(IEnumerable requestMessages, IEnumerable chatMessageStoreMessages) + { + this.RequestMessages = Throw.IfNull(requestMessages); + this.ChatMessageStoreMessages = chatMessageStoreMessages; + } + + /// + /// Gets the caller provided messages that were used by the agent for this invocation. + /// + /// + /// A collection of instances representing new messages that were provided by the caller. + /// This does not include any supplied messages. + /// + public IEnumerable RequestMessages { get; } + + /// + /// Gets the messages retrieved from the for this invocation, if any. + /// + /// + /// A collection of instances that were retrieved from the , + /// and were used by the agent as part of the invocation. + /// + public IEnumerable ChatMessageStoreMessages { get; } + + /// + /// Gets or sets the messages provided by the for this invocation, if any. + /// + /// + /// A collection of instances that were provided by the , + /// and were used by the agent as part of the invocation. + /// + public IEnumerable? AIContextProviderMessages { get; set; } + + /// + /// Gets the collection of response messages generated during this invocation if the invocation succeeded. 
+ /// + /// + /// A collection of instances representing the response, + /// or if the invocation failed or did not produce response messages. + /// + public IEnumerable? ResponseMessages { get; set; } + + /// + /// Gets the that was thrown during the invocation, if the invocation failed. + /// + /// + /// The exception that caused the invocation to fail, or if the invocation succeeded. + /// + public Exception? InvokeException { get; set; } + } } diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStoreExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStoreExtensions.cs new file mode 100644 index 0000000000..a205fc1d9e --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStoreExtensions.cs @@ -0,0 +1,50 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI; + +/// +/// Contains extension methods for the class. +/// +public static class ChatMessageStoreExtensions +{ + /// + /// Adds message filtering to an existing store, so that messages passed to the store and messages produced by the store + /// can be filtered, updated or replaced. + /// + /// The store to add the message filter to. + /// An optional filter function to apply to messages produced by the store. If null, no filter is applied at this + /// stage. + /// An optional filter function to apply to the invoked context messages before they are passed to the store. If null, no + /// filter is applied at this stage. + /// The with filtering applied. + public static ChatMessageStore WithMessageFilters( + this ChatMessageStore store, + Func, IEnumerable>? invokingMessagesFilter = null, + Func? 
invokedMessagesFilter = null) + { + return new ChatMessageStoreMessageFilter( + innerChatMessageStore: store, + invokingMessagesFilter: invokingMessagesFilter, + invokedMessagesFilter: invokedMessagesFilter); + } + + /// + /// Decorates the provided chat message store so that it does not store messages produced by any . + /// + /// The store to add the message filter to. + /// A new instance that filters out messages so they do not get stored. + public static ChatMessageStore WithAIContextProviderMessageRemoval(this ChatMessageStore store) + { + return new ChatMessageStoreMessageFilter( + innerChatMessageStore: store, + invokedMessagesFilter: (ctx) => + { + ctx.AIContextProviderMessages = null; + return ctx; + }); + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStoreMessageFilter.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStoreMessageFilter.cs new file mode 100644 index 0000000000..e58f233067 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStoreMessageFilter.cs @@ -0,0 +1,74 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.AI; +using Microsoft.Shared.Diagnostics; + +namespace Microsoft.Agents.AI; + +/// +/// A decorator that allows filtering the messages +/// passed into and out of an inner . +/// +public sealed class ChatMessageStoreMessageFilter : ChatMessageStore +{ + private readonly ChatMessageStore _innerChatMessageStore; + private readonly Func, IEnumerable>? _invokingMessagesFilter; + private readonly Func? _invokedMessagesFilter; + + /// + /// Initializes a new instance of the class. + /// + /// Use this constructor to customize how messages are filtered before and after invocation by + /// providing appropriate filter functions. At least one filter function must be provided; if both are null the + constructor throws an ArgumentException.
+ /// The underlying chat message store to be wrapped. Cannot be null. + /// An optional filter function to apply to messages before they are invoked. If null, no filter is applied at this + /// stage. + /// An optional filter function to apply to the invocation context after messages have been invoked. If null, no + /// filter is applied at this stage. + /// Thrown if innerChatMessageStore is null. + public ChatMessageStoreMessageFilter( + ChatMessageStore innerChatMessageStore, + Func, IEnumerable>? invokingMessagesFilter = null, + Func? invokedMessagesFilter = null) + { + this._innerChatMessageStore = Throw.IfNull(innerChatMessageStore); + + if (invokingMessagesFilter == null && invokedMessagesFilter == null) + { + throw new ArgumentException("At least one filter function, invokingMessagesFilter or invokedMessagesFilter, must be provided."); + } + + this._invokingMessagesFilter = invokingMessagesFilter; + this._invokedMessagesFilter = invokedMessagesFilter; + } + + /// + public override async ValueTask> InvokingAsync(InvokingContext context, CancellationToken cancellationToken = default) + { + var messages = await this._innerChatMessageStore.InvokingAsync(context, cancellationToken).ConfigureAwait(false); + return this._invokingMessagesFilter != null ? this._invokingMessagesFilter(messages) : messages; + } + + /// + public override ValueTask InvokedAsync(InvokedContext context, CancellationToken cancellationToken = default) + { + if (this._invokedMessagesFilter != null) + { + context = this._invokedMessagesFilter(context); + } + + return this._innerChatMessageStore.InvokedAsync(context, cancellationToken); + } + + /// + public override JsonElement Serialize(JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return this._innerChatMessageStore.Serialize(jsonSerializerOptions); + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/InMemoryChatMessageStore.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/InMemoryChatMessageStore.cs index 79d303207c..f7f4522f8f 100644 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/InMemoryChatMessageStore.cs +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/InMemoryChatMessageStore.cs @@ -134,27 +134,36 @@ public ChatMessage this[int index] } /// - public override async Task AddMessagesAsync(IEnumerable messages, CancellationToken cancellationToken = default) + public override async ValueTask> InvokingAsync(InvokingContext context, CancellationToken cancellationToken = default) { - _ = Throw.IfNull(messages); + _ = Throw.IfNull(context); - this._messages.AddRange(messages); - - if (this.ReducerTriggerEvent is ChatReducerTriggerEvent.AfterMessageAdded && this.ChatReducer is not null) + if (this.ReducerTriggerEvent is ChatReducerTriggerEvent.BeforeMessagesRetrieval && this.ChatReducer is not null) { this._messages = (await this.ChatReducer.ReduceAsync(this._messages, cancellationToken).ConfigureAwait(false)).ToList(); } + + return this._messages; } /// - public override async Task> GetMessagesAsync(CancellationToken cancellationToken = default) + public override async ValueTask InvokedAsync(InvokedContext context, CancellationToken cancellationToken = default) { - if (this.ReducerTriggerEvent is ChatReducerTriggerEvent.BeforeMessagesRetrieval && this.ChatReducer is not null) + _ = Throw.IfNull(context); + + if (context.InvokeException is not null) { - this._messages = (await this.ChatReducer.ReduceAsync(this._messages, cancellationToken).ConfigureAwait(false)).ToList(); + return; } - return this._messages; + // Add request, AI context provider, and response messages to the store + var allNewMessages = context.RequestMessages.Concat(context.AIContextProviderMessages ?? 
[]).Concat(context.ResponseMessages ?? []); + this._messages.AddRange(allNewMessages); + + if (this.ReducerTriggerEvent is ChatReducerTriggerEvent.AfterMessageAdded && this.ChatReducer is not null) + { + this._messages = (await this.ChatReducer.ReduceAsync(this._messages, cancellationToken).ConfigureAwait(false)).ToList(); + } } /// @@ -221,7 +230,7 @@ public enum ChatReducerTriggerEvent { /// /// Trigger the reducer when a new message is added. - /// will only complete when reducer processing is done. + /// will only complete when reducer processing is done. /// AfterMessageAdded, diff --git a/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosChatMessageStore.cs b/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosChatMessageStore.cs index 03334d90f9..5c2c23ff9e 100644 --- a/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosChatMessageStore.cs +++ b/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosChatMessageStore.cs @@ -287,7 +287,7 @@ public static CosmosChatMessageStore CreateFromSerializedState(CosmosClient cosm } /// - public override async Task> GetMessagesAsync(CancellationToken cancellationToken = default) + public override async ValueTask> InvokingAsync(InvokingContext context, CancellationToken cancellationToken = default) { #pragma warning disable CA1513 // Use ObjectDisposedException.ThrowIf - not available on all target frameworks if (this._disposed) @@ -347,11 +347,14 @@ public override async Task> GetMessagesAsync(Cancellati } /// - public override async Task AddMessagesAsync(IEnumerable messages, CancellationToken cancellationToken = default) + public override async ValueTask InvokedAsync(InvokedContext context, CancellationToken cancellationToken = default) { - if (messages is null) + Throw.IfNull(context); + + if (context.InvokeException is not null) { - throw new ArgumentNullException(nameof(messages)); + // Do not store messages if there was an exception during invocation + return; } #pragma warning disable CA1513 // Use 
ObjectDisposedException.ThrowIf - not available on all target frameworks @@ -361,7 +364,7 @@ public override async Task AddMessagesAsync(IEnumerable messages, C } #pragma warning restore CA1513 - var messageList = messages as IReadOnlyCollection ?? messages.ToList(); + var messageList = context.RequestMessages.Concat(context.AIContextProviderMessages ?? []).Concat(context.ResponseMessages ?? []).ToList(); if (messageList.Count == 0) { return; @@ -381,7 +384,7 @@ public override async Task AddMessagesAsync(IEnumerable messages, C /// /// Adds multiple messages using transactional batch operations for atomicity. /// - private async Task AddMessagesInBatchAsync(IReadOnlyCollection messages, CancellationToken cancellationToken) + private async Task AddMessagesInBatchAsync(List messages, CancellationToken cancellationToken) { var currentTimestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds(); diff --git a/dotnet/src/Microsoft.Agents.AI.DurableTask/CHANGELOG.md b/dotnet/src/Microsoft.Agents.AI.DurableTask/CHANGELOG.md index ccc6aa7181..8f8f64fe5c 100644 --- a/dotnet/src/Microsoft.Agents.AI.DurableTask/CHANGELOG.md +++ b/dotnet/src/Microsoft.Agents.AI.DurableTask/CHANGELOG.md @@ -6,6 +6,7 @@ - Added TTL configuration for durable agent entities ([#2679](https://github.com/microsoft/agent-framework/pull/2679)) - Switch to new "Run" method name ([#2843](https://github.com/microsoft/agent-framework/pull/2843)) +- Removed AgentThreadMetadata and used AgentSessionId directly instead ([#3067](https://github.com/microsoft/agent-framework/pull/3067)) ## v1.0.0-preview.251204.1 diff --git a/dotnet/src/Microsoft.Agents.AI.DurableTask/DurableAgentThread.cs b/dotnet/src/Microsoft.Agents.AI.DurableTask/DurableAgentThread.cs index 32dea2cb18..98dc8ea4b1 100644 --- a/dotnet/src/Microsoft.Agents.AI.DurableTask/DurableAgentThread.cs +++ b/dotnet/src/Microsoft.Agents.AI.DurableTask/DurableAgentThread.cs @@ -55,12 +55,6 @@ internal static DurableAgentThread Deserialize(JsonElement
serializedThread, Jso /// public override object? GetService(Type serviceType, object? serviceKey = null) { - // This is a common convention for MAF agents. - if (serviceType == typeof(AgentThreadMetadata)) - { - return new AgentThreadMetadata(conversationId: this.SessionId.ToString()); - } - if (serviceType == typeof(AgentSessionId)) { return this.SessionId; diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.A2A/AIAgentExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.A2A/AIAgentExtensions.cs index c54af66bb8..499d724b1a 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.A2A/AIAgentExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.A2A/AIAgentExtensions.cs @@ -43,10 +43,14 @@ async Task OnMessageReceivedAsync(MessageSendParams messageSendPara { var contextId = messageSendParams.Message.ContextId ?? Guid.NewGuid().ToString("N"); var thread = await hostAgent.GetOrCreateThreadAsync(contextId, cancellationToken).ConfigureAwait(false); + var options = messageSendParams.Metadata is not { Count: > 0 } + ? null + : new AgentRunOptions { AdditionalProperties = messageSendParams.Metadata.ToAdditionalProperties() }; var response = await hostAgent.RunAsync( messageSendParams.ToChatMessages(), thread: thread, + options: options, cancellationToken: cancellationToken).ConfigureAwait(false); await hostAgent.SaveThreadAsync(contextId, thread, cancellationToken).ConfigureAwait(false); @@ -56,7 +60,8 @@ async Task OnMessageReceivedAsync(MessageSendParams messageSendPara MessageId = response.ResponseId ?? 
Guid.NewGuid().ToString("N"), ContextId = contextId, Role = MessageRole.Agent, - Parts = parts + Parts = parts, + Metadata = response.AdditionalProperties?.ToA2AMetadata() }; } } diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.A2A/Converters/A2AMetadataExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.A2A/Converters/A2AMetadataExtensions.cs new file mode 100644 index 0000000000..010264bb65 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.A2A/Converters/A2AMetadataExtensions.cs @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Generic; +using System.Text.Json; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI.Hosting.A2A.Converters; + +/// +/// Extension methods for A2A metadata dictionary. +/// +internal static class A2AMetadataExtensions +{ + /// + /// Converts a dictionary of metadata to an . + /// + /// + /// This method can be replaced by the one from A2A SDK once it is public. + /// + /// The metadata dictionary to convert. + /// The converted , or null if the input is null or empty. + internal static AdditionalPropertiesDictionary? ToAdditionalProperties(this Dictionary? metadata) + { + if (metadata is not { Count: > 0 }) + { + return null; + } + + var additionalProperties = new AdditionalPropertiesDictionary(); + foreach (var kvp in metadata) + { + additionalProperties[kvp.Key] = kvp.Value; + } + return additionalProperties; + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.A2A/Converters/AdditionalPropertiesDictionaryExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.A2A/Converters/AdditionalPropertiesDictionaryExtensions.cs new file mode 100644 index 0000000000..d46ef72d1f --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.A2A/Converters/AdditionalPropertiesDictionaryExtensions.cs @@ -0,0 +1,45 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Collections.Generic; +using System.Text.Json; +using A2A; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI.Hosting.A2A.Converters; + +/// +/// Extension methods for AdditionalPropertiesDictionary. +/// +internal static class AdditionalPropertiesDictionaryExtensions +{ + /// + /// Converts an to a dictionary of values suitable for A2A metadata. + /// + /// + /// This method can be replaced by the one from A2A SDK once it is available. + /// + /// The additional properties dictionary to convert, or null. + /// A dictionary of JSON elements representing the metadata, or null if the input is null or empty. + internal static Dictionary? ToA2AMetadata(this AdditionalPropertiesDictionary? additionalProperties) + { + if (additionalProperties is not { Count: > 0 }) + { + return null; + } + + var metadata = new Dictionary(); + + foreach (var kvp in additionalProperties) + { + if (kvp.Value is JsonElement) + { + metadata[kvp.Key] = (JsonElement)kvp.Value!; + continue; + } + + metadata[kvp.Key] = JsonSerializer.SerializeToElement(kvp.Value, A2AJsonUtilities.DefaultOptions.GetTypeInfo(typeof(object))); + } + + return metadata; + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Purview/README.md b/dotnet/src/Microsoft.Agents.AI.Purview/README.md index 3e46ceff65..1ee2a25826 100644 --- a/dotnet/src/Microsoft.Agents.AI.Purview/README.md +++ b/dotnet/src/Microsoft.Agents.AI.Purview/README.md @@ -50,7 +50,7 @@ TokenCredential browserCredential = new InteractiveBrowserCredential( IChatClient client = new AzureOpenAIClient( new Uri(endpoint), new AzureCliCredential()) - .GetOpenAIResponseClient(deploymentName) + .GetResponsesClient(deploymentName) .AsIChatClient() .AsBuilder() .WithPurview(browserCredential, new PurviewSettings("My Sample App")) @@ -198,7 +198,7 @@ Use the chat middleware when you attach directly to a chat client (e.g. 
minimal IChatClient client = new AzureOpenAIClient( new Uri(endpoint), new AzureCliCredential()) - .GetOpenAIResponseClient(deploymentName) + .GetResponsesClient(deploymentName) .AsIChatClient() .AsBuilder() .WithPurview(browserCredential, new PurviewSettings("Agent Framework Test App")) diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowErrorEvent.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowErrorEvent.cs index 7af7efd0b9..aec9e8130c 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowErrorEvent.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowErrorEvent.cs @@ -10,4 +10,10 @@ namespace Microsoft.Agents.AI.Workflows; /// /// Optionally, the representing the error. /// -public class WorkflowErrorEvent(Exception? e) : WorkflowEvent(e); +public class WorkflowErrorEvent(Exception? e) : WorkflowEvent(e) +{ + /// + /// Gets the exception that caused the current operation to fail, if one occurred. + /// + public Exception? Exception => this.Data as Exception; +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs index 7c0479b85e..4e5ee86070 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs @@ -18,11 +18,12 @@ internal sealed class WorkflowHostAgent : AIAgent private readonly string? _id; private readonly CheckpointManager? _checkpointManager; private readonly IWorkflowExecutionEnvironment _executionEnvironment; + private readonly bool _includeExceptionDetails; private readonly Task _describeTask; private readonly ConcurrentDictionary _assignedRunIds = []; - public WorkflowHostAgent(Workflow workflow, string? id = null, string? name = null, string? description = null, CheckpointManager? checkpointManager = null, IWorkflowExecutionEnvironment? executionEnvironment = null) + public WorkflowHostAgent(Workflow workflow, string? id = null, string? 
name = null, string? description = null, CheckpointManager? checkpointManager = null, IWorkflowExecutionEnvironment? executionEnvironment = null, bool includeExceptionDetails = false) { this._workflow = Throw.IfNull(workflow); @@ -30,6 +31,7 @@ public WorkflowHostAgent(Workflow workflow, string? id = null, string? name = nu ? InProcessExecution.Concurrent : InProcessExecution.OffThread); this._checkpointManager = checkpointManager; + this._includeExceptionDetails = includeExceptionDetails; this._id = id; this.Name = name; @@ -61,12 +63,12 @@ private async ValueTask ValidateWorkflowAsync() protocol.ThrowIfNotChatProtocol(); } - public override AgentThread GetNewThread() => new WorkflowThread(this._workflow, this.GenerateNewId(), this._executionEnvironment, this._checkpointManager); + public override AgentThread GetNewThread() => new WorkflowThread(this._workflow, this.GenerateNewId(), this._executionEnvironment, this._checkpointManager, this._includeExceptionDetails); public override AgentThread DeserializeThread(JsonElement serializedThread, JsonSerializerOptions? jsonSerializerOptions = null) - => new WorkflowThread(this._workflow, serializedThread, this._executionEnvironment, this._checkpointManager, jsonSerializerOptions); + => new WorkflowThread(this._workflow, serializedThread, this._executionEnvironment, this._checkpointManager, this._includeExceptionDetails, jsonSerializerOptions); - private async ValueTask UpdateThreadAsync(IEnumerable messages, AgentThread? thread = null, CancellationToken cancellationToken = default) + private ValueTask UpdateThreadAsync(IEnumerable messages, AgentThread? 
thread = null, CancellationToken cancellationToken = default) { thread ??= this.GetNewThread(); @@ -75,8 +77,10 @@ private async ValueTask UpdateThreadAsync(IEnumerable(workflowThread); } protected override async diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostingExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostingExtensions.cs index d48e99bf6e..c217039a35 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostingExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostingExtensions.cs @@ -21,6 +21,8 @@ public static class WorkflowHostingExtensions /// Specify the execution environment to use when running the workflows. See /// , and /// for the in-process environments. + /// If , will include + /// in the representing the workflow error. /// public static AIAgent AsAgent( this Workflow workflow, @@ -28,9 +30,10 @@ public static AIAgent AsAgent( string? name = null, string? description = null, CheckpointManager? checkpointManager = null, - IWorkflowExecutionEnvironment? executionEnvironment = null) + IWorkflowExecutionEnvironment? executionEnvironment = null, + bool includeExceptionDetails = false) { - return new WorkflowHostAgent(workflow, id, name, description, checkpointManager, executionEnvironment); + return new WorkflowHostAgent(workflow, id, name, description, checkpointManager, executionEnvironment, includeExceptionDetails); } internal static FunctionCallContent ToFunctionCall(this ExternalRequest request) diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowMessageStore.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowMessageStore.cs index 39c83bcadf..87cef04e76 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowMessageStore.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowMessageStore.cs @@ -1,6 +1,7 @@ // Copyright (c) Microsoft. All rights reserved. 
using System.Collections.Generic; +using System.Linq; using System.Text.Json; using System.Threading; using System.Threading.Tasks; @@ -45,14 +46,21 @@ internal sealed class StoreState internal void AddMessages(params IEnumerable messages) => this._chatMessages.AddRange(messages); - public override Task AddMessagesAsync(IEnumerable messages, CancellationToken cancellationToken = default) + public override ValueTask> InvokingAsync(InvokingContext context, CancellationToken cancellationToken = default) + => new(this._chatMessages.AsReadOnly()); + + public override ValueTask InvokedAsync(InvokedContext context, CancellationToken cancellationToken = default) { - this._chatMessages.AddRange(messages); + if (context.InvokeException is not null) + { + return default; + } - return Task.CompletedTask; - } + var allNewMessages = context.RequestMessages.Concat(context.AIContextProviderMessages ?? []).Concat(context.ResponseMessages ?? []); + this._chatMessages.AddRange(allNewMessages); - public override Task> GetMessagesAsync(CancellationToken cancellationToken = default) => Task.FromResult>(this._chatMessages.AsReadOnly()); + return default; + } public IEnumerable GetFromBookmark() { diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowThread.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowThread.cs index d27de6bd5c..94144831e0 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowThread.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowThread.cs @@ -3,6 +3,7 @@ using System; using System.Collections.Generic; using System.Linq; +using System.Reflection; using System.Runtime.CompilerServices; using System.Text.Json; using System.Threading; @@ -17,14 +18,16 @@ internal sealed class WorkflowThread : AgentThread { private readonly Workflow _workflow; private readonly IWorkflowExecutionEnvironment _executionEnvironment; + private readonly bool _includeExceptionDetails; private readonly CheckpointManager _checkpointManager; private readonly 
InMemoryCheckpointManager? _inMemoryCheckpointManager; - public WorkflowThread(Workflow workflow, string runId, IWorkflowExecutionEnvironment executionEnvironment, CheckpointManager? checkpointManager = null) + public WorkflowThread(Workflow workflow, string runId, IWorkflowExecutionEnvironment executionEnvironment, CheckpointManager? checkpointManager = null, bool includeExceptionDetails = false) { this._workflow = Throw.IfNull(workflow); this._executionEnvironment = Throw.IfNull(executionEnvironment); + this._includeExceptionDetails = includeExceptionDetails; // If the user provided an external checkpoint manager, use that, otherwise rely on an in-memory one. // TODO: Implement persist-only-last functionality for in-memory checkpoint manager, to avoid unbounded @@ -35,7 +38,7 @@ public WorkflowThread(Workflow workflow, string runId, IWorkflowExecutionEnviron this.MessageStore = new WorkflowMessageStore(); } - public WorkflowThread(Workflow workflow, JsonElement serializedThread, IWorkflowExecutionEnvironment executionEnvironment, CheckpointManager? checkpointManager = null, JsonSerializerOptions? jsonSerializerOptions = null) + public WorkflowThread(Workflow workflow, JsonElement serializedThread, IWorkflowExecutionEnvironment executionEnvironment, CheckpointManager? checkpointManager = null, bool includeExceptionDetails = false, JsonSerializerOptions? jsonSerializerOptions = null) { this._workflow = Throw.IfNull(workflow); this._executionEnvironment = Throw.IfNull(executionEnvironment); @@ -80,7 +83,7 @@ public override JsonElement Serialize(JsonSerializerOptions? 
jsonSerializerOptio return marshaller.Marshal(info); } - public AgentRunResponseUpdate CreateUpdate(string responseId, params AIContent[] parts) + public AgentRunResponseUpdate CreateUpdate(string responseId, object raw, params AIContent[] parts) { Throw.IfNullOrEmpty(parts); @@ -89,7 +92,8 @@ public AgentRunResponseUpdate CreateUpdate(string responseId, params AIContent[] CreatedAt = DateTimeOffset.UtcNow, MessageId = Guid.NewGuid().ToString("N"), Role = ChatRole.Assistant, - ResponseId = responseId + ResponseId = responseId, + RawRepresentation = raw }; this.MessageStore.AddMessages(update.ToChatMessage()); @@ -153,10 +157,29 @@ IAsyncEnumerable InvokeStageAsync( case RequestInfoEvent requestInfo: FunctionCallContent fcContent = requestInfo.Request.ToFunctionCall(); - AgentRunResponseUpdate update = this.CreateUpdate(this.LastResponseId, fcContent); + AgentRunResponseUpdate update = this.CreateUpdate(this.LastResponseId, evt, fcContent); yield return update; break; + case WorkflowErrorEvent workflowError: + Exception? exception = workflowError.Exception; + if (exception is TargetInvocationException tie && tie.InnerException != null) + { + exception = tie.InnerException; + } + + if (exception != null) + { + string message = this._includeExceptionDetails + ? 
exception.Message + : "An error occurred while executing the workflow."; + + ErrorContent errorContent = new(message); + yield return this.CreateUpdate(this.LastResponseId, evt, errorContent); + } + + break; + case SuperStepCompletedEvent stepCompleted: this.LastCheckpoint = stepCompleted.CompletionInfo?.Checkpoint; goto default; diff --git a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs index f4a7fcd9c2..0fa6473de0 100644 --- a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs @@ -162,7 +162,10 @@ static Task GetResponseAsync(IChatClient chatClient, List RunCoreStreami { var inputMessages = Throw.IfNull(messages) as IReadOnlyCollection ?? messages.ToList(); - (ChatClientAgentThread safeThread, ChatOptions? chatOptions, List inputMessagesForChatClient, IList? aiContextProviderMessages) = + (ChatClientAgentThread safeThread, + ChatOptions? chatOptions, + List inputMessagesForChatClient, + IList? aiContextProviderMessages, + IList? chatMessageStoreMessages, + ChatClientAgentContinuationToken? 
continuationToken) = await this.PrepareThreadAndMessagesAsync(thread, inputMessages, options, cancellationToken).ConfigureAwait(false); - ValidateStreamResumptionAllowed(chatOptions?.ContinuationToken, safeThread); - var chatClient = this.ChatClient; chatClient = ApplyRunOptionsTransformations(options, chatClient); @@ -214,7 +220,7 @@ protected override async IAsyncEnumerable RunCoreStreami this._logger.LogAgentChatClientInvokingAgent(nameof(RunStreamingAsync), this.Id, loggingAgentName, this._chatClientType); - List responseUpdates = []; + List responseUpdates = GetResponseUpdates(continuationToken); IAsyncEnumerator responseUpdatesEnumerator; @@ -225,7 +231,8 @@ protected override async IAsyncEnumerable RunCoreStreami } catch (Exception ex) { - await NotifyAIContextProviderOfFailureAsync(safeThread, ex, inputMessages, aiContextProviderMessages, cancellationToken).ConfigureAwait(false); + await NotifyMessageStoreOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, continuationToken), chatMessageStoreMessages, aiContextProviderMessages, cancellationToken).ConfigureAwait(false); + await NotifyAIContextProviderOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, continuationToken), aiContextProviderMessages, cancellationToken).ConfigureAwait(false); throw; } @@ -239,7 +246,8 @@ protected override async IAsyncEnumerable RunCoreStreami } catch (Exception ex) { - await NotifyAIContextProviderOfFailureAsync(safeThread, ex, inputMessages, aiContextProviderMessages, cancellationToken).ConfigureAwait(false); + await NotifyMessageStoreOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, continuationToken), chatMessageStoreMessages, aiContextProviderMessages, cancellationToken).ConfigureAwait(false); + await NotifyAIContextProviderOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, continuationToken), aiContextProviderMessages, cancellationToken).ConfigureAwait(false); throw; } @@ -251,7 +259,12 @@ protected override async 
IAsyncEnumerable RunCoreStreami update.AuthorName ??= this.Name; responseUpdates.Add(update); - yield return new(update) { AgentId = this.Id }; + + yield return new(update) + { + AgentId = this.Id, + ContinuationToken = WrapContinuationToken(update.ContinuationToken, GetInputMessages(inputMessages, continuationToken), responseUpdates) + }; } try @@ -260,7 +273,8 @@ protected override async IAsyncEnumerable RunCoreStreami } catch (Exception ex) { - await NotifyAIContextProviderOfFailureAsync(safeThread, ex, inputMessages, aiContextProviderMessages, cancellationToken).ConfigureAwait(false); + await NotifyMessageStoreOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, continuationToken), chatMessageStoreMessages, aiContextProviderMessages, cancellationToken).ConfigureAwait(false); + await NotifyAIContextProviderOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, continuationToken), aiContextProviderMessages, cancellationToken).ConfigureAwait(false); throw; } } @@ -272,10 +286,10 @@ protected override async IAsyncEnumerable RunCoreStreami this.UpdateThreadWithTypeAndConversationId(safeThread, chatResponse.ConversationId); // To avoid inconsistent state we only notify the thread of the input messages if no error occurs after the initial request. - await NotifyMessageStoreOfNewMessagesAsync(safeThread, inputMessages.Concat(aiContextProviderMessages ?? []).Concat(chatResponse.Messages), cancellationToken).ConfigureAwait(false); + await NotifyMessageStoreOfNewMessagesAsync(safeThread, GetInputMessages(inputMessages, continuationToken), chatMessageStoreMessages, aiContextProviderMessages, chatResponse.Messages, cancellationToken).ConfigureAwait(false); // Notify the AIContextProvider of all new messages. 
- await NotifyAIContextProviderOfSuccessAsync(safeThread, inputMessages, aiContextProviderMessages, chatResponse.Messages, cancellationToken).ConfigureAwait(false); + await NotifyAIContextProviderOfSuccessAsync(safeThread, GetInputMessages(inputMessages, continuationToken), aiContextProviderMessages, chatResponse.Messages, cancellationToken).ConfigureAwait(false); } /// @@ -379,7 +393,12 @@ private async Task RunCoreAsync ?? messages.ToList(); - (ChatClientAgentThread safeThread, ChatOptions? chatOptions, List inputMessagesForChatClient, IList? aiContextProviderMessages) = + (ChatClientAgentThread safeThread, + ChatOptions? chatOptions, + List inputMessagesForChatClient, + IList? aiContextProviderMessages, + IList? chatMessageStoreMessages, + ChatClientAgentContinuationToken? _) = await this.PrepareThreadAndMessagesAsync(thread, inputMessages, options, cancellationToken).ConfigureAwait(false); var chatClient = this.ChatClient; @@ -398,6 +417,7 @@ private async Task RunCoreAsync RunCoreAsyncOptional run options that may include specific chat configuration settings. /// A object representing the merged chat configuration, or if /// neither the run options nor the agent's chat options are available. - private ChatOptions? CreateConfiguredChatOptions(AgentRunOptions? runOptions) + private (ChatOptions?, ChatClientAgentContinuationToken?) CreateConfiguredChatOptions(AgentRunOptions? runOptions) { ChatOptions? requestChatOptions = (runOptions as ChatClientAgentRunOptions)?.ChatOptions?.Clone(); // If no agent chat options were provided, return the request chat options as is. if (this._agentOptions?.ChatOptions is null) { - return ApplyBackgroundResponsesProperties(requestChatOptions, runOptions); + return GetContinuationTokenAndApplyBackgroundResponsesProperties(requestChatOptions, runOptions); } // If no request chat options were provided, use the agent's chat options clone. 
if (requestChatOptions is null) { - return ApplyBackgroundResponsesProperties(this._agentOptions?.ChatOptions.Clone(), runOptions); + return GetContinuationTokenAndApplyBackgroundResponsesProperties(this._agentOptions?.ChatOptions.Clone(), runOptions); } // If both are present, we need to merge them. @@ -579,19 +599,26 @@ await thread.AIContextProvider.InvokedAsync(new(inputMessages, aiContextProvider } } - return ApplyBackgroundResponsesProperties(requestChatOptions, runOptions); + return GetContinuationTokenAndApplyBackgroundResponsesProperties(requestChatOptions, runOptions); - static ChatOptions? ApplyBackgroundResponsesProperties(ChatOptions? chatOptions, AgentRunOptions? agentRunOptions) + static (ChatOptions?, ChatClientAgentContinuationToken?) GetContinuationTokenAndApplyBackgroundResponsesProperties(ChatOptions? chatOptions, AgentRunOptions? agentRunOptions) { - // If any of the background response properties are set in the run options, we should apply both to the chat options. - if (agentRunOptions?.AllowBackgroundResponses is not null || agentRunOptions?.ContinuationToken is not null) + if (agentRunOptions?.AllowBackgroundResponses is not null) { chatOptions ??= new ChatOptions(); chatOptions.AllowBackgroundResponses = agentRunOptions.AllowBackgroundResponses; - chatOptions.ContinuationToken = agentRunOptions.ContinuationToken; } - return chatOptions; + ChatClientAgentContinuationToken? agentContinuationToken = null; + + if ((agentRunOptions?.ContinuationToken ?? chatOptions?.ContinuationToken) is { } continuationToken) + { + agentContinuationToken = ChatClientAgentContinuationToken.FromToken(continuationToken); + chatOptions ??= new ChatOptions(); + chatOptions.ContinuationToken = agentContinuationToken!.InnerToken; + } + + return (chatOptions, agentContinuationToken); } } @@ -602,14 +629,22 @@ await thread.AIContextProvider.InvokedAsync(new(inputMessages, aiContextProvider /// The input messages to use. /// Optional parameters for agent invocation. 
/// The to monitor for cancellation requests. The default is . - /// A tuple containing the thread, chat options, and thread messages. - private async Task<(ChatClientAgentThread AgentThread, ChatOptions? ChatOptions, List InputMessagesForChatClient, IList? AIContextProviderMessages)> PrepareThreadAndMessagesAsync( + /// A tuple containing the thread, chat options, messages and continuation token. + private async Task + <( + ChatClientAgentThread AgentThread, + ChatOptions? ChatOptions, + List InputMessagesForChatClient, + IList? AIContextProviderMessages, + IList? ChatMessageStoreMessages, + ChatClientAgentContinuationToken? ContinuationToken + )> PrepareThreadAndMessagesAsync( AgentThread? thread, IEnumerable inputMessages, AgentRunOptions? runOptions, CancellationToken cancellationToken) { - ChatOptions? chatOptions = this.CreateConfiguredChatOptions(runOptions); + (ChatOptions? chatOptions, ChatClientAgentContinuationToken? continuationToken) = this.CreateConfiguredChatOptions(runOptions); // Supplying a thread for background responses is required to prevent inconsistent experience // for callers if they forget to provide the thread for initial or follow-up runs. @@ -630,13 +665,9 @@ await thread.AIContextProvider.InvokedAsync(new(inputMessages, aiContextProvider throw new InvalidOperationException("Input messages are not allowed when continuing a background response using a continuation token."); } - if (chatOptions?.ContinuationToken is not null && typedThread.ConversationId is null && typedThread.MessageStore is null) - { - throw new InvalidOperationException("Continuation tokens are not allowed to be used for initial runs."); - } - List inputMessagesForChatClient = []; IList? aiContextProviderMessages = null; + IList? 
chatMessageStoreMessages = null; // Populate the thread messages only if we are not continuing an existing response as it's not allowed if (chatOptions?.ContinuationToken is null) @@ -644,9 +675,15 @@ await thread.AIContextProvider.InvokedAsync(new(inputMessages, aiContextProvider // Add any existing messages from the thread to the messages to be sent to the chat client. if (typedThread.MessageStore is not null) { - inputMessagesForChatClient.AddRange(await typedThread.MessageStore.GetMessagesAsync(cancellationToken).ConfigureAwait(false)); + var invokingContext = new ChatMessageStore.InvokingContext(inputMessages); + var storeMessages = await typedThread.MessageStore.InvokingAsync(invokingContext, cancellationToken).ConfigureAwait(false); + inputMessagesForChatClient.AddRange(storeMessages); + chatMessageStoreMessages = storeMessages as IList ?? storeMessages.ToList(); } + // Add the input messages before getting context from AIContextProvider. + inputMessagesForChatClient.AddRange(inputMessages); + // If we have an AIContextProvider, we should get context from it, and update our // messages and options with the additional context. if (typedThread.AIContextProvider is not null) @@ -675,9 +712,6 @@ await thread.AIContextProvider.InvokedAsync(new(inputMessages, aiContextProvider chatOptions.Instructions = string.IsNullOrWhiteSpace(chatOptions.Instructions) ? aiContext.Instructions : $"{chatOptions.Instructions}\n{aiContext.Instructions}"; } } - - // Add the input messages to the end of thread messages. 
- inputMessagesForChatClient.AddRange(inputMessages); } // If a user provided two different thread ids, via the thread object and options, we should throw @@ -698,7 +732,7 @@ await thread.AIContextProvider.InvokedAsync(new(inputMessages, aiContextProvider chatOptions.ConversationId = typedThread.ConversationId; } - return (typedThread, chatOptions, inputMessagesForChatClient, aiContextProviderMessages); + return (typedThread, chatOptions, inputMessagesForChatClient, aiContextProviderMessages, chatMessageStoreMessages, continuationToken); } private void UpdateThreadWithTypeAndConversationId(ChatClientAgentThread thread, string? responseConversationId) @@ -725,7 +759,13 @@ private void UpdateThreadWithTypeAndConversationId(ChatClientAgentThread thread, } } - private static Task NotifyMessageStoreOfNewMessagesAsync(ChatClientAgentThread thread, IEnumerable newMessages, CancellationToken cancellationToken) + private static Task NotifyMessageStoreOfFailureAsync( + ChatClientAgentThread thread, + Exception ex, + IEnumerable requestMessages, + IEnumerable? chatMessageStoreMessages, + IEnumerable? aiContextProviderMessages, + CancellationToken cancellationToken) { var messageStore = thread.MessageStore; @@ -733,32 +773,80 @@ private static Task NotifyMessageStoreOfNewMessagesAsync(ChatClientAgentThread t // If we don't have one, it means that the chat history is service managed and the underlying service is responsible for storing messages. if (messageStore is not null) { - return messageStore.AddMessagesAsync(newMessages, cancellationToken); + var invokedContext = new ChatMessageStore.InvokedContext(requestMessages, chatMessageStoreMessages!) + { + AIContextProviderMessages = aiContextProviderMessages, + InvokeException = ex + }; + + return messageStore.InvokedAsync(invokedContext, cancellationToken).AsTask(); } return Task.CompletedTask; } - private static void ValidateStreamResumptionAllowed(ResponseContinuationToken? 
continuationToken, ChatClientAgentThread safeThread) + private static Task NotifyMessageStoreOfNewMessagesAsync( + ChatClientAgentThread thread, + IEnumerable requestMessages, + IEnumerable? chatMessageStoreMessages, + IEnumerable? aiContextProviderMessages, + IEnumerable responseMessages, + CancellationToken cancellationToken) { - if (continuationToken is null) + var messageStore = thread.MessageStore; + + // Only notify the message store if we have one. + // If we don't have one, it means that the chat history is service managed and the underlying service is responsible for storing messages. + if (messageStore is not null) { - return; + var invokedContext = new ChatMessageStore.InvokedContext(requestMessages, chatMessageStoreMessages!) + { + AIContextProviderMessages = aiContextProviderMessages, + ResponseMessages = responseMessages + }; + return messageStore.InvokedAsync(invokedContext, cancellationToken).AsTask(); } - // Streaming resumption is only supported with chat history managed by the agent service because, currently, there's no good solution - // to collect updates received in failed runs and pass them to the last successful run so it can store them to the message store. - if (safeThread.ConversationId is null) + return Task.CompletedTask; + } + + private static ChatClientAgentContinuationToken? WrapContinuationToken(ResponseContinuationToken? continuationToken, IEnumerable? inputMessages = null, List? responseUpdates = null) + { + if (continuationToken is null) { - throw new NotSupportedException("Streaming resumption is only supported when chat history is stored and managed by the underlying AI service."); + return null; } - // Similarly, streaming resumption is not supported when a context provider is used because, currently, there's no good solution - // to collect updates received in failed runs and pass them to the last successful run so it can notify the context provider of the updates. 
- if (safeThread.AIContextProvider is not null) + return new(continuationToken) + { + // Save input messages to the continuation token so they can be added to the thread and + // provided to the context provider in the last successful streaming resumption run. + // That's necessary for scenarios where initial streaming run is interrupted and streaming is resumed later. + InputMessages = inputMessages?.Any() is true ? inputMessages : null, + + // Save all updates received so far to the continuation token so they can be provided to the + // message store and context provider in the last successful streaming resumption run. + // That's necessary for scenarios where a streaming run is interrupted after some updates were received. + ResponseUpdates = responseUpdates?.Count > 0 ? responseUpdates : null + }; + } + + private static IEnumerable GetInputMessages(IReadOnlyCollection inputMessages, ChatClientAgentContinuationToken? token) + { + // First, use input messages if provided. + if (inputMessages.Count > 0) { - throw new NotSupportedException("Using context provider with streaming resumption is not supported."); + return inputMessages; } + + // Fallback to messages saved in the continuation token if available. + return token?.InputMessages ?? []; + } + + private static List GetResponseUpdates(ChatClientAgentContinuationToken? token) + { + // Restore any previously received updates from the continuation token. + return token?.ResponseUpdates?.ToList() ?? []; } private string GetLoggingAgentName() => this.Name ?? "UnnamedAgent"; diff --git a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentContinuationToken.cs b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentContinuationToken.cs new file mode 100644 index 0000000000..aa5659b1d1 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentContinuationToken.cs @@ -0,0 +1,170 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Text.Json; +using System.Text.Json.Serialization; +using Microsoft.Extensions.AI; +using Microsoft.Shared.Diagnostics; + +namespace Microsoft.Agents.AI; + +/// +/// Represents a continuation token for ChatClientAgent operations. +/// +internal class ChatClientAgentContinuationToken : ResponseContinuationToken +{ + private const string TokenTypeName = "chatClientAgentContinuationToken"; + private const string TypeDiscriminator = "type"; + + /// + /// Initializes a new instance of the class. + /// + /// A continuation token provided by the underlying . + [JsonConstructor] + internal ChatClientAgentContinuationToken(ResponseContinuationToken innerToken) + { + this.InnerToken = innerToken; + } + + public override ReadOnlyMemory ToBytes() + { + using MemoryStream stream = new(); + using Utf8JsonWriter writer = new(stream); + + writer.WriteStartObject(); + + // This property should be the first one written to identify the type during deserialization. + writer.WriteString(TypeDiscriminator, TokenTypeName); + + writer.WriteString("innerToken", JsonSerializer.Serialize(this.InnerToken, AgentJsonUtilities.DefaultOptions.GetTypeInfo(typeof(ResponseContinuationToken)))); + + if (this.InputMessages?.Any() is true) + { + writer.WriteString("inputMessages", JsonSerializer.Serialize(this.InputMessages, AgentJsonUtilities.DefaultOptions.GetTypeInfo(typeof(IEnumerable)))); + } + + if (this.ResponseUpdates?.Count > 0) + { + writer.WriteString("responseUpdates", JsonSerializer.Serialize(this.ResponseUpdates, AgentJsonUtilities.DefaultOptions.GetTypeInfo(typeof(IReadOnlyList)))); + } + + writer.WriteEndObject(); + + writer.Flush(); + + return stream.ToArray(); + } + + /// + /// Create a new instance of from the provided . + /// + /// The token to create the from. + /// A equivalent of the provided . 
+ internal static ChatClientAgentContinuationToken FromToken(ResponseContinuationToken token) + { + if (token is ChatClientAgentContinuationToken chatClientContinuationToken) + { + return chatClientContinuationToken; + } + + ReadOnlyMemory data = token.ToBytes(); + + if (data.Length == 0) + { + Throw.ArgumentException(nameof(token), "Failed to create ChatClientAgentContinuationToken from provided token because it does not contain any data."); + } + + Utf8JsonReader reader = new(data.Span); + + // Move to the start object token. + _ = reader.Read(); + + // Validate that the token is of this type. + ValidateTokenType(reader, token); + + ResponseContinuationToken? innerToken = null; + IEnumerable? inputMessages = null; + IReadOnlyList? responseUpdates = null; + + while (reader.Read()) + { + if (reader.TokenType == JsonTokenType.EndObject) + { + break; + } + + if (reader.TokenType != JsonTokenType.PropertyName) + { + continue; + } + switch (reader.GetString()) + { + case "innerToken": + _ = reader.Read(); + var innerTokenJson = reader.GetString() ?? throw new ArgumentException("No content for innerToken property.", nameof(token)); + innerToken = (ResponseContinuationToken?)JsonSerializer.Deserialize(innerTokenJson, AgentJsonUtilities.DefaultOptions.GetTypeInfo(typeof(ResponseContinuationToken))); + break; + case "inputMessages": + _ = reader.Read(); + var innerMessagesJson = reader.GetString() ?? throw new ArgumentException("No content for inputMessages property.", nameof(token)); + inputMessages = (IEnumerable?)JsonSerializer.Deserialize(innerMessagesJson, AgentJsonUtilities.DefaultOptions.GetTypeInfo(typeof(IEnumerable))); + break; + case "responseUpdates": + _ = reader.Read(); + var responseUpdatesJson = reader.GetString() ?? 
throw new ArgumentException("No content for responseUpdates property.", nameof(token)); + responseUpdates = (IReadOnlyList?)JsonSerializer.Deserialize(responseUpdatesJson, AgentJsonUtilities.DefaultOptions.GetTypeInfo(typeof(IReadOnlyList))); + break; + default: + break; + } + } + + if (innerToken is null) + { + Throw.ArgumentException(nameof(token), "Failed to create ChatClientAgentContinuationToken from provided token because it does not contain an inner token."); + } + + return new ChatClientAgentContinuationToken(innerToken) + { + InputMessages = inputMessages, + ResponseUpdates = responseUpdates + }; + } + + private static void ValidateTokenType(Utf8JsonReader reader, ResponseContinuationToken token) + { + try + { + // Move to the first property. + _ = reader.Read(); + + // If the first property name is not "type", or its value does not match this token type name, then we know its not this token type. + if (reader.GetString() != TypeDiscriminator || !reader.Read() || reader.GetString() != TokenTypeName) + { + Throw.ArgumentException(nameof(token), "Failed to create ChatClientAgentContinuationToken from provided token because it is not of the correct type."); + } + } + catch (JsonException ex) + { + Throw.ArgumentException(nameof(token), "Failed to create ChatClientAgentContinuationToken from provided token because it could not be parsed.", ex); + } + } + + /// + /// Gets a continuation token provided by the underlying . + /// + internal ResponseContinuationToken InnerToken { get; } + + /// + /// Gets or sets the input messages used for streaming run. + /// + internal IEnumerable? InputMessages { get; set; } + + /// + /// Gets or sets the response updates received so far. + /// + internal IReadOnlyList? 
ResponseUpdates { get; set; } +} diff --git a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentCustomOptions.cs b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentCustomOptions.cs new file mode 100644 index 0000000000..b0cbd3d793 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentCustomOptions.cs @@ -0,0 +1,253 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Generic; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI; + +/// +/// Provides extension methods for to enable discoverability of . +/// +public partial class ChatClientAgent +{ + /// + /// Run the agent with no message assuming that all required instructions are already provided to the agent or on the thread. + /// + /// + /// The conversation thread to use for this invocation. If , a new thread will be created. + /// The thread will be updated with any response messages generated during invocation. + /// + /// Configuration parameters for controlling the agent's invocation behavior. + /// The to monitor for cancellation requests. The default is . + /// A task that represents the asynchronous operation. The task result contains an with the agent's output. + public Task RunAsync( + AgentThread? thread, + ChatClientAgentRunOptions? options, + CancellationToken cancellationToken = default) => + this.RunAsync(thread, (AgentRunOptions?)options, cancellationToken); + + /// + /// Runs the agent with a text message from the user. + /// + /// The user message to send to the agent. + /// + /// The conversation thread to use for this invocation. If , a new thread will be created. + /// The thread will be updated with the input message and any response messages generated during invocation. + /// + /// Configuration parameters for controlling the agent's invocation behavior. + /// The to monitor for cancellation requests. The default is . 
+ /// A task that represents the asynchronous operation. The task result contains an with the agent's output. + public Task RunAsync( + string message, + AgentThread? thread, + ChatClientAgentRunOptions? options, + CancellationToken cancellationToken = default) => + this.RunAsync(message, thread, (AgentRunOptions?)options, cancellationToken); + + /// + /// Runs the agent with a single chat message. + /// + /// The chat message to send to the agent. + /// + /// The conversation thread to use for this invocation. If , a new thread will be created. + /// The thread will be updated with the input message and any response messages generated during invocation. + /// + /// Configuration parameters for controlling the agent's invocation behavior. + /// The to monitor for cancellation requests. The default is . + /// A task that represents the asynchronous operation. The task result contains an with the agent's output. + public Task RunAsync( + ChatMessage message, + AgentThread? thread, + ChatClientAgentRunOptions? options, + CancellationToken cancellationToken = default) => + this.RunAsync(message, thread, (AgentRunOptions?)options, cancellationToken); + + /// + /// Runs the agent with a collection of chat messages. + /// + /// The collection of messages to send to the agent for processing. + /// + /// The conversation thread to use for this invocation. If , a new thread will be created. + /// The thread will be updated with the input messages and any response messages generated during invocation. + /// + /// Configuration parameters for controlling the agent's invocation behavior. + /// The to monitor for cancellation requests. The default is . + /// A task that represents the asynchronous operation. The task result contains an with the agent's output. + public Task RunAsync( + IEnumerable messages, + AgentThread? thread, + ChatClientAgentRunOptions? 
options, + CancellationToken cancellationToken = default) => + this.RunAsync(messages, thread, (AgentRunOptions?)options, cancellationToken); + + /// + /// Runs the agent in streaming mode without providing new input messages, relying on existing context and instructions. + /// + /// + /// The conversation thread to use for this invocation. If , a new thread will be created. + /// The thread will be updated with any response messages generated during invocation. + /// + /// Configuration parameters for controlling the agent's invocation behavior. + /// The to monitor for cancellation requests. The default is . + /// An asynchronous enumerable of instances representing the streaming response. + public IAsyncEnumerable RunStreamingAsync( + AgentThread? thread, + ChatClientAgentRunOptions? options, + CancellationToken cancellationToken = default) => + this.RunStreamingAsync(thread, (AgentRunOptions?)options, cancellationToken); + + /// + /// Runs the agent in streaming mode with a text message from the user. + /// + /// The user message to send to the agent. + /// + /// The conversation thread to use for this invocation. If , a new thread will be created. + /// The thread will be updated with the input message and any response messages generated during invocation. + /// + /// Configuration parameters for controlling the agent's invocation behavior. + /// The to monitor for cancellation requests. The default is . + /// An asynchronous enumerable of instances representing the streaming response. + public IAsyncEnumerable RunStreamingAsync( + string message, + AgentThread? thread, + ChatClientAgentRunOptions? options, + CancellationToken cancellationToken = default) => + this.RunStreamingAsync(message, thread, (AgentRunOptions?)options, cancellationToken); + + /// + /// Runs the agent in streaming mode with a single chat message. + /// + /// The chat message to send to the agent. + /// + /// The conversation thread to use for this invocation. 
If , a new thread will be created. + /// The thread will be updated with the input message and any response messages generated during invocation. + /// + /// Configuration parameters for controlling the agent's invocation behavior. + /// The to monitor for cancellation requests. The default is . + /// An asynchronous enumerable of instances representing the streaming response. + public IAsyncEnumerable RunStreamingAsync( + ChatMessage message, + AgentThread? thread, + ChatClientAgentRunOptions? options, + CancellationToken cancellationToken = default) => + this.RunStreamingAsync(message, thread, (AgentRunOptions?)options, cancellationToken); + + /// + /// Runs the agent in streaming mode with a collection of chat messages. + /// + /// The collection of messages to send to the agent for processing. + /// + /// The conversation thread to use for this invocation. If , a new thread will be created. + /// The thread will be updated with the input messages and any response updates generated during invocation. + /// + /// Configuration parameters for controlling the agent's invocation behavior. + /// The to monitor for cancellation requests. The default is . + /// An asynchronous enumerable of instances representing the streaming response. + public IAsyncEnumerable RunStreamingAsync( + IEnumerable messages, + AgentThread? thread, + ChatClientAgentRunOptions? options, + CancellationToken cancellationToken = default) => + this.RunStreamingAsync(messages, thread, (AgentRunOptions?)options, cancellationToken); + + /// + /// Run the agent with no message assuming that all required instructions are already provided to the agent or on the thread, and requesting a response of the specified type . + /// + /// + /// The conversation thread to use for this invocation. If , a new thread will be created. + /// The thread will be updated with any response messages generated during invocation. + /// + /// The JSON serialization options to use. 
+ /// Configuration parameters for controlling the agent's invocation behavior. + /// + /// to set a JSON schema on the ; otherwise, . The default is . + /// Using a JSON schema improves reliability if the underlying model supports native structured output with a schema, but might cause an error if the model does not support it. + /// + /// The to monitor for cancellation requests. The default is . + /// A task that represents the asynchronous operation. The task result contains an with the agent's output. + public Task> RunAsync( + AgentThread? thread, + JsonSerializerOptions? serializerOptions, + ChatClientAgentRunOptions? options, + bool? useJsonSchemaResponseFormat = null, + CancellationToken cancellationToken = default) => + this.RunAsync(thread, serializerOptions, (AgentRunOptions?)options, useJsonSchemaResponseFormat, cancellationToken); + + /// + /// Runs the agent with a text message from the user, requesting a response of the specified type . + /// + /// The user message to send to the agent. + /// + /// The conversation thread to use for this invocation. If , a new thread will be created. + /// The thread will be updated with the input message and any response messages generated during invocation. + /// + /// The JSON serialization options to use. + /// Configuration parameters for controlling the agent's invocation behavior. + /// + /// to set a JSON schema on the ; otherwise, . The default is . + /// Using a JSON schema improves reliability if the underlying model supports native structured output with a schema, but might cause an error if the model does not support it. + /// + /// The to monitor for cancellation requests. The default is . + /// A task that represents the asynchronous operation. The task result contains an with the agent's output. + public Task> RunAsync( + string message, + AgentThread? thread, + JsonSerializerOptions? serializerOptions, + ChatClientAgentRunOptions? options, + bool? 
useJsonSchemaResponseFormat = null, + CancellationToken cancellationToken = default) => + this.RunAsync(message, thread, serializerOptions, (AgentRunOptions?)options, useJsonSchemaResponseFormat, cancellationToken); + + /// + /// Runs the agent with a single chat message, requesting a response of the specified type . + /// + /// The chat message to send to the agent. + /// + /// The conversation thread to use for this invocation. If , a new thread will be created. + /// The thread will be updated with the input message and any response messages generated during invocation. + /// + /// The JSON serialization options to use. + /// Configuration parameters for controlling the agent's invocation behavior. + /// + /// to set a JSON schema on the ; otherwise, . The default is . + /// Using a JSON schema improves reliability if the underlying model supports native structured output with a schema, but might cause an error if the model does not support it. + /// + /// The to monitor for cancellation requests. The default is . + /// A task that represents the asynchronous operation. The task result contains an with the agent's output. + public Task> RunAsync( + ChatMessage message, + AgentThread? thread, + JsonSerializerOptions? serializerOptions, + ChatClientAgentRunOptions? options, + bool? useJsonSchemaResponseFormat = null, + CancellationToken cancellationToken = default) => + this.RunAsync(message, thread, serializerOptions, (AgentRunOptions?)options, useJsonSchemaResponseFormat, cancellationToken); + + /// + /// Runs the agent with a collection of chat messages, requesting a response of the specified type . + /// + /// The collection of messages to send to the agent for processing. + /// + /// The conversation thread to use for this invocation. If , a new thread will be created. + /// The thread will be updated with the input messages and any response messages generated during invocation. + /// + /// The JSON serialization options to use. 
+ /// Configuration parameters for controlling the agent's invocation behavior. + /// + /// to set a JSON schema on the ; otherwise, . The default is . + /// Using a JSON schema improves reliability if the underlying model supports native structured output with a schema, but might cause an error if the model does not support it. + /// + /// The to monitor for cancellation requests. The default is . + /// A task that represents the asynchronous operation. The task result contains an with the agent's output. + public Task> RunAsync( + IEnumerable messages, + AgentThread? thread, + JsonSerializerOptions? serializerOptions, + ChatClientAgentRunOptions? options, + bool? useJsonSchemaResponseFormat = null, + CancellationToken cancellationToken = default) => + this.RunAsync(messages, thread, serializerOptions, (AgentRunOptions?)options, useJsonSchemaResponseFormat, cancellationToken); +} diff --git a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentOptions.cs b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentOptions.cs index 4a72d66f2d..dd1ff3b228 100644 --- a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentOptions.cs +++ b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentOptions.cs @@ -80,7 +80,7 @@ public ChatClientAgentOptions Clone() /// /// Context object passed to the to create a new instance of . /// - public class AIContextProviderFactoryContext + public sealed class AIContextProviderFactoryContext { /// /// Gets or sets the serialized state of the , if any. @@ -97,7 +97,7 @@ public class AIContextProviderFactoryContext /// /// Context object passed to the to create a new instance of . /// - public class ChatMessageStoreFactoryContext + public sealed class ChatMessageStoreFactoryContext { /// /// Gets or sets the serialized state of the chat message store, if any. 
diff --git a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentRunResponse{T}.cs b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentRunResponse{T}.cs index 13b536a457..352be764eb 100644 --- a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentRunResponse{T}.cs +++ b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentRunResponse{T}.cs @@ -40,7 +40,6 @@ public ChatClientAgentRunResponse(ChatResponse response) : base(response) /// /// /// If the response did not contain JSON, or if deserialization fails, this property will throw. - /// To avoid exceptions, use instead. /// public override T Result => this._response.Result; } diff --git a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentStructuredOutput.cs b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentStructuredOutput.cs index 913be969c6..9a535cd645 100644 --- a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentStructuredOutput.cs +++ b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentStructuredOutput.cs @@ -154,7 +154,10 @@ async Task> GetResponseAsync(IChatClient chatClient, List CreateResponse(ChatResponse chatResponse) { - return new ChatClientAgentRunResponse(chatResponse); + return new ChatClientAgentRunResponse(chatResponse) + { + ContinuationToken = WrapContinuationToken(chatResponse.ContinuationToken) + }; } return this.RunCoreAsync(GetResponseAsync, CreateResponse, messages, thread, options, cancellationToken); diff --git a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentThread.cs b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentThread.cs index 7f0ce9a1ea..f4cf4aa033 100644 --- a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentThread.cs +++ b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentThread.cs @@ -11,7 +11,7 @@ namespace Microsoft.Agents.AI; /// Provides a thread implementation for use with . 
/// [DebuggerDisplay("{DebuggerDisplay,nq}")] -public class ChatClientAgentThread : AgentThread +public sealed class ChatClientAgentThread : AgentThread { private ChatMessageStore? _messageStore; @@ -171,9 +171,7 @@ public override JsonElement Serialize(JsonSerializerOptions? jsonSerializerOptio /// public override object? GetService(Type serviceType, object? serviceKey = null) => - serviceType == typeof(AgentThreadMetadata) - ? new AgentThreadMetadata(this.ConversationId) - : base.GetService(serviceType, serviceKey) + base.GetService(serviceType, serviceKey) ?? this.AIContextProvider?.GetService(serviceType, serviceKey) ?? this.MessageStore?.GetService(serviceType, serviceKey); diff --git a/dotnet/tests/AnthropicChatCompletion.IntegrationTests/AnthropicChatCompletionFixture.cs b/dotnet/tests/AnthropicChatCompletion.IntegrationTests/AnthropicChatCompletionFixture.cs index 72c0b14ae2..2bec0b366e 100644 --- a/dotnet/tests/AnthropicChatCompletion.IntegrationTests/AnthropicChatCompletionFixture.cs +++ b/dotnet/tests/AnthropicChatCompletion.IntegrationTests/AnthropicChatCompletionFixture.cs @@ -39,7 +39,12 @@ public async Task> GetChatHistoryAsync(AgentThread thread) { var typedThread = (ChatClientAgentThread)thread; - return typedThread.MessageStore is null ? 
[] : (await typedThread.MessageStore.GetMessagesAsync()).ToList(); + if (typedThread.MessageStore is null) + { + return []; + } + + return (await typedThread.MessageStore.InvokingAsync(new([]))).ToList(); } public Task CreateChatClientAgentAsync( diff --git a/dotnet/tests/AzureAI.IntegrationTests/AIProjectClientFixture.cs b/dotnet/tests/AzureAI.IntegrationTests/AIProjectClientFixture.cs index 883b317f5e..ddb015eb17 100644 --- a/dotnet/tests/AzureAI.IntegrationTests/AIProjectClientFixture.cs +++ b/dotnet/tests/AzureAI.IntegrationTests/AIProjectClientFixture.cs @@ -48,7 +48,12 @@ public async Task> GetChatHistoryAsync(AgentThread thread) return await this.GetChatHistoryFromResponsesChainAsync(chatClientThread.ConversationId); } - return chatClientThread.MessageStore is null ? [] : (await chatClientThread.MessageStore.GetMessagesAsync()).ToList(); + if (chatClientThread.MessageStore is null) + { + return []; + } + + return (await chatClientThread.MessageStore.InvokingAsync(new([]))).ToList(); } private async Task> GetChatHistoryFromResponsesChainAsync(string conversationId) diff --git a/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/A2AAgentTests.cs b/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/A2AAgentTests.cs index 0b491fb303..236ae7b332 100644 --- a/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/A2AAgentTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/A2AAgentTests.cs @@ -832,6 +832,174 @@ await Assert.ThrowsAsync(async () => }); } + [Fact] + public async Task RunAsync_WithAgentMessageResponseMetadata_ReturnsMetadataAsAdditionalPropertiesAsync() + { + // Arrange + this._handler.ResponseToReturn = new AgentMessage + { + MessageId = "response-123", + Role = MessageRole.Agent, + Parts = [new TextPart { Text = "Response with metadata" }], + Metadata = new Dictionary + { + { "responseKey1", JsonSerializer.SerializeToElement("responseValue1") }, + { "responseCount", JsonSerializer.SerializeToElement(99) } + } + }; + + var inputMessages = new List + { + 
new(ChatRole.User, "Test message") + }; + + // Act + var result = await this._agent.RunAsync(inputMessages); + + // Assert + Assert.NotNull(result.AdditionalProperties); + Assert.NotNull(result.AdditionalProperties["responseKey1"]); + Assert.Equal("responseValue1", ((JsonElement)result.AdditionalProperties["responseKey1"]!).GetString()); + Assert.NotNull(result.AdditionalProperties["responseCount"]); + Assert.Equal(99, ((JsonElement)result.AdditionalProperties["responseCount"]!).GetInt32()); + } + + [Fact] + public async Task RunAsync_WithAdditionalProperties_PropagatesThemAsMetadataToMessageSendParamsAsync() + { + // Arrange + this._handler.ResponseToReturn = new AgentMessage + { + MessageId = "response-123", + Role = MessageRole.Agent, + Parts = [new TextPart { Text = "Response" }] + }; + + var inputMessages = new List + { + new(ChatRole.User, "Test message") + }; + + var options = new AgentRunOptions + { + AdditionalProperties = new() + { + { "key1", "value1" }, + { "key2", 42 }, + { "key3", true } + } + }; + + // Act + await this._agent.RunAsync(inputMessages, null, options); + + // Assert + Assert.NotNull(this._handler.CapturedMessageSendParams); + Assert.NotNull(this._handler.CapturedMessageSendParams.Metadata); + Assert.Equal("value1", this._handler.CapturedMessageSendParams.Metadata["key1"].GetString()); + Assert.Equal(42, this._handler.CapturedMessageSendParams.Metadata["key2"].GetInt32()); + Assert.True(this._handler.CapturedMessageSendParams.Metadata["key3"].GetBoolean()); + } + + [Fact] + public async Task RunAsync_WithNullAdditionalProperties_DoesNotSetMetadataAsync() + { + // Arrange + this._handler.ResponseToReturn = new AgentMessage + { + MessageId = "response-123", + Role = MessageRole.Agent, + Parts = [new TextPart { Text = "Response" }] + }; + + var inputMessages = new List + { + new(ChatRole.User, "Test message") + }; + + var options = new AgentRunOptions + { + AdditionalProperties = null + }; + + // Act + await 
this._agent.RunAsync(inputMessages, null, options); + + // Assert + Assert.NotNull(this._handler.CapturedMessageSendParams); + Assert.Null(this._handler.CapturedMessageSendParams.Metadata); + } + + [Fact] + public async Task RunStreamingAsync_WithAdditionalProperties_PropagatesThemAsMetadataToMessageSendParamsAsync() + { + // Arrange + this._handler.StreamingResponseToReturn = new AgentMessage + { + MessageId = "stream-123", + Role = MessageRole.Agent, + Parts = [new TextPart { Text = "Streaming response" }] + }; + + var inputMessages = new List + { + new(ChatRole.User, "Test streaming message") + }; + + var options = new AgentRunOptions + { + AdditionalProperties = new() + { + { "streamKey1", "streamValue1" }, + { "streamKey2", 100 }, + { "streamKey3", false } + } + }; + + // Act + await foreach (var _ in this._agent.RunStreamingAsync(inputMessages, null, options)) + { + } + + // Assert + Assert.NotNull(this._handler.CapturedMessageSendParams); + Assert.NotNull(this._handler.CapturedMessageSendParams.Metadata); + Assert.Equal("streamValue1", this._handler.CapturedMessageSendParams.Metadata["streamKey1"].GetString()); + Assert.Equal(100, this._handler.CapturedMessageSendParams.Metadata["streamKey2"].GetInt32()); + Assert.False(this._handler.CapturedMessageSendParams.Metadata["streamKey3"].GetBoolean()); + } + + [Fact] + public async Task RunStreamingAsync_WithNullAdditionalProperties_DoesNotSetMetadataAsync() + { + // Arrange + this._handler.StreamingResponseToReturn = new AgentMessage + { + MessageId = "stream-123", + Role = MessageRole.Agent, + Parts = [new TextPart { Text = "Streaming response" }] + }; + + var inputMessages = new List + { + new(ChatRole.User, "Test streaming message") + }; + + var options = new AgentRunOptions + { + AdditionalProperties = null + }; + + // Act + await foreach (var _ in this._agent.RunStreamingAsync(inputMessages, null, options)) + { + } + + // Assert + Assert.NotNull(this._handler.CapturedMessageSendParams); + 
Assert.Null(this._handler.CapturedMessageSendParams.Metadata); + } + [Fact] public async Task RunAsync_WithInvalidThreadType_ThrowsInvalidOperationExceptionAsync() { diff --git a/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/Extensions/AdditionalPropertiesDictionaryExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/Extensions/AdditionalPropertiesDictionaryExtensionsTests.cs new file mode 100644 index 0000000000..4972b8857f --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.A2A.UnitTests/Extensions/AdditionalPropertiesDictionaryExtensionsTests.cs @@ -0,0 +1,186 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Generic; +using System.Text.Json; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI.A2A.UnitTests; + +/// +/// Unit tests for the class. +/// +public sealed class AdditionalPropertiesDictionaryExtensionsTests +{ + [Fact] + public void ToA2AMetadata_WithNullAdditionalProperties_ReturnsNull() + { + // Arrange + AdditionalPropertiesDictionary? additionalProperties = null; + + // Act + Dictionary? result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.Null(result); + } + + [Fact] + public void ToA2AMetadata_WithEmptyAdditionalProperties_ReturnsNull() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = []; + + // Act + Dictionary? result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.Null(result); + } + + [Fact] + public void ToA2AMetadata_WithStringValue_ReturnsMetadataWithJsonElement() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new() + { + { "stringKey", "stringValue" } + }; + + // Act + Dictionary? 
result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.NotNull(result); + Assert.Single(result); + Assert.True(result.ContainsKey("stringKey")); + Assert.Equal("stringValue", result["stringKey"].GetString()); + } + + [Fact] + public void ToA2AMetadata_WithNumericValue_ReturnsMetadataWithJsonElement() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new() + { + { "numberKey", 42 } + }; + + // Act + Dictionary? result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.NotNull(result); + Assert.Single(result); + Assert.True(result.ContainsKey("numberKey")); + Assert.Equal(42, result["numberKey"].GetInt32()); + } + + [Fact] + public void ToA2AMetadata_WithBooleanValue_ReturnsMetadataWithJsonElement() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new() + { + { "booleanKey", true } + }; + + // Act + Dictionary? result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.NotNull(result); + Assert.Single(result); + Assert.True(result.ContainsKey("booleanKey")); + Assert.True(result["booleanKey"].GetBoolean()); + } + + [Fact] + public void ToA2AMetadata_WithMultipleProperties_ReturnsMetadataWithAllProperties() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new() + { + { "stringKey", "stringValue" }, + { "numberKey", 42 }, + { "booleanKey", true } + }; + + // Act + Dictionary? 
result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.NotNull(result); + Assert.Equal(3, result.Count); + + Assert.True(result.ContainsKey("stringKey")); + Assert.Equal("stringValue", result["stringKey"].GetString()); + + Assert.True(result.ContainsKey("numberKey")); + Assert.Equal(42, result["numberKey"].GetInt32()); + + Assert.True(result.ContainsKey("booleanKey")); + Assert.True(result["booleanKey"].GetBoolean()); + } + + [Fact] + public void ToA2AMetadata_WithArrayValue_ReturnsMetadataWithJsonElement() + { + // Arrange + int[] arrayValue = [1, 2, 3]; + AdditionalPropertiesDictionary additionalProperties = new() + { + { "arrayKey", arrayValue } + }; + + // Act + Dictionary? result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.NotNull(result); + Assert.Single(result); + Assert.True(result.ContainsKey("arrayKey")); + Assert.Equal(JsonValueKind.Array, result["arrayKey"].ValueKind); + Assert.Equal(3, result["arrayKey"].GetArrayLength()); + } + + [Fact] + public void ToA2AMetadata_WithNullValue_ReturnsMetadataWithNullJsonElement() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new() + { + { "nullKey", null! } + }; + + // Act + Dictionary? result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.NotNull(result); + Assert.Single(result); + Assert.True(result.ContainsKey("nullKey")); + Assert.Equal(JsonValueKind.Null, result["nullKey"].ValueKind); + } + + [Fact] + public void ToA2AMetadata_WithJsonElementValue_ReturnsMetadataWithJsonElement() + { + // Arrange + JsonElement jsonElement = JsonSerializer.SerializeToElement(new { name = "test", value = 123 }); + AdditionalPropertiesDictionary additionalProperties = new() + { + { "jsonElementKey", jsonElement } + }; + + // Act + Dictionary? 
result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.NotNull(result); + Assert.Single(result); + Assert.True(result.ContainsKey("jsonElementKey")); + Assert.Equal(JsonValueKind.Object, result["jsonElementKey"].ValueKind); + Assert.Equal("test", result["jsonElementKey"].GetProperty("name").GetString()); + Assert.Equal(123, result["jsonElementKey"].GetProperty("value").GetInt32()); + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AgentRunResponseTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AgentRunResponseTests.cs index 981f1e3933..8e39b4c4fa 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AgentRunResponseTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AgentRunResponseTests.cs @@ -57,7 +57,7 @@ public void ConstructorWithChatResponseRoundtrips() RawRepresentation = new object(), ResponseId = "responseId", Usage = new UsageDetails(), - ContinuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }), + ContinuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }) }; AgentRunResponse response = new(chatResponse); @@ -214,6 +214,12 @@ public void ToAgentRunResponseUpdatesProducesUpdates() Assert.Equal(100, usageContent.Details.TotalTokenCount); } +#if NETFRAMEWORK + /// + /// Since Json Serialization using reflection is disabled in .net core builds, and we are using a custom type here that wouldn't + /// be registered with the default source generated serializer, this test will only pass in .net framework builds where reflection-based + /// serialization is available. 
+ /// [Fact] public void ParseAsStructuredOutputSuccess() { @@ -221,6 +227,24 @@ public void ParseAsStructuredOutputSuccess() var expectedResult = new Animal { Id = 1, FullName = "Tigger", Species = Species.Tiger }; var response = new AgentRunResponse(new ChatMessage(ChatRole.Assistant, JsonSerializer.Serialize(expectedResult, TestJsonSerializerContext.Default.Animal))); + // Act. + var animal = response.Deserialize(); + + // Assert. + Assert.NotNull(animal); + Assert.Equal(expectedResult.Id, animal.Id); + Assert.Equal(expectedResult.FullName, animal.FullName); + Assert.Equal(expectedResult.Species, animal.Species); + } +#endif + + [Fact] + public void ParseAsStructuredOutputWithJSOSuccess() + { + // Arrange. + var expectedResult = new Animal { Id = 1, FullName = "Tigger", Species = Species.Tiger }; + var response = new AgentRunResponse(new ChatMessage(ChatRole.Assistant, JsonSerializer.Serialize(expectedResult, TestJsonSerializerContext.Default.Animal))); + // Act. var animal = response.Deserialize(TestJsonSerializerContext.Default.Options); @@ -262,6 +286,12 @@ public void ParseAsStructuredOutputFailsWithIncorrectTypedJson() Assert.Throws(() => response.Deserialize(TestJsonSerializerContext.Default.Options)); } +#if NETFRAMEWORK + /// + /// Since Json Serialization using reflection is disabled in .net core builds, and we are using a custom type here that wouldn't + /// be registered with the default source generated serializer, this test will only pass in .net framework builds where reflection-based + /// serialization is available. + /// [Fact] public void TryParseAsStructuredOutputSuccess() { @@ -269,6 +299,24 @@ public void TryParseAsStructuredOutputSuccess() var expectedResult = new Animal { Id = 1, FullName = "Tigger", Species = Species.Tiger }; var response = new AgentRunResponse(new ChatMessage(ChatRole.Assistant, JsonSerializer.Serialize(expectedResult, TestJsonSerializerContext.Default.Animal))); + // Act. + response.TryDeserialize(out Animal? 
animal); + + // Assert. + Assert.NotNull(animal); + Assert.Equal(expectedResult.Id, animal.Id); + Assert.Equal(expectedResult.FullName, animal.FullName); + Assert.Equal(expectedResult.Species, animal.Species); + } +#endif + + [Fact] + public void TryParseAsStructuredOutputWithJSOSuccess() + { + // Arrange. + var expectedResult = new Animal { Id = 1, FullName = "Tigger", Species = Species.Tiger }; + var response = new AgentRunResponse(new ChatMessage(ChatRole.Assistant, JsonSerializer.Serialize(expectedResult, TestJsonSerializerContext.Default.Animal))); + // Act. response.TryDeserialize(TestJsonSerializerContext.Default.Options, out Animal? animal); diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatMessageStoreMessageFilterTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatMessageStoreMessageFilterTests.cs new file mode 100644 index 0000000000..ab10c377ae --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatMessageStoreMessageFilterTests.cs @@ -0,0 +1,205 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.AI; +using Moq; + +namespace Microsoft.Agents.AI.Abstractions.UnitTests; + +/// +/// Contains tests for the class. 
+/// +public sealed class ChatMessageStoreMessageFilterTests +{ + [Fact] + public void Constructor_WithNullInnerStore_ThrowsArgumentNullException() + { + // Arrange, Act & Assert + Assert.Throws(() => new ChatMessageStoreMessageFilter(null!)); + } + + [Fact] + public void Constructor_WithOnlyInnerStore_Throws() + { + // Arrange + var innerStoreMock = new Mock(); + + // Act & Assert + Assert.Throws(() => new ChatMessageStoreMessageFilter(innerStoreMock.Object)); + } + + [Fact] + public void Constructor_WithAllParameters_CreatesInstance() + { + // Arrange + var innerStoreMock = new Mock(); + + IEnumerable InvokingFilter(IEnumerable msgs) => msgs; + ChatMessageStore.InvokedContext InvokedFilter(ChatMessageStore.InvokedContext ctx) => ctx; + + // Act + var filter = new ChatMessageStoreMessageFilter(innerStoreMock.Object, InvokingFilter, InvokedFilter); + + // Assert + Assert.NotNull(filter); + } + + [Fact] + public async Task InvokingAsync_WithNoOpFilters_ReturnsInnerStoreMessagesAsync() + { + // Arrange + var innerStoreMock = new Mock(); + var expectedMessages = new List + { + new(ChatRole.User, "Hello"), + new(ChatRole.Assistant, "Hi there!") + }; + var context = new ChatMessageStore.InvokingContext([new ChatMessage(ChatRole.User, "Test")]); + + innerStoreMock + .Setup(s => s.InvokingAsync(context, It.IsAny())) + .ReturnsAsync(expectedMessages); + + var filter = new ChatMessageStoreMessageFilter(innerStoreMock.Object, x => x, x => x); + + // Act + var result = (await filter.InvokingAsync(context, CancellationToken.None)).ToList(); + + // Assert + Assert.Equal(2, result.Count); + Assert.Equal("Hello", result[0].Text); + Assert.Equal("Hi there!", result[1].Text); + innerStoreMock.Verify(s => s.InvokingAsync(context, It.IsAny()), Times.Once); + } + + [Fact] + public async Task InvokingAsync_WithInvokingFilter_AppliesFilterAsync() + { + // Arrange + var innerStoreMock = new Mock(); + var innerMessages = new List + { + new(ChatRole.User, "Hello"), + 
new(ChatRole.Assistant, "Hi there!"), + new(ChatRole.User, "How are you?") + }; + var context = new ChatMessageStore.InvokingContext([new ChatMessage(ChatRole.User, "Test")]); + + innerStoreMock + .Setup(s => s.InvokingAsync(context, It.IsAny())) + .ReturnsAsync(innerMessages); + + // Filter to only user messages + IEnumerable InvokingFilter(IEnumerable msgs) => msgs.Where(m => m.Role == ChatRole.User); + + var filter = new ChatMessageStoreMessageFilter(innerStoreMock.Object, InvokingFilter); + + // Act + var result = (await filter.InvokingAsync(context, CancellationToken.None)).ToList(); + + // Assert + Assert.Equal(2, result.Count); + Assert.All(result, msg => Assert.Equal(ChatRole.User, msg.Role)); + innerStoreMock.Verify(s => s.InvokingAsync(context, It.IsAny()), Times.Once); + } + + [Fact] + public async Task InvokingAsync_WithInvokingFilter_CanModifyMessagesAsync() + { + // Arrange + var innerStoreMock = new Mock(); + var innerMessages = new List + { + new(ChatRole.User, "Hello"), + new(ChatRole.Assistant, "Hi there!") + }; + var context = new ChatMessageStore.InvokingContext([new ChatMessage(ChatRole.User, "Test")]); + + innerStoreMock + .Setup(s => s.InvokingAsync(context, It.IsAny())) + .ReturnsAsync(innerMessages); + + // Filter that transforms messages + IEnumerable InvokingFilter(IEnumerable msgs) => + msgs.Select(m => new ChatMessage(m.Role, $"[FILTERED] {m.Text}")); + + var filter = new ChatMessageStoreMessageFilter(innerStoreMock.Object, InvokingFilter); + + // Act + var result = (await filter.InvokingAsync(context, CancellationToken.None)).ToList(); + + // Assert + Assert.Equal(2, result.Count); + Assert.Equal("[FILTERED] Hello", result[0].Text); + Assert.Equal("[FILTERED] Hi there!", result[1].Text); + } + + [Fact] + public async Task InvokedAsync_WithInvokedFilter_AppliesFilterAsync() + { + // Arrange + var innerStoreMock = new Mock(); + var requestMessages = new List { new(ChatRole.User, "Hello") }; + var chatMessageStoreMessages = new List { 
new(ChatRole.System, "System") }; + var responseMessages = new List { new(ChatRole.Assistant, "Response") }; + var context = new ChatMessageStore.InvokedContext(requestMessages, chatMessageStoreMessages) + { + ResponseMessages = responseMessages + }; + + ChatMessageStore.InvokedContext? capturedContext = null; + innerStoreMock + .Setup(s => s.InvokedAsync(It.IsAny(), It.IsAny())) + .Callback((ctx, ct) => capturedContext = ctx) + .Returns(default(ValueTask)); + + // Filter that modifies the context + ChatMessageStore.InvokedContext InvokedFilter(ChatMessageStore.InvokedContext ctx) + { + var modifiedRequestMessages = ctx.RequestMessages.Select(m => new ChatMessage(m.Role, $"[FILTERED] {m.Text}")).ToList(); + return new ChatMessageStore.InvokedContext(modifiedRequestMessages, ctx.ChatMessageStoreMessages) + { + ResponseMessages = ctx.ResponseMessages, + AIContextProviderMessages = ctx.AIContextProviderMessages, + InvokeException = ctx.InvokeException + }; + } + + var filter = new ChatMessageStoreMessageFilter(innerStoreMock.Object, invokedMessagesFilter: InvokedFilter); + + // Act + await filter.InvokedAsync(context, CancellationToken.None); + + // Assert + Assert.NotNull(capturedContext); + Assert.Single(capturedContext.RequestMessages); + Assert.Equal("[FILTERED] Hello", capturedContext.RequestMessages.First().Text); + innerStoreMock.Verify(s => s.InvokedAsync(It.IsAny(), It.IsAny()), Times.Once); + } + + [Fact] + public void Serialize_DelegatesToInnerStore() + { + // Arrange + var innerStoreMock = new Mock(); + var expectedJson = JsonSerializer.SerializeToElement("data", TestJsonSerializerContext.Default.String); + + innerStoreMock + .Setup(s => s.Serialize(It.IsAny())) + .Returns(expectedJson); + + var filter = new ChatMessageStoreMessageFilter(innerStoreMock.Object, x => x, x => x); + + // Act + var result = filter.Serialize(); + + // Assert + Assert.Equal(expectedJson.GetRawText(), result.GetRawText()); + innerStoreMock.Verify(s => s.Serialize(null), 
Times.Once); + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatMessageStoreTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatMessageStoreTests.cs index 4100b20f5a..883941458c 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatMessageStoreTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatMessageStoreTests.cs @@ -78,11 +78,11 @@ public void GetService_Generic_ReturnsNullForUnrelatedType() private sealed class TestChatMessageStore : ChatMessageStore { - public override Task> GetMessagesAsync(CancellationToken cancellationToken = default) - => Task.FromResult>([]); + public override ValueTask> InvokingAsync(InvokingContext context, CancellationToken cancellationToken = default) + => new(Array.Empty()); - public override Task AddMessagesAsync(IEnumerable messages, CancellationToken cancellationToken = default) - => Task.CompletedTask; + public override ValueTask InvokedAsync(InvokedContext context, CancellationToken cancellationToken = default) + => default; public override JsonElement Serialize(JsonSerializerOptions? 
jsonSerializerOptions = null) => default; diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/InMemoryChatMessageStoreTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/InMemoryChatMessageStoreTests.cs index 824fb62f6d..43bfacca79 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/InMemoryChatMessageStoreTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/InMemoryChatMessageStoreTests.cs @@ -47,34 +47,54 @@ public void Constructor_Arguments_SetOnPropertiesCorrectly() } [Fact] - public async Task AddMessagesAsyncAddsMessagesAndReturnsNullThreadIdAsync() + public async Task InvokedAsyncAddsMessagesAsync() { - var store = new InMemoryChatMessageStore(); - var messages = new List + var requestMessages = new List + { + new(ChatRole.User, "Hello") + }; + var responseMessages = new List { - new(ChatRole.User, "Hello"), new(ChatRole.Assistant, "Hi there!") }; + var messageStoreMessages = new List() + { + new(ChatRole.System, "original instructions") + }; + var aiContextProviderMessages = new List() + { + new(ChatRole.System, "additional context") + }; - await store.AddMessagesAsync(messages, CancellationToken.None); + var store = new InMemoryChatMessageStore(); + store.Add(messageStoreMessages[0]); + var context = new ChatMessageStore.InvokedContext(requestMessages, messageStoreMessages) + { + AIContextProviderMessages = aiContextProviderMessages, + ResponseMessages = responseMessages + }; + await store.InvokedAsync(context, CancellationToken.None); - Assert.Equal(2, store.Count); - Assert.Equal("Hello", store[0].Text); - Assert.Equal("Hi there!", store[1].Text); + Assert.Equal(4, store.Count); + Assert.Equal("original instructions", store[0].Text); + Assert.Equal("Hello", store[1].Text); + Assert.Equal("additional context", store[2].Text); + Assert.Equal("Hi there!", store[3].Text); } [Fact] - public async Task AddMessagesAsyncWithEmptyDoesNotFailAsync() + public async Task 
InvokedAsyncWithEmptyDoesNotFailAsync() { var store = new InMemoryChatMessageStore(); - await store.AddMessagesAsync([], CancellationToken.None); + var context = new ChatMessageStore.InvokedContext([], []); + await store.InvokedAsync(context, CancellationToken.None); Assert.Empty(store); } [Fact] - public async Task GetMessagesAsyncReturnsAllMessagesAsync() + public async Task InvokingAsyncReturnsAllMessagesAsync() { var store = new InMemoryChatMessageStore { @@ -82,7 +102,8 @@ public async Task GetMessagesAsyncReturnsAllMessagesAsync() new ChatMessage(ChatRole.Assistant, "Test2") }; - var result = (await store.GetMessagesAsync(CancellationToken.None)).ToList(); + var context = new ChatMessageStore.InvokingContext([]); + var result = (await store.InvokingAsync(context, CancellationToken.None)).ToList(); Assert.Equal(2, result.Count); Assert.Contains(result, m => m.Text == "Test1"); @@ -157,24 +178,25 @@ public async Task SerializeAndDeserializeWorksWithExperimentalContentTypesAsync( } [Fact] - public async Task AddMessagesAsyncWithEmptyMessagesDoesNotChangeStoreAsync() + public async Task InvokedAsyncWithEmptyMessagesDoesNotChangeStoreAsync() { var store = new InMemoryChatMessageStore(); var messages = new List(); - await store.AddMessagesAsync(messages, CancellationToken.None); + var context = new ChatMessageStore.InvokedContext(messages, []); + await store.InvokedAsync(context, CancellationToken.None); Assert.Empty(store); } [Fact] - public async Task AddMessagesAsync_WithNullMessages_ThrowsArgumentNullExceptionAsync() + public async Task InvokedAsync_WithNullContext_ThrowsArgumentNullExceptionAsync() { // Arrange var store = new InMemoryChatMessageStore(); // Act & Assert - await Assert.ThrowsAsync(() => store.AddMessagesAsync(null!, CancellationToken.None)); + await Assert.ThrowsAsync(() => store.InvokedAsync(null!, CancellationToken.None).AsTask()); } [Fact] @@ -498,7 +520,8 @@ public async Task AddMessagesAsync_WithReducer_AfterMessageAdded_InvokesReducerA 
var store = new InMemoryChatMessageStore(reducerMock.Object, InMemoryChatMessageStore.ChatReducerTriggerEvent.AfterMessageAdded); // Act - await store.AddMessagesAsync(originalMessages, CancellationToken.None); + var context = new ChatMessageStore.InvokedContext(originalMessages, []); + await store.InvokedAsync(context, CancellationToken.None); // Assert Assert.Single(store); @@ -526,10 +549,15 @@ public async Task GetMessagesAsync_WithReducer_BeforeMessagesRetrieval_InvokesRe .ReturnsAsync(reducedMessages); var store = new InMemoryChatMessageStore(reducerMock.Object, InMemoryChatMessageStore.ChatReducerTriggerEvent.BeforeMessagesRetrieval); - await store.AddMessagesAsync(originalMessages, CancellationToken.None); + // Add messages directly to the store for this test + foreach (var msg in originalMessages) + { + store.Add(msg); + } // Act - var result = (await store.GetMessagesAsync(CancellationToken.None)).ToList(); + var invokingContext = new ChatMessageStore.InvokingContext(Array.Empty()); + var result = (await store.InvokingAsync(invokingContext, CancellationToken.None)).ToList(); // Assert Assert.Single(result); @@ -551,7 +579,8 @@ public async Task AddMessagesAsync_WithReducer_ButWrongTrigger_DoesNotInvokeRedu var store = new InMemoryChatMessageStore(reducerMock.Object, InMemoryChatMessageStore.ChatReducerTriggerEvent.BeforeMessagesRetrieval); // Act - await store.AddMessagesAsync(originalMessages, CancellationToken.None); + var context = new ChatMessageStore.InvokedContext(originalMessages, []); + await store.InvokedAsync(context, CancellationToken.None); // Assert Assert.Single(store); @@ -576,7 +605,8 @@ public async Task GetMessagesAsync_WithReducer_ButWrongTrigger_DoesNotInvokeRedu }; // Act - var result = (await store.GetMessagesAsync(CancellationToken.None)).ToList(); + var invokingContext = new ChatMessageStore.InvokingContext(Array.Empty()); + var result = (await store.InvokingAsync(invokingContext, CancellationToken.None)).ToList(); // Assert 
Assert.Single(result); diff --git a/dotnet/tests/Microsoft.Agents.AI.CosmosNoSql.UnitTests/CosmosChatMessageStoreTests.cs b/dotnet/tests/Microsoft.Agents.AI.CosmosNoSql.UnitTests/CosmosChatMessageStoreTests.cs index 3dbd3ec367..9410e68f1b 100644 --- a/dotnet/tests/Microsoft.Agents.AI.CosmosNoSql.UnitTests/CosmosChatMessageStoreTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.CosmosNoSql.UnitTests/CosmosChatMessageStoreTests.cs @@ -202,11 +202,11 @@ public void Constructor_WithEmptyConversationId_ShouldThrowArgumentException() #endregion - #region AddMessagesAsync Tests + #region InvokedAsync Tests [SkippableFact] [Trait("Category", "CosmosDB")] - public async Task AddMessagesAsync_WithSingleMessage_ShouldAddMessageAsync() + public async Task InvokedAsync_WithSingleMessage_ShouldAddMessageAsync() { // Arrange this.SkipIfEmulatorNotAvailable(); @@ -214,14 +214,20 @@ public async Task AddMessagesAsync_WithSingleMessage_ShouldAddMessageAsync() using var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, conversationId); var message = new ChatMessage(ChatRole.User, "Hello, world!"); + var context = new ChatMessageStore.InvokedContext([message], []) + { + ResponseMessages = [] + }; + // Act - await store.AddMessagesAsync([message]); + await store.InvokedAsync(context); // Wait a moment for eventual consistency await Task.Delay(100); // Assert - var messages = await store.GetMessagesAsync(); + var invokingContext = new ChatMessageStore.InvokingContext([]); + var messages = await store.InvokingAsync(invokingContext); var messageList = messages.ToList(); // Simple assertion - if this fails, we know the deserialization is the issue @@ -256,7 +262,7 @@ public async Task AddMessagesAsync_WithSingleMessage_ShouldAddMessageAsync() } string rawJson = rawResults.Count > 0 ? 
Newtonsoft.Json.JsonConvert.SerializeObject(rawResults[0], Newtonsoft.Json.Formatting.Indented) : "null"; - Assert.Fail($"GetMessagesAsync returned 0 messages, but direct count query found {count} items for conversation {conversationId}. Raw document: {rawJson}"); + Assert.Fail($"InvokingAsync returned 0 messages, but direct count query found {count} items for conversation {conversationId}. Raw document: {rawJson}"); } Assert.Single(messageList); @@ -266,45 +272,63 @@ public async Task AddMessagesAsync_WithSingleMessage_ShouldAddMessageAsync() [SkippableFact] [Trait("Category", "CosmosDB")] - public async Task AddMessagesAsync_WithMultipleMessages_ShouldAddAllMessagesAsync() + public async Task InvokedAsync_WithMultipleMessages_ShouldAddAllMessagesAsync() { // Arrange this.SkipIfEmulatorNotAvailable(); var conversationId = Guid.NewGuid().ToString(); using var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, conversationId); - var messages = new[] + var requestMessages = new[] { new ChatMessage(ChatRole.User, "First message"), new ChatMessage(ChatRole.Assistant, "Second message"), new ChatMessage(ChatRole.User, "Third message") }; + var aiContextProviderMessages = new[] + { + new ChatMessage(ChatRole.System, "System context message") + }; + var responseMessages = new[] + { + new ChatMessage(ChatRole.Assistant, "Response message") + }; + + var context = new ChatMessageStore.InvokedContext(requestMessages, []) + { + AIContextProviderMessages = aiContextProviderMessages, + ResponseMessages = responseMessages + }; // Act - await store.AddMessagesAsync(messages); + await store.InvokedAsync(context); // Assert - var retrievedMessages = await store.GetMessagesAsync(); + var invokingContext = new ChatMessageStore.InvokingContext([]); + var retrievedMessages = await store.InvokingAsync(invokingContext); var messageList = retrievedMessages.ToList(); - Assert.Equal(3, messageList.Count); + Assert.Equal(5, messageList.Count); 
Assert.Equal("First message", messageList[0].Text); Assert.Equal("Second message", messageList[1].Text); Assert.Equal("Third message", messageList[2].Text); + Assert.Equal("System context message", messageList[3].Text); + Assert.Equal("Response message", messageList[4].Text); } #endregion - #region GetMessagesAsync Tests + #region InvokingAsync Tests [SkippableFact] [Trait("Category", "CosmosDB")] - public async Task GetMessagesAsync_WithNoMessages_ShouldReturnEmptyAsync() + public async Task InvokingAsync_WithNoMessages_ShouldReturnEmptyAsync() { // Arrange this.SkipIfEmulatorNotAvailable(); using var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, Guid.NewGuid().ToString()); // Act - var messages = await store.GetMessagesAsync(); + var invokingContext = new ChatMessageStore.InvokingContext([]); + var messages = await store.InvokingAsync(invokingContext); // Assert Assert.Empty(messages); @@ -312,7 +336,7 @@ public async Task GetMessagesAsync_WithNoMessages_ShouldReturnEmptyAsync() [SkippableFact] [Trait("Category", "CosmosDB")] - public async Task GetMessagesAsync_WithConversationIsolation_ShouldOnlyReturnMessagesForConversationAsync() + public async Task InvokingAsync_WithConversationIsolation_ShouldOnlyReturnMessagesForConversationAsync() { // Arrange this.SkipIfEmulatorNotAvailable(); @@ -322,12 +346,18 @@ public async Task GetMessagesAsync_WithConversationIsolation_ShouldOnlyReturnMes using var store1 = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, conversation1); using var store2 = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, conversation2); - await store1.AddMessagesAsync([new ChatMessage(ChatRole.User, "Message for conversation 1")]); - await store2.AddMessagesAsync([new ChatMessage(ChatRole.User, "Message for conversation 2")]); + var context1 = new ChatMessageStore.InvokedContext([new ChatMessage(ChatRole.User, "Message for 
conversation 1")], []); + var context2 = new ChatMessageStore.InvokedContext([new ChatMessage(ChatRole.User, "Message for conversation 2")], []); + + await store1.InvokedAsync(context1); + await store2.InvokedAsync(context2); // Act - var messages1 = await store1.GetMessagesAsync(); - var messages2 = await store2.GetMessagesAsync(); + var invokingContext1 = new ChatMessageStore.InvokingContext([]); + var invokingContext2 = new ChatMessageStore.InvokingContext([]); + + var messages1 = await store1.InvokingAsync(invokingContext1); + var messages2 = await store2.InvokingAsync(invokingContext2); // Assert var messageList1 = messages1.ToList(); @@ -361,16 +391,18 @@ public async Task FullWorkflow_AddAndGet_ShouldWorkCorrectlyAsync() }; // Act 1: Add messages - await originalStore.AddMessagesAsync(messages); + var invokedContext = new ChatMessageStore.InvokedContext(messages, []); + await originalStore.InvokedAsync(invokedContext); // Act 2: Verify messages were added - var retrievedMessages = await originalStore.GetMessagesAsync(); + var invokingContext = new ChatMessageStore.InvokingContext([]); + var retrievedMessages = await originalStore.InvokingAsync(invokingContext); var retrievedList = retrievedMessages.ToList(); Assert.Equal(5, retrievedList.Count); // Act 3: Create new store instance for same conversation (test persistence) using var newStore = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, conversationId); - var persistedMessages = await newStore.GetMessagesAsync(); + var persistedMessages = await newStore.InvokingAsync(invokingContext); var persistedList = persistedMessages.ToList(); // Assert final state @@ -502,7 +534,7 @@ public void Constructor_WithHierarchicalWhitespaceSessionId_ShouldThrowArgumentE [SkippableFact] [Trait("Category", "CosmosDB")] - public async Task AddMessagesAsync_WithHierarchicalPartitioning_ShouldAddMessageWithMetadataAsync() + public async Task 
InvokedAsync_WithHierarchicalPartitioning_ShouldAddMessageWithMetadataAsync() { // Arrange this.SkipIfEmulatorNotAvailable(); @@ -513,14 +545,17 @@ public async Task AddMessagesAsync_WithHierarchicalPartitioning_ShouldAddMessage using var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, TenantId, UserId, SessionId); var message = new ChatMessage(ChatRole.User, "Hello from hierarchical partitioning!"); + var context = new ChatMessageStore.InvokedContext([message], []); + // Act - await store.AddMessagesAsync([message]); + await store.InvokedAsync(context); // Wait a moment for eventual consistency await Task.Delay(100); // Assert - var messages = await store.GetMessagesAsync(); + var invokingContext = new ChatMessageStore.InvokingContext([]); + var messages = await store.InvokingAsync(invokingContext); var messageList = messages.ToList(); Assert.Single(messageList); @@ -551,7 +586,7 @@ public async Task AddMessagesAsync_WithHierarchicalPartitioning_ShouldAddMessage [SkippableFact] [Trait("Category", "CosmosDB")] - public async Task AddMessagesAsync_WithHierarchicalMultipleMessages_ShouldAddAllMessagesAsync() + public async Task InvokedAsync_WithHierarchicalMultipleMessages_ShouldAddAllMessagesAsync() { // Arrange this.SkipIfEmulatorNotAvailable(); @@ -567,14 +602,17 @@ public async Task AddMessagesAsync_WithHierarchicalMultipleMessages_ShouldAddAll new ChatMessage(ChatRole.User, "Third hierarchical message") }; + var context = new ChatMessageStore.InvokedContext(messages, []); + // Act - await store.AddMessagesAsync(messages); + await store.InvokedAsync(context); // Wait a moment for eventual consistency await Task.Delay(100); // Assert - var retrievedMessages = await store.GetMessagesAsync(); + var invokingContext = new ChatMessageStore.InvokingContext([]); + var retrievedMessages = await store.InvokingAsync(invokingContext); var messageList = retrievedMessages.ToList(); Assert.Equal(3, messageList.Count); @@ 
-585,7 +623,7 @@ public async Task AddMessagesAsync_WithHierarchicalMultipleMessages_ShouldAddAll [SkippableFact] [Trait("Category", "CosmosDB")] - public async Task GetMessagesAsync_WithHierarchicalPartitionIsolation_ShouldIsolateMessagesByUserIdAsync() + public async Task InvokingAsync_WithHierarchicalPartitionIsolation_ShouldIsolateMessagesByUserIdAsync() { // Arrange this.SkipIfEmulatorNotAvailable(); @@ -599,17 +637,23 @@ public async Task GetMessagesAsync_WithHierarchicalPartitionIsolation_ShouldIsol using var store2 = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, TenantId, UserId2, SessionId); // Add messages to both stores - await store1.AddMessagesAsync([new ChatMessage(ChatRole.User, "Message from user 1")]); - await store2.AddMessagesAsync([new ChatMessage(ChatRole.User, "Message from user 2")]); + var context1 = new ChatMessageStore.InvokedContext([new ChatMessage(ChatRole.User, "Message from user 1")], []); + var context2 = new ChatMessageStore.InvokedContext([new ChatMessage(ChatRole.User, "Message from user 2")], []); + + await store1.InvokedAsync(context1); + await store2.InvokedAsync(context2); // Wait a moment for eventual consistency await Task.Delay(100); // Act & Assert - var messages1 = await store1.GetMessagesAsync(); + var invokingContext1 = new ChatMessageStore.InvokingContext([]); + var invokingContext2 = new ChatMessageStore.InvokingContext([]); + + var messages1 = await store1.InvokingAsync(invokingContext1); var messageList1 = messages1.ToList(); - var messages2 = await store2.GetMessagesAsync(); + var messages2 = await store2.InvokingAsync(invokingContext2); var messageList2 = messages2.ToList(); // With true hierarchical partitioning, each user sees only their own messages @@ -630,7 +674,9 @@ public async Task SerializeDeserialize_WithHierarchicalPartitioning_ShouldPreser const string SessionId = "session-serialize"; using var originalStore = new 
CosmosChatMessageStore(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, TenantId, UserId, SessionId); - await originalStore.AddMessagesAsync([new ChatMessage(ChatRole.User, "Test serialization message")]); + + var context = new ChatMessageStore.InvokedContext([new ChatMessage(ChatRole.User, "Test serialization message")], []); + await originalStore.InvokedAsync(context); // Act - Serialize the store state var serializedState = originalStore.Serialize(); @@ -647,7 +693,8 @@ public async Task SerializeDeserialize_WithHierarchicalPartitioning_ShouldPreser await Task.Delay(100); // Assert - The deserialized store should have the same functionality - var messages = await deserializedStore.GetMessagesAsync(); + var invokingContext = new ChatMessageStore.InvokingContext([]); + var messages = await deserializedStore.InvokingAsync(invokingContext); var messageList = messages.ToList(); Assert.Single(messageList); @@ -670,17 +717,22 @@ public async Task HierarchicalAndSimplePartitioning_ShouldCoexistAsync() using var hierarchicalStore = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, "tenant-coexist", "user-coexist", SessionId); // Add messages to both - await simpleStore.AddMessagesAsync([new ChatMessage(ChatRole.User, "Simple partitioning message")]); - await hierarchicalStore.AddMessagesAsync([new ChatMessage(ChatRole.User, "Hierarchical partitioning message")]); + var simpleContext = new ChatMessageStore.InvokedContext([new ChatMessage(ChatRole.User, "Simple partitioning message")], []); + var hierarchicalContext = new ChatMessageStore.InvokedContext([new ChatMessage(ChatRole.User, "Hierarchical partitioning message")], []); + + await simpleStore.InvokedAsync(simpleContext); + await hierarchicalStore.InvokedAsync(hierarchicalContext); // Wait a moment for eventual consistency await Task.Delay(100); // Act & Assert - var simpleMessages = await simpleStore.GetMessagesAsync(); + var invokingContext = new 
ChatMessageStore.InvokingContext([]); + + var simpleMessages = await simpleStore.InvokingAsync(invokingContext); var simpleMessageList = simpleMessages.ToList(); - var hierarchicalMessages = await hierarchicalStore.GetMessagesAsync(); + var hierarchicalMessages = await hierarchicalStore.InvokingAsync(invokingContext); var hierarchicalMessageList = hierarchicalMessages.ToList(); // Each should only see its own messages since they use different containers @@ -707,14 +759,17 @@ public async Task MaxMessagesToRetrieve_ShouldLimitAndReturnMostRecentAsync() messages.Add(new ChatMessage(ChatRole.User, $"Message {i}")); await Task.Delay(10); // Small delay to ensure different timestamps } - await store.AddMessagesAsync(messages); + + var context = new ChatMessageStore.InvokedContext(messages, []); + await store.InvokedAsync(context); // Wait for eventual consistency await Task.Delay(100); // Act - Set max to 5 and retrieve store.MaxMessagesToRetrieve = 5; - var retrievedMessages = await store.GetMessagesAsync(); + var invokingContext = new ChatMessageStore.InvokingContext([]); + var retrievedMessages = await store.InvokingAsync(invokingContext); var messageList = retrievedMessages.ToList(); // Assert - Should get the 5 most recent messages (6-10) in ascending order @@ -742,13 +797,16 @@ public async Task MaxMessagesToRetrieve_Null_ShouldReturnAllMessagesAsync() { messages.Add(new ChatMessage(ChatRole.User, $"Message {i}")); } - await store.AddMessagesAsync(messages); + + var context = new ChatMessageStore.InvokedContext(messages, []); + await store.InvokedAsync(context); // Wait for eventual consistency await Task.Delay(100); // Act - No limit set (default null) - var retrievedMessages = await store.GetMessagesAsync(); + var invokingContext = new ChatMessageStore.InvokingContext([]); + var retrievedMessages = await store.InvokingAsync(invokingContext); var messageList = retrievedMessages.ToList(); // Assert - Should get all 10 messages diff --git 
a/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/AIAgentExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/AIAgentExtensionsTests.cs new file mode 100644 index 0000000000..0d5b895974 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/AIAgentExtensionsTests.cs @@ -0,0 +1,218 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using A2A; +using Microsoft.Extensions.AI; +using Moq; +using Moq.Protected; + +namespace Microsoft.Agents.AI.Hosting.A2A.UnitTests; + +/// +/// Unit tests for the class. +/// +public sealed class AIAgentExtensionsTests +{ + /// + /// Verifies that when messageSendParams.Metadata is null, the options passed to RunAsync are null. + /// + [Fact] + public async Task MapA2A_WhenMetadataIsNull_PassesNullOptionsToRunAsync() + { + // Arrange + AgentRunOptions? capturedOptions = null; + ITaskManager taskManager = CreateAgentMock(options => capturedOptions = options).Object.MapA2A(); + + // Act + await InvokeOnMessageReceivedAsync(taskManager, new MessageSendParams + { + Message = new AgentMessage { MessageId = "test-id", Role = MessageRole.User, Parts = [new TextPart { Text = "Hello" }] }, + Metadata = null + }); + + // Assert + Assert.Null(capturedOptions); + } + + /// + /// Verifies that when messageSendParams.Metadata has values, the options.AdditionalProperties contains the converted values. + /// + [Fact] + public async Task MapA2A_WhenMetadataHasValues_PassesOptionsWithAdditionalPropertiesToRunAsync() + { + // Arrange + AgentRunOptions? 
capturedOptions = null; + ITaskManager taskManager = CreateAgentMock(options => capturedOptions = options).Object.MapA2A(); + + // Act + await InvokeOnMessageReceivedAsync(taskManager, new MessageSendParams + { + Message = new AgentMessage { MessageId = "test-id", Role = MessageRole.User, Parts = [new TextPart { Text = "Hello" }] }, + Metadata = new Dictionary + { + ["key1"] = JsonSerializer.SerializeToElement("value1"), + ["key2"] = JsonSerializer.SerializeToElement(42) + } + }); + + // Assert + Assert.NotNull(capturedOptions); + Assert.NotNull(capturedOptions.AdditionalProperties); + Assert.Equal(2, capturedOptions.AdditionalProperties.Count); + Assert.True(capturedOptions.AdditionalProperties.ContainsKey("key1")); + Assert.True(capturedOptions.AdditionalProperties.ContainsKey("key2")); + } + + /// + /// Verifies that when messageSendParams.Metadata is an empty dictionary, the options passed to RunAsync is null + /// because the ToAdditionalProperties extension method returns null for empty dictionaries. + /// + [Fact] + public async Task MapA2A_WhenMetadataIsEmptyDictionary_PassesNullOptionsToRunAsync() + { + // Arrange + AgentRunOptions? capturedOptions = null; + ITaskManager taskManager = CreateAgentMock(options => capturedOptions = options).Object.MapA2A(); + + // Act + await InvokeOnMessageReceivedAsync(taskManager, new MessageSendParams + { + Message = new AgentMessage { MessageId = "test-id", Role = MessageRole.User, Parts = [new TextPart { Text = "Hello" }] }, + Metadata = [] + }); + + // Assert + Assert.Null(capturedOptions); + } + + /// + /// Verifies that when the agent response has AdditionalProperties, the returned AgentMessage.Metadata contains the converted values. 
+ /// + [Fact] + public async Task MapA2A_WhenResponseHasAdditionalProperties_ReturnsAgentMessageWithMetadataAsync() + { + // Arrange + AdditionalPropertiesDictionary additionalProps = new() + { + ["responseKey1"] = "responseValue1", + ["responseKey2"] = 123 + }; + AgentRunResponse response = new([new ChatMessage(ChatRole.Assistant, "Test response")]) + { + AdditionalProperties = additionalProps + }; + ITaskManager taskManager = CreateAgentMockWithResponse(response).Object.MapA2A(); + + // Act + A2AResponse a2aResponse = await InvokeOnMessageReceivedAsync(taskManager, new MessageSendParams + { + Message = new AgentMessage { MessageId = "test-id", Role = MessageRole.User, Parts = [new TextPart { Text = "Hello" }] } + }); + + // Assert + AgentMessage agentMessage = Assert.IsType(a2aResponse); + Assert.NotNull(agentMessage.Metadata); + Assert.Equal(2, agentMessage.Metadata.Count); + Assert.True(agentMessage.Metadata.ContainsKey("responseKey1")); + Assert.True(agentMessage.Metadata.ContainsKey("responseKey2")); + Assert.Equal("responseValue1", agentMessage.Metadata["responseKey1"].GetString()); + Assert.Equal(123, agentMessage.Metadata["responseKey2"].GetInt32()); + } + + /// + /// Verifies that when the agent response has null AdditionalProperties, the returned AgentMessage.Metadata is null. 
+ /// + [Fact] + public async Task MapA2A_WhenResponseHasNullAdditionalProperties_ReturnsAgentMessageWithNullMetadataAsync() + { + // Arrange + AgentRunResponse response = new([new ChatMessage(ChatRole.Assistant, "Test response")]) + { + AdditionalProperties = null + }; + ITaskManager taskManager = CreateAgentMockWithResponse(response).Object.MapA2A(); + + // Act + A2AResponse a2aResponse = await InvokeOnMessageReceivedAsync(taskManager, new MessageSendParams + { + Message = new AgentMessage { MessageId = "test-id", Role = MessageRole.User, Parts = [new TextPart { Text = "Hello" }] } + }); + + // Assert + AgentMessage agentMessage = Assert.IsType(a2aResponse); + Assert.Null(agentMessage.Metadata); + } + + /// + /// Verifies that when the agent response has empty AdditionalProperties, the returned AgentMessage.Metadata is null. + /// + [Fact] + public async Task MapA2A_WhenResponseHasEmptyAdditionalProperties_ReturnsAgentMessageWithNullMetadataAsync() + { + // Arrange + AgentRunResponse response = new([new ChatMessage(ChatRole.Assistant, "Test response")]) + { + AdditionalProperties = [] + }; + ITaskManager taskManager = CreateAgentMockWithResponse(response).Object.MapA2A(); + + // Act + A2AResponse a2aResponse = await InvokeOnMessageReceivedAsync(taskManager, new MessageSendParams + { + Message = new AgentMessage { MessageId = "test-id", Role = MessageRole.User, Parts = [new TextPart { Text = "Hello" }] } + }); + + // Assert + AgentMessage agentMessage = Assert.IsType(a2aResponse); + Assert.Null(agentMessage.Metadata); + } + + private static Mock CreateAgentMock(Action optionsCallback) + { + Mock agentMock = new() { CallBase = true }; + agentMock.SetupGet(x => x.Name).Returns("TestAgent"); + agentMock.Setup(x => x.GetNewThread()).Returns(new TestAgentThread()); + agentMock + .Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) + .Callback, AgentThread?, AgentRunOptions?, CancellationToken>( + (_, _, 
options, _) => optionsCallback(options)) + .ReturnsAsync(new AgentRunResponse([new ChatMessage(ChatRole.Assistant, "Test response")])); + + return agentMock; + } + + private static Mock CreateAgentMockWithResponse(AgentRunResponse response) + { + Mock agentMock = new() { CallBase = true }; + agentMock.SetupGet(x => x.Name).Returns("TestAgent"); + agentMock.Setup(x => x.GetNewThread()).Returns(new TestAgentThread()); + agentMock + .Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) + .ReturnsAsync(response); + + return agentMock; + } + + private static async Task InvokeOnMessageReceivedAsync(ITaskManager taskManager, MessageSendParams messageSendParams) + { + Func>? handler = taskManager.OnMessageReceived; + Assert.NotNull(handler); + return await handler.Invoke(messageSendParams, CancellationToken.None); + } + + private sealed class TestAgentThread : AgentThread; +} diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/Converters/AdditionalPropertiesDictionaryExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/Converters/AdditionalPropertiesDictionaryExtensionsTests.cs new file mode 100644 index 0000000000..e0c8c4e96b --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/Converters/AdditionalPropertiesDictionaryExtensionsTests.cs @@ -0,0 +1,187 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Generic; +using System.Text.Json; +using Microsoft.Agents.AI.Hosting.A2A.Converters; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI.Hosting.A2A.UnitTests.Converters; + +/// +/// Unit tests for the class. +/// +public sealed class AdditionalPropertiesDictionaryExtensionsTests +{ + [Fact] + public void ToA2AMetadata_WithNullAdditionalProperties_ReturnsNull() + { + // Arrange + AdditionalPropertiesDictionary? additionalProperties = null; + + // Act + Dictionary? 
result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.Null(result); + } + + [Fact] + public void ToA2AMetadata_WithEmptyAdditionalProperties_ReturnsNull() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = []; + + // Act + Dictionary? result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.Null(result); + } + + [Fact] + public void ToA2AMetadata_WithStringValue_ReturnsMetadataWithJsonElement() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new() + { + { "stringKey", "stringValue" } + }; + + // Act + Dictionary? result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.NotNull(result); + Assert.Single(result); + Assert.True(result.ContainsKey("stringKey")); + Assert.Equal("stringValue", result["stringKey"].GetString()); + } + + [Fact] + public void ToA2AMetadata_WithNumericValue_ReturnsMetadataWithJsonElement() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new() + { + { "numberKey", 42 } + }; + + // Act + Dictionary? result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.NotNull(result); + Assert.Single(result); + Assert.True(result.ContainsKey("numberKey")); + Assert.Equal(42, result["numberKey"].GetInt32()); + } + + [Fact] + public void ToA2AMetadata_WithBooleanValue_ReturnsMetadataWithJsonElement() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new() + { + { "booleanKey", true } + }; + + // Act + Dictionary? result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.NotNull(result); + Assert.Single(result); + Assert.True(result.ContainsKey("booleanKey")); + Assert.True(result["booleanKey"].GetBoolean()); + } + + [Fact] + public void ToA2AMetadata_WithMultipleProperties_ReturnsMetadataWithAllProperties() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new() + { + { "stringKey", "stringValue" }, + { "numberKey", 42 }, + { "booleanKey", true } + }; + + // Act + Dictionary? 
result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.NotNull(result); + Assert.Equal(3, result.Count); + + Assert.True(result.ContainsKey("stringKey")); + Assert.Equal("stringValue", result["stringKey"].GetString()); + + Assert.True(result.ContainsKey("numberKey")); + Assert.Equal(42, result["numberKey"].GetInt32()); + + Assert.True(result.ContainsKey("booleanKey")); + Assert.True(result["booleanKey"].GetBoolean()); + } + + [Fact] + public void ToA2AMetadata_WithArrayValue_ReturnsMetadataWithJsonElement() + { + // Arrange + int[] arrayValue = [1, 2, 3]; + AdditionalPropertiesDictionary additionalProperties = new() + { + { "arrayKey", arrayValue } + }; + + // Act + Dictionary? result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.NotNull(result); + Assert.Single(result); + Assert.True(result.ContainsKey("arrayKey")); + Assert.Equal(JsonValueKind.Array, result["arrayKey"].ValueKind); + Assert.Equal(3, result["arrayKey"].GetArrayLength()); + } + + [Fact] + public void ToA2AMetadata_WithNullValue_ReturnsMetadataWithNullJsonElement() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new() + { + { "nullKey", null! } + }; + + // Act + Dictionary? result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.NotNull(result); + Assert.Single(result); + Assert.True(result.ContainsKey("nullKey")); + Assert.Equal(JsonValueKind.Null, result["nullKey"].ValueKind); + } + + [Fact] + public void ToA2AMetadata_WithJsonElementValue_ReturnsMetadataWithJsonElement() + { + // Arrange + JsonElement jsonElement = JsonSerializer.SerializeToElement(new { name = "test", value = 123 }); + AdditionalPropertiesDictionary additionalProperties = new() + { + { "jsonElementKey", jsonElement } + }; + + // Act + Dictionary? 
result = additionalProperties.ToA2AMetadata(); + + // Assert + Assert.NotNull(result); + Assert.Single(result); + Assert.True(result.ContainsKey("jsonElementKey")); + Assert.Equal(JsonValueKind.Object, result["jsonElementKey"].ValueKind); + Assert.Equal("test", result["jsonElementKey"].GetProperty("name").GetString()); + Assert.Equal(123, result["jsonElementKey"].GetProperty("value").GetInt32()); + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentContinuationTokenTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentContinuationTokenTests.cs new file mode 100644 index 0000000000..a2add9634b --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentContinuationTokenTests.cs @@ -0,0 +1,128 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Linq; +using System.Text.Json; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI.UnitTests.ChatClient; + +public class ChatClientAgentContinuationTokenTests +{ + [Fact] + public void ToBytes_Roundtrip() + { + // Arrange + ResponseContinuationToken originalToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3, 4, 5 }); + + ChatClientAgentContinuationToken chatClientToken = new(originalToken) + { + InputMessages = + [ + new ChatMessage(ChatRole.User, "Hello!"), + new ChatMessage(ChatRole.User, "How are you?") + ], + ResponseUpdates = + [ + new ChatResponseUpdate(ChatRole.Assistant, "I'm fine, thank you."), + new ChatResponseUpdate(ChatRole.Assistant, "How can I assist you today?") + ] + }; + + // Act + ReadOnlyMemory bytes = chatClientToken.ToBytes(); + + ChatClientAgentContinuationToken tokenFromBytes = ChatClientAgentContinuationToken.FromToken(ResponseContinuationToken.FromBytes(bytes)); + + // Assert + Assert.NotNull(tokenFromBytes); + Assert.Equal(chatClientToken.ToBytes().ToArray(), tokenFromBytes.ToBytes().ToArray()); + + // Verify InnerToken + 
Assert.Equal(chatClientToken.InnerToken.ToBytes().ToArray(), tokenFromBytes.InnerToken.ToBytes().ToArray()); + + // Verify InputMessages + Assert.NotNull(tokenFromBytes.InputMessages); + Assert.Equal(chatClientToken.InputMessages.Count(), tokenFromBytes.InputMessages.Count()); + for (int i = 0; i < chatClientToken.InputMessages.Count(); i++) + { + Assert.Equal(chatClientToken.InputMessages.ElementAt(i).Role, tokenFromBytes.InputMessages.ElementAt(i).Role); + Assert.Equal(chatClientToken.InputMessages.ElementAt(i).Text, tokenFromBytes.InputMessages.ElementAt(i).Text); + } + + // Verify ResponseUpdates + Assert.NotNull(tokenFromBytes.ResponseUpdates); + Assert.Equal(chatClientToken.ResponseUpdates.Count, tokenFromBytes.ResponseUpdates.Count); + for (int i = 0; i < chatClientToken.ResponseUpdates.Count; i++) + { + Assert.Equal(chatClientToken.ResponseUpdates.ElementAt(i).Role, tokenFromBytes.ResponseUpdates.ElementAt(i).Role); + Assert.Equal(chatClientToken.ResponseUpdates.ElementAt(i).Text, tokenFromBytes.ResponseUpdates.ElementAt(i).Text); + } + } + + [Fact] + public void Serialization_Roundtrip() + { + // Arrange + ResponseContinuationToken originalToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3, 4, 5 }); + + ChatClientAgentContinuationToken chatClientToken = new(originalToken) + { + InputMessages = + [ + new ChatMessage(ChatRole.User, "Hello!"), + new ChatMessage(ChatRole.User, "How are you?") + ], + ResponseUpdates = + [ + new ChatResponseUpdate(ChatRole.Assistant, "I'm fine, thank you."), + new ChatResponseUpdate(ChatRole.Assistant, "How can I assist you today?") + ] + }; + + // Act + string json = JsonSerializer.Serialize(chatClientToken, AgentAbstractionsJsonUtilities.DefaultOptions.GetTypeInfo(typeof(ResponseContinuationToken))); + + ResponseContinuationToken? 
deserializedToken = (ResponseContinuationToken?)JsonSerializer.Deserialize(json, AgentAbstractionsJsonUtilities.DefaultOptions.GetTypeInfo(typeof(ResponseContinuationToken))); + + ChatClientAgentContinuationToken deserializedChatClientToken = ChatClientAgentContinuationToken.FromToken(deserializedToken!); + + // Assert + Assert.NotNull(deserializedChatClientToken); + Assert.Equal(chatClientToken.ToBytes().ToArray(), deserializedChatClientToken.ToBytes().ToArray()); + + // Verify InnerToken + Assert.Equal(chatClientToken.InnerToken.ToBytes().ToArray(), deserializedChatClientToken.InnerToken.ToBytes().ToArray()); + + // Verify InputMessages + Assert.NotNull(deserializedChatClientToken.InputMessages); + Assert.Equal(chatClientToken.InputMessages.Count(), deserializedChatClientToken.InputMessages.Count()); + for (int i = 0; i < chatClientToken.InputMessages.Count(); i++) + { + Assert.Equal(chatClientToken.InputMessages.ElementAt(i).Role, deserializedChatClientToken.InputMessages.ElementAt(i).Role); + Assert.Equal(chatClientToken.InputMessages.ElementAt(i).Text, deserializedChatClientToken.InputMessages.ElementAt(i).Text); + } + + // Verify ResponseUpdates + Assert.NotNull(deserializedChatClientToken.ResponseUpdates); + Assert.Equal(chatClientToken.ResponseUpdates.Count, deserializedChatClientToken.ResponseUpdates.Count); + for (int i = 0; i < chatClientToken.ResponseUpdates.Count; i++) + { + Assert.Equal(chatClientToken.ResponseUpdates.ElementAt(i).Role, deserializedChatClientToken.ResponseUpdates.ElementAt(i).Role); + Assert.Equal(chatClientToken.ResponseUpdates.ElementAt(i).Text, deserializedChatClientToken.ResponseUpdates.ElementAt(i).Text); + } + } + + [Fact] + public void FromToken_WithChatClientAgentContinuationToken_ReturnsSameInstance() + { + // Arrange + ChatClientAgentContinuationToken originalToken = new(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3, 4, 5 })); + + // Act + ChatClientAgentContinuationToken fromToken = 
ChatClientAgentContinuationToken.FromToken(originalToken); + + // Assert + Assert.Same(originalToken, fromToken); + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentTests.cs index 6e9d952b57..29d3d3afee 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentTests.cs @@ -502,6 +502,12 @@ public async Task RunAsyncUsesChatMessageStoreFactoryWhenProvidedAndNoConversati It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")])); Mock mockChatMessageStore = new(); + mockChatMessageStore.Setup(s => s.InvokingAsync( + It.IsAny(), + It.IsAny())).ReturnsAsync([new ChatMessage(ChatRole.User, "Existing Chat History")]); + mockChatMessageStore.Setup(s => s.InvokedAsync( + It.IsAny(), + It.IsAny())).Returns(new ValueTask()); Mock> mockFactory = new(); mockFactory.Setup(f => f(It.IsAny())).Returns(mockChatMessageStore.Object); @@ -518,7 +524,58 @@ public async Task RunAsyncUsesChatMessageStoreFactoryWhenProvidedAndNoConversati // Assert Assert.IsType(thread!.MessageStore, exactMatch: false); - mockChatMessageStore.Verify(s => s.AddMessagesAsync(It.Is>(x => x.Count() == 2), It.IsAny()), Times.Once); + mockService.Verify( + x => x.GetResponseAsync( + It.Is>(msgs => msgs.Count() == 2 && msgs.Any(m => m.Text == "Existing Chat History") && msgs.Any(m => m.Text == "test")), + It.IsAny(), + It.IsAny()), + Times.Once); + mockChatMessageStore.Verify(s => s.InvokingAsync( + It.Is(x => x.RequestMessages.Count() == 1), + It.IsAny()), + Times.Once); + mockChatMessageStore.Verify(s => s.InvokedAsync( + It.Is(x => x.RequestMessages.Count() == 1 && x.ChatMessageStoreMessages.Count() == 1 && x.ResponseMessages!.Count() == 1), + It.IsAny()), + Times.Once); + mockFactory.Verify(f => f(It.IsAny()), Times.Once); + } + + /// + /// Verify that RunAsync 
notifies the ChatMessageStore on failure. + /// + [Fact] + public async Task RunAsyncNotifiesChatMessageStoreOnFailureAsync() + { + // Arrange + Mock mockService = new(); + mockService.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).Throws(new InvalidOperationException("Test Error")); + + Mock mockChatMessageStore = new(); + + Mock> mockFactory = new(); + mockFactory.Setup(f => f(It.IsAny())).Returns(mockChatMessageStore.Object); + + ChatClientAgent agent = new(mockService.Object, options: new() + { + ChatOptions = new() { Instructions = "test instructions" }, + ChatMessageStoreFactory = mockFactory.Object + }); + + // Act + ChatClientAgentThread? thread = agent.GetNewThread() as ChatClientAgentThread; + await Assert.ThrowsAsync(() => agent.RunAsync([new(ChatRole.User, "test")], thread)); + + // Assert + Assert.IsType(thread!.MessageStore, exactMatch: false); + mockChatMessageStore.Verify(s => s.InvokedAsync( + It.Is(x => x.RequestMessages.Count() == 1 && x.ResponseMessages == null && x.InvokeException!.Message == "Test Error"), + It.IsAny()), + Times.Once); mockFactory.Verify(f => f(It.IsAny()), Times.Once); } @@ -599,18 +656,18 @@ public async Task RunAsyncInvokesAIContextProviderAndUsesResultAsync() await agent.RunAsync(requestMessages, thread); // Assert - // Should contain: base instructions, context message, user message, base function, context function + // Should contain: base instructions, user message, context message, base function, context function Assert.Equal(2, capturedMessages.Count); Assert.Equal("base instructions\ncontext provider instructions", capturedInstructions); - Assert.Equal("context provider message", capturedMessages[0].Text); - Assert.Equal(ChatRole.System, capturedMessages[0].Role); - Assert.Equal("user message", capturedMessages[1].Text); - Assert.Equal(ChatRole.User, capturedMessages[1].Role); + Assert.Equal("user message", capturedMessages[0].Text); + Assert.Equal(ChatRole.User, 
capturedMessages[0].Role); + Assert.Equal("context provider message", capturedMessages[1].Text); + Assert.Equal(ChatRole.System, capturedMessages[1].Role); Assert.Equal(2, capturedTools.Count); Assert.Contains(capturedTools, t => t.Name == "base function"); Assert.Contains(capturedTools, t => t.Name == "context provider function"); - // Verify that the thread was updated with the input, ai context and response messages + // Verify that the thread was updated with the ai context provider, input and response messages var messageStore = Assert.IsType(thread!.MessageStore); Assert.Equal(3, messageStore.Count); Assert.Equal("user message", messageStore[0].Text); @@ -2056,18 +2113,18 @@ public async Task RunStreamingAsyncInvokesAIContextProviderAndUsesResultAsync() _ = await updates.ToAgentRunResponseAsync(); // Assert - // Should contain: base instructions, context message, user message, base function, context function + // Should contain: base instructions, user message, context message, base function, context function Assert.Equal(2, capturedMessages.Count); Assert.Equal("base instructions\ncontext provider instructions", capturedInstructions); - Assert.Equal("context provider message", capturedMessages[0].Text); - Assert.Equal(ChatRole.System, capturedMessages[0].Role); - Assert.Equal("user message", capturedMessages[1].Text); - Assert.Equal(ChatRole.User, capturedMessages[1].Role); + Assert.Equal("user message", capturedMessages[0].Text); + Assert.Equal(ChatRole.User, capturedMessages[0].Role); + Assert.Equal("context provider message", capturedMessages[1].Text); + Assert.Equal(ChatRole.System, capturedMessages[1].Role); Assert.Equal(2, capturedTools.Count); Assert.Contains(capturedTools, t => t.Name == "base function"); Assert.Contains(capturedTools, t => t.Name == "context provider function"); - // Verify that the thread was updated with the input, ai context and response messages + // Verify that the thread was updated with the input, ai context provider, and 
response messages var messageStore = Assert.IsType(thread!.MessageStore); Assert.Equal(3, messageStore.Count); Assert.Equal("user message", messageStore[0].Text); diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_BackgroundResponsesTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_BackgroundResponsesTests.cs index 583a0815ca..cfccb7267a 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_BackgroundResponsesTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_BackgroundResponsesTests.cs @@ -18,10 +18,10 @@ public class ChatClientAgent_BackgroundResponsesTests [Theory] [InlineData(true)] [InlineData(false)] - public async Task RunAsyncPropagatesBackgroundResponsesPropertiesToChatClientAsync(bool providePropsViaChatOptions) + public async Task RunAsync_PropagatesBackgroundResponsesPropertiesToChatClientAsync(bool providePropsViaChatOptions) { // Arrange - var continuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }); + var continuationToken = new ChatClientAgentContinuationToken(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 })); ChatOptions? 
capturedChatOptions = null; Mock mockChatClient = new(); mockChatClient @@ -63,15 +63,15 @@ public async Task RunAsyncPropagatesBackgroundResponsesPropertiesToChatClientAsy // Assert Assert.NotNull(capturedChatOptions); Assert.True(capturedChatOptions.AllowBackgroundResponses); - Assert.Same(continuationToken, capturedChatOptions.ContinuationToken); + Assert.Same(continuationToken.InnerToken, capturedChatOptions.ContinuationToken); } [Fact] - public async Task RunAsyncPrioritizesBackgroundResponsesPropertiesFromAgentRunOptionsOverOnesFromChatOptionsAsync() + public async Task RunAsync_WhenPropertiesSetInBothLocations_PrioritizesAgentRunOptionsOverChatOptionsAsync() { // Arrange - var continuationToken1 = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }); - var continuationToken2 = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }); + var continuationToken1 = new ChatClientAgentContinuationToken(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 })); + var continuationToken2 = new ChatClientAgentContinuationToken(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 })); ChatOptions? 
capturedChatOptions = null; Mock mockChatClient = new(); mockChatClient @@ -104,13 +104,13 @@ public async Task RunAsyncPrioritizesBackgroundResponsesPropertiesFromAgentRunOp // Assert Assert.NotNull(capturedChatOptions); Assert.False(capturedChatOptions.AllowBackgroundResponses); - Assert.Same(continuationToken2, capturedChatOptions.ContinuationToken); + Assert.Same(continuationToken2.InnerToken, capturedChatOptions.ContinuationToken); } [Theory] [InlineData(true)] [InlineData(false)] - public async Task RunStreamingAsyncPropagatesBackgroundResponsesPropertiesToChatClientAsync(bool providePropsViaChatOptions) + public async Task RunStreamingAsync_PropagatesBackgroundResponsesPropertiesToChatClientAsync(bool providePropsViaChatOptions) { // Arrange ChatResponseUpdate[] returnUpdates = @@ -119,7 +119,7 @@ public async Task RunStreamingAsyncPropagatesBackgroundResponsesPropertiesToChat new ChatResponseUpdate(role: ChatRole.Assistant, content: "at?") { ConversationId = "conversation-id" }, ]; - var continuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }); + var continuationToken = new ChatClientAgentContinuationToken(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 })) { InputMessages = [new ChatMessage()] }; ChatOptions? 
capturedChatOptions = null; Mock mockChatClient = new(); mockChatClient @@ -164,11 +164,11 @@ public async Task RunStreamingAsyncPropagatesBackgroundResponsesPropertiesToChat Assert.NotNull(capturedChatOptions); Assert.True(capturedChatOptions.AllowBackgroundResponses); - Assert.Same(continuationToken, capturedChatOptions.ContinuationToken); + Assert.Same(continuationToken.InnerToken, capturedChatOptions.ContinuationToken); } [Fact] - public async Task RunStreamingAsyncPrioritizesBackgroundResponsesPropertiesFromAgentRunOptionsOverOnesFromChatOptionsAsync() + public async Task RunStreamingAsync_WhenPropertiesSetInBothLocations_PrioritizesAgentRunOptionsOverChatOptionsAsync() { // Arrange ChatResponseUpdate[] returnUpdates = @@ -176,8 +176,8 @@ public async Task RunStreamingAsyncPrioritizesBackgroundResponsesPropertiesFromA new ChatResponseUpdate(role: ChatRole.Assistant, content: "wh") { ConversationId = "conversation-id" }, ]; - var continuationToken1 = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }); - var continuationToken2 = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }); + var continuationToken1 = new ChatClientAgentContinuationToken(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 })) { InputMessages = [new ChatMessage()] }; + var continuationToken2 = new ChatClientAgentContinuationToken(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 })) { InputMessages = [new ChatMessage()] }; ChatOptions? 
capturedChatOptions = null; Mock mockChatClient = new(); mockChatClient @@ -212,11 +212,11 @@ public async Task RunStreamingAsyncPrioritizesBackgroundResponsesPropertiesFromA // Assert Assert.NotNull(capturedChatOptions); Assert.False(capturedChatOptions.AllowBackgroundResponses); - Assert.Same(continuationToken2, capturedChatOptions.ContinuationToken); + Assert.Same(continuationToken2.InnerToken, capturedChatOptions.ContinuationToken); } [Fact] - public async Task RunAsyncPropagatesContinuationTokenFromChatResponseToAgentRunResponseAsync() + public async Task RunAsync_WhenContinuationTokenReceivedFromChatResponse_WrapsContinuationTokenAsync() { // Arrange var continuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }); @@ -237,11 +237,11 @@ public async Task RunAsyncPropagatesContinuationTokenFromChatResponseToAgentRunR var response = await agent.RunAsync([new(ChatRole.User, "hi")], thread, options: runOptions); // Assert - Assert.Same(continuationToken, response.ContinuationToken); + Assert.Same(continuationToken, (response.ContinuationToken as ChatClientAgentContinuationToken)?.InnerToken); } [Fact] - public async Task RunStreamingAsyncPropagatesContinuationTokensFromUpdatesAsync() + public async Task RunStreamingAsync_WhenContinuationTokenReceived_WrapsContinuationTokenAsync() { // Arrange var token1 = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }); @@ -272,19 +272,19 @@ public async Task RunStreamingAsyncPropagatesContinuationTokensFromUpdatesAsync( // Assert Assert.Equal(2, actualUpdates.Count); - Assert.Same(token1, actualUpdates[0].ContinuationToken); + Assert.Same(token1, (actualUpdates[0].ContinuationToken as ChatClientAgentContinuationToken)?.InnerToken); Assert.Null(actualUpdates[1].ContinuationToken); // last update has null token } [Fact] - public async Task RunAsyncThrowsWhenMessagesProvidedWithContinuationTokenAsync() + public async Task 
RunAsync_WhenMessagesProvidedWithContinuationToken_ThrowsInvalidOperationExceptionAsync() { // Arrange Mock mockChatClient = new(); ChatClientAgent agent = new(mockChatClient.Object); - AgentRunOptions runOptions = new() { ContinuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }) }; + AgentRunOptions runOptions = new() { ContinuationToken = new ChatClientAgentContinuationToken(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 })) }; IEnumerable inputMessages = [new ChatMessage(ChatRole.User, "test message")]; @@ -301,14 +301,14 @@ public async Task RunAsyncThrowsWhenMessagesProvidedWithContinuationTokenAsync() } [Fact] - public async Task RunStreamingAsyncThrowsWhenMessagesProvidedWithContinuationTokenAsync() + public async Task RunStreamingAsync_WhenMessagesProvidedWithContinuationToken_ThrowsInvalidOperationExceptionAsync() { // Arrange Mock mockChatClient = new(); ChatClientAgent agent = new(mockChatClient.Object); - AgentRunOptions runOptions = new() { ContinuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }) }; + AgentRunOptions runOptions = new() { ContinuationToken = new ChatClientAgentContinuationToken(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 })) }; IEnumerable inputMessages = [new ChatMessage(ChatRole.User, "test message")]; @@ -331,7 +331,7 @@ await Assert.ThrowsAsync(async () => } [Fact] - public async Task RunAsyncSkipsThreadMessagePopulationWithContinuationTokenAsync() + public async Task RunAsync_WhenContinuationTokenProvided_SkipsThreadMessagePopulationAsync() { // Arrange List capturedMessages = []; @@ -339,7 +339,7 @@ public async Task RunAsyncSkipsThreadMessagePopulationWithContinuationTokenAsync // Create a mock message store that would normally provide messages var mockMessageStore = new Mock(); mockMessageStore - .Setup(ms => ms.GetMessagesAsync(It.IsAny())) + .Setup(ms => ms.InvokingAsync(It.IsAny(), It.IsAny())) .ReturnsAsync([new(ChatRole.User, "Message from message 
store")]); // Create a mock AI context provider that would normally provide context @@ -371,7 +371,10 @@ public async Task RunAsyncSkipsThreadMessagePopulationWithContinuationTokenAsync AIContextProvider = mockContextProvider.Object }; - AgentRunOptions runOptions = new() { ContinuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }) }; + AgentRunOptions runOptions = new() + { + ContinuationToken = new ChatClientAgentContinuationToken(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 })) + }; // Act await agent.RunAsync([], thread, options: runOptions); @@ -383,7 +386,7 @@ public async Task RunAsyncSkipsThreadMessagePopulationWithContinuationTokenAsync // Verify that message store was never called due to continuation token mockMessageStore.Verify( - ms => ms.GetMessagesAsync(It.IsAny()), + ms => ms.InvokingAsync(It.IsAny(), It.IsAny()), Times.Never); // Verify that AI context provider was never called due to continuation token @@ -393,7 +396,7 @@ public async Task RunAsyncSkipsThreadMessagePopulationWithContinuationTokenAsync } [Fact] - public async Task RunStreamingAsyncSkipsThreadMessagePopulationWithContinuationTokenAsync() + public async Task RunStreamingAsync_WhenContinuationTokenProvided_SkipsThreadMessagePopulationAsync() { // Arrange List capturedMessages = []; @@ -401,7 +404,7 @@ public async Task RunStreamingAsyncSkipsThreadMessagePopulationWithContinuationT // Create a mock message store that would normally provide messages var mockMessageStore = new Mock(); mockMessageStore - .Setup(ms => ms.GetMessagesAsync(It.IsAny())) + .Setup(ms => ms.InvokingAsync(It.IsAny(), It.IsAny())) .ReturnsAsync([new(ChatRole.User, "Message from message store")]); // Create a mock AI context provider that would normally provide context @@ -433,20 +436,21 @@ public async Task RunStreamingAsyncSkipsThreadMessagePopulationWithContinuationT AIContextProvider = mockContextProvider.Object }; - AgentRunOptions runOptions = new() { ContinuationToken = 
ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }) }; + AgentRunOptions runOptions = new() + { + ContinuationToken = new ChatClientAgentContinuationToken(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 })) { InputMessages = [new ChatMessage()] } + }; // Act - var exception = await Assert.ThrowsAsync(async () => await agent.RunStreamingAsync(thread, options: runOptions).ToListAsync()); + await agent.RunStreamingAsync(thread, options: runOptions).ToListAsync(); // Assert - Assert.Equal("Streaming resumption is only supported when chat history is stored and managed by the underlying AI service.", exception.Message); - // With continuation token, thread message population should be skipped Assert.Empty(capturedMessages); // Verify that message store was never called due to continuation token mockMessageStore.Verify( - ms => ms.GetMessagesAsync(It.IsAny()), + ms => ms.InvokingAsync(It.IsAny(), It.IsAny()), Times.Never); // Verify that AI context provider was never called due to continuation token @@ -456,7 +460,7 @@ public async Task RunStreamingAsyncSkipsThreadMessagePopulationWithContinuationT } [Fact] - public async Task RunAsyncThrowsWhenNoThreadProvideForBackgroundResponsesAsync() + public async Task RunAsync_WhenNoThreadProvidedForBackgroundResponses_ThrowsInvalidOperationExceptionAsync() { // Arrange Mock mockChatClient = new(); @@ -480,7 +484,7 @@ public async Task RunAsyncThrowsWhenNoThreadProvideForBackgroundResponsesAsync() } [Fact] - public async Task RunStreamingAsyncThrowsWhenNoThreadProvideForBackgroundResponsesAsync() + public async Task RunStreamingAsync_WhenNoThreadProvidedForBackgroundResponses_ThrowsInvalidOperationExceptionAsync() { // Arrange Mock mockChatClient = new(); @@ -510,126 +514,287 @@ await Assert.ThrowsAsync(async () => } [Fact] - public async Task RunAsyncThrowsWhenContinuationTokenProvidedForInitialRunAsync() + public async Task RunStreamingAsync_WhenInputMessagesPresentInContinuationToken_ResumesStreamingAsync() { // 
Arrange + ChatResponseUpdate[] returnUpdates = + [ + new ChatResponseUpdate(role: ChatRole.Assistant, content: "continuation") { ConversationId = "conversation-id" }, + ]; + Mock mockChatClient = new(); + mockChatClient + .Setup(c => c.GetStreamingResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())) + .Returns(ToAsyncEnumerableAsync(returnUpdates)); ChatClientAgent agent = new(mockChatClient.Object); - // Create a new thread with no ConversationId and no MessageStore (initial run state) - ChatClientAgentThread thread = new(); + ChatClientAgentThread thread = new() { ConversationId = "conversation-id" }; - AgentRunOptions runOptions = new() { ContinuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }) }; + AgentRunOptions runOptions = new() + { + ContinuationToken = new ChatClientAgentContinuationToken(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 })) + { + InputMessages = [new ChatMessage(ChatRole.User, "previous message")] + } + }; - // Act & Assert - var exception = await Assert.ThrowsAsync(() => agent.RunAsync(thread: thread, options: runOptions)); - Assert.Equal("Continuation tokens are not allowed to be used for initial runs.", exception.Message); + // Act + var updates = new List(); + await foreach (var update in agent.RunStreamingAsync(thread, options: runOptions)) + { + updates.Add(update); + } - // Verify that the IChatClient was never called due to early validation + // Assert + Assert.Single(updates); + + // Verify that the IChatClient was called mockChatClient.Verify( - c => c.GetResponseAsync( + c => c.GetStreamingResponseAsync( It.IsAny>(), It.IsAny(), It.IsAny()), - Times.Never); + Times.Once); } [Fact] - public async Task RunStreamingAsyncThrowsWhenContinuationTokenProvidedForInitialRunAsync() + public async Task RunStreamingAsync_WhenResponseUpdatesPresentInContinuationToken_ResumesStreamingAsync() { // Arrange + ChatResponseUpdate[] returnUpdates = + [ + new ChatResponseUpdate(role: ChatRole.Assistant, content: 
"continuation") { ConversationId = "conversation-id" }, + ]; + Mock mockChatClient = new(); + mockChatClient + .Setup(c => c.GetStreamingResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())) + .Returns(ToAsyncEnumerableAsync(returnUpdates)); ChatClientAgent agent = new(mockChatClient.Object); - // Create a new thread with no ConversationId and no MessageStore (initial run state) - ChatClientAgentThread thread = new(); + ChatClientAgentThread thread = new() { ConversationId = "conversation-id" }; - AgentRunOptions runOptions = new() { ContinuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }) }; + AgentRunOptions runOptions = new() + { + ContinuationToken = new ChatClientAgentContinuationToken(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 })) + { + ResponseUpdates = [new ChatResponseUpdate(ChatRole.Assistant, "previous update")] + } + }; - // Act & Assert - var exception = await Assert.ThrowsAsync(async () => await agent.RunStreamingAsync(thread: thread, options: runOptions).ToListAsync()); - Assert.Equal("Continuation tokens are not allowed to be used for initial runs.", exception.Message); + // Act + var updates = new List(); + await foreach (var update in agent.RunStreamingAsync(thread, options: runOptions)) + { + updates.Add(update); + } - // Verify that the IChatClient was never called due to early validation + // Assert + Assert.Single(updates); + + // Verify that the IChatClient was called mockChatClient.Verify( c => c.GetStreamingResponseAsync( It.IsAny>(), It.IsAny(), It.IsAny()), - Times.Never); + Times.Once); } [Fact] - public async Task RunStreamingAsyncThrowsWhenContinuationTokenUsedWithClientSideManagedChatHistoryAsync() + public async Task RunStreamingAsync_WhenResumingStreaming_UsesUpdatesFromInitialRunForContextProviderAndMessageStoreAsync() { // Arrange + ChatResponseUpdate[] returnUpdates = + [ + new ChatResponseUpdate(role: ChatRole.Assistant, content: "upon"), + new ChatResponseUpdate(role: 
ChatRole.Assistant, content: " a"), + new ChatResponseUpdate(role: ChatRole.Assistant, content: " time"), + ]; + Mock mockChatClient = new(); + mockChatClient + .Setup(c => c.GetStreamingResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())) + .Returns(ToAsyncEnumerableAsync(returnUpdates)); ChatClientAgent agent = new(mockChatClient.Object); - // Create a thread with a MessageStore + List capturedMessagesAddedToStore = []; + var mockMessageStore = new Mock(); + mockMessageStore + .Setup(ms => ms.InvokedAsync(It.IsAny(), It.IsAny())) + .Callback((ctx, ct) => capturedMessagesAddedToStore.AddRange(ctx.ResponseMessages ?? [])) + .Returns(new ValueTask()); + + AIContextProvider.InvokedContext? capturedInvokedContext = null; + var mockContextProvider = new Mock(); + mockContextProvider + .Setup(cp => cp.InvokedAsync(It.IsAny(), It.IsAny())) + .Callback((context, ct) => capturedInvokedContext = context) + .Returns(new ValueTask()); + ChatClientAgentThread thread = new() { - MessageStore = new InMemoryChatMessageStore(), // Setting a message store to skip checking the continuation token in the initial run - ConversationId = null, // No conversation ID to simulate client-side managed chat history + MessageStore = mockMessageStore.Object, + AIContextProvider = mockContextProvider.Object }; - // Create run options with a continuation token - AgentRunOptions runOptions = new() { ContinuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }) }; + AgentRunOptions runOptions = new() + { + ContinuationToken = new ChatClientAgentContinuationToken(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 })) + { + ResponseUpdates = [new ChatResponseUpdate(ChatRole.Assistant, "once ")] + } + }; - // Act & Assert - var exception = await Assert.ThrowsAsync(async () => await agent.RunStreamingAsync(thread: thread, options: runOptions).ToListAsync()); - Assert.Equal("Streaming resumption is only supported when chat history is stored and managed by the underlying AI 
service.", exception.Message); + // Act + await agent.RunStreamingAsync(thread, options: runOptions).ToListAsync(); - // Verify that the IChatClient was never called due to early validation - mockChatClient.Verify( - c => c.GetStreamingResponseAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny()), - Times.Never); + // Assert + mockMessageStore.Verify(ms => ms.InvokedAsync(It.IsAny(), It.IsAny()), Times.Once); + Assert.Single(capturedMessagesAddedToStore); + Assert.Contains("once upon a time", capturedMessagesAddedToStore[0].Text); + + mockContextProvider.Verify(cp => cp.InvokedAsync(It.IsAny(), It.IsAny()), Times.Once); + Assert.NotNull(capturedInvokedContext?.ResponseMessages); + Assert.Single(capturedInvokedContext.ResponseMessages); + Assert.Contains("once upon a time", capturedInvokedContext.ResponseMessages.ElementAt(0).Text); } [Fact] - public async Task RunStreamingAsyncThrowsWhenContinuationTokenUsedWithAIContextProviderAsync() + public async Task RunStreamingAsync_WhenResumingStreaming_UsesInputMessagesFromInitialRunForContextProviderAndMessageStoreAsync() { // Arrange Mock mockChatClient = new(); + mockChatClient + .Setup(c => c.GetStreamingResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())) + .Returns(ToAsyncEnumerableAsync(Array.Empty())); ChatClientAgent agent = new(mockChatClient.Object); - // Create a mock AIContextProvider + List capturedMessagesAddedToStore = []; + var mockMessageStore = new Mock(); + mockMessageStore + .Setup(ms => ms.InvokedAsync(It.IsAny(), It.IsAny())) + .Callback((ctx, ct) => capturedMessagesAddedToStore.AddRange(ctx.RequestMessages)) + .Returns(new ValueTask()); + + AIContextProvider.InvokedContext? 
capturedInvokedContext = null; var mockContextProvider = new Mock(); mockContextProvider - .Setup(p => p.InvokingAsync(It.IsAny(), It.IsAny())) - .ReturnsAsync(new AIContext()); - mockContextProvider - .Setup(p => p.InvokedAsync(It.IsAny(), It.IsAny())) + .Setup(cp => cp.InvokedAsync(It.IsAny(), It.IsAny())) + .Callback((context, ct) => capturedInvokedContext = context) .Returns(new ValueTask()); - // Create a thread with an AIContextProvider and conversation ID to simulate non-initial run ChatClientAgentThread thread = new() { - ConversationId = "existing-conversation-id", + MessageStore = mockMessageStore.Object, AIContextProvider = mockContextProvider.Object }; - AgentRunOptions runOptions = new() { ContinuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }) }; + AgentRunOptions runOptions = new() + { + ContinuationToken = new ChatClientAgentContinuationToken(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 })) + { + InputMessages = [new ChatMessage(ChatRole.User, "Tell me a story")], + } + }; - // Act & Assert - var exception = await Assert.ThrowsAsync(async () => await agent.RunStreamingAsync(thread: thread, options: runOptions).ToListAsync()); + // Act + await agent.RunStreamingAsync(thread, options: runOptions).ToListAsync(); - Assert.Equal("Using context provider with streaming resumption is not supported.", exception.Message); + // Assert + mockMessageStore.Verify(ms => ms.InvokedAsync(It.IsAny(), It.IsAny()), Times.Once); + Assert.Single(capturedMessagesAddedToStore); + Assert.Contains("Tell me a story", capturedMessagesAddedToStore[0].Text); + + mockContextProvider.Verify(cp => cp.InvokedAsync(It.IsAny(), It.IsAny()), Times.Once); + Assert.NotNull(capturedInvokedContext?.RequestMessages); + Assert.Single(capturedInvokedContext.RequestMessages); + Assert.Contains("Tell me a story", capturedInvokedContext.RequestMessages.ElementAt(0).Text); + } - // Verify that the IChatClient was never called due to early validation - 
mockChatClient.Verify( - c => c.GetStreamingResponseAsync( + [Fact] + public async Task RunStreamingAsync_WhenResumingStreaming_SavesInputMessagesAndUpdatesInContinuationTokenAsync() + { + // Arrange + List returnUpdates = + [ + new ChatResponseUpdate(role: ChatRole.Assistant, content: "Once") { ContinuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }) }, + new ChatResponseUpdate(role: ChatRole.Assistant, content: " upon") { ContinuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }) }, + new ChatResponseUpdate(role: ChatRole.Assistant, content: " a") { ContinuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }) }, + new ChatResponseUpdate(role: ChatRole.Assistant, content: " time"){ ContinuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }) }, + ]; + + Mock mockChatClient = new(); + mockChatClient + .Setup(c => c.GetStreamingResponseAsync( It.IsAny>(), It.IsAny(), - It.IsAny()), - Times.Never); + It.IsAny())) + .Returns(ToAsyncEnumerableAsync(returnUpdates)); + + ChatClientAgent agent = new(mockChatClient.Object); + + ChatClientAgentThread thread = new() { }; + + List capturedContinuationTokens = []; + + ChatMessage userMessage = new(ChatRole.User, "Tell me a story"); + + // Act + + // Do the initial run + await foreach (var update in agent.RunStreamingAsync(userMessage, thread)) + { + capturedContinuationTokens.Add(Assert.IsType(update.ContinuationToken)); + break; + } + + // Now resume the run using the captured continuation token + returnUpdates.RemoveAt(0); // remove the first mock update as it was already processed + var options = new AgentRunOptions { ContinuationToken = capturedContinuationTokens[0] }; + await foreach (var update in agent.RunStreamingAsync(thread, options: options)) + { + capturedContinuationTokens.Add(Assert.IsType(update.ContinuationToken)); + } + + // Assert + Assert.Equal(4, capturedContinuationTokens.Count); + + // Verify that the first continuation 
token has the initial input and first update + Assert.NotNull(capturedContinuationTokens[0].InputMessages); + Assert.Single(capturedContinuationTokens[0].InputMessages!); + Assert.Equal("Tell me a story", capturedContinuationTokens[0].InputMessages!.Last().Text); + Assert.NotNull(capturedContinuationTokens[0].ResponseUpdates); + Assert.Single(capturedContinuationTokens[0].ResponseUpdates!); + Assert.Equal("Once", capturedContinuationTokens[0].ResponseUpdates![0].Text); + + // Verify the last continuation token has the input and all updates + var lastToken = capturedContinuationTokens[^1]; + Assert.NotNull(lastToken.InputMessages); + Assert.Single(lastToken.InputMessages!); + Assert.Equal("Tell me a story", lastToken.InputMessages!.Last().Text); + Assert.NotNull(lastToken.ResponseUpdates); + Assert.Equal(4, lastToken.ResponseUpdates!.Count); + Assert.Equal("Once", lastToken.ResponseUpdates!.ElementAt(0).Text); + Assert.Equal(" upon", lastToken.ResponseUpdates!.ElementAt(1).Text); + Assert.Equal(" a", lastToken.ResponseUpdates!.ElementAt(2).Text); + Assert.Equal(" time", lastToken.ResponseUpdates!.ElementAt(3).Text); } private static async IAsyncEnumerable ToAsyncEnumerableAsync(IEnumerable values) diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_RunWithCustomOptionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_RunWithCustomOptionsTests.cs new file mode 100644 index 0000000000..85cb4cf0b4 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_RunWithCustomOptionsTests.cs @@ -0,0 +1,456 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text.Json.Serialization; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.AI; +using Moq; + +namespace Microsoft.Agents.AI.UnitTests; + +/// +/// Tests for run methods with . 
+/// +public sealed partial class ChatClientAgent_RunWithCustomOptionsTests +{ + #region RunAsync Tests + + [Fact] + public async Task RunAsync_WithThreadAndOptions_CallsBaseMethodAsync() + { + // Arrange + Mock mockChatClient = new(); + mockChatClient.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "Response")])); + + ChatClientAgent agent = new(mockChatClient.Object); + AgentThread thread = agent.GetNewThread(); + ChatClientAgentRunOptions options = new(); + + // Act + AgentRunResponse result = await agent.RunAsync(thread, options); + + // Assert + Assert.NotNull(result); + Assert.Single(result.Messages); + mockChatClient.Verify( + x => x.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny()), + Times.Once); + } + + [Fact] + public async Task RunAsync_WithStringMessageAndOptions_CallsBaseMethodAsync() + { + // Arrange + Mock mockChatClient = new(); + mockChatClient.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "Response")])); + + ChatClientAgent agent = new(mockChatClient.Object); + AgentThread thread = agent.GetNewThread(); + ChatClientAgentRunOptions options = new(); + + // Act + AgentRunResponse result = await agent.RunAsync("Test message", thread, options); + + // Assert + Assert.NotNull(result); + Assert.Single(result.Messages); + mockChatClient.Verify( + x => x.GetResponseAsync( + It.Is>(msgs => msgs.Any(m => m.Text == "Test message")), + It.IsAny(), + It.IsAny()), + Times.Once); + } + + [Fact] + public async Task RunAsync_WithChatMessageAndOptions_CallsBaseMethodAsync() + { + // Arrange + Mock mockChatClient = new(); + mockChatClient.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "Response")])); + + ChatClientAgent agent = new(mockChatClient.Object); + AgentThread thread = agent.GetNewThread(); + 
ChatMessage message = new(ChatRole.User, "Test message"); + ChatClientAgentRunOptions options = new(); + + // Act + AgentRunResponse result = await agent.RunAsync(message, thread, options); + + // Assert + Assert.NotNull(result); + Assert.Single(result.Messages); + mockChatClient.Verify( + x => x.GetResponseAsync( + It.Is>(msgs => msgs.Contains(message)), + It.IsAny(), + It.IsAny()), + Times.Once); + } + + [Fact] + public async Task RunAsync_WithMessagesCollectionAndOptions_CallsBaseMethodAsync() + { + // Arrange + Mock mockChatClient = new(); + mockChatClient.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "Response")])); + + ChatClientAgent agent = new(mockChatClient.Object); + AgentThread thread = agent.GetNewThread(); + IEnumerable messages = [new(ChatRole.User, "Message 1"), new(ChatRole.User, "Message 2")]; + ChatClientAgentRunOptions options = new(); + + // Act + AgentRunResponse result = await agent.RunAsync(messages, thread, options); + + // Assert + Assert.NotNull(result); + Assert.Single(result.Messages); + mockChatClient.Verify( + x => x.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny()), + Times.Once); + } + + [Fact] + public async Task RunAsync_WithChatOptionsInRunOptions_UsesChatOptionsAsync() + { + // Arrange + Mock mockChatClient = new(); + mockChatClient.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "Response")])); + + ChatClientAgent agent = new(mockChatClient.Object); + ChatClientAgentRunOptions options = new(new ChatOptions { Temperature = 0.5f }); + + // Act + AgentRunResponse result = await agent.RunAsync("Test", null, options); + + // Assert + Assert.NotNull(result); + mockChatClient.Verify( + x => x.GetResponseAsync( + It.IsAny>(), + It.Is(opts => opts.Temperature == 0.5f), + It.IsAny()), + Times.Once); + } + + #endregion + + #region RunStreamingAsync Tests + + 
[Fact] + public async Task RunStreamingAsync_WithThreadAndOptions_CallsBaseMethodAsync() + { + // Arrange + Mock mockChatClient = new(); + mockChatClient.Setup( + s => s.GetStreamingResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).Returns(GetAsyncUpdatesAsync()); + + ChatClientAgent agent = new(mockChatClient.Object); + AgentThread thread = agent.GetNewThread(); + ChatClientAgentRunOptions options = new(); + + // Act + var updates = new List(); + await foreach (var update in agent.RunStreamingAsync(thread, options)) + { + updates.Add(update); + } + + // Assert + Assert.NotEmpty(updates); + mockChatClient.Verify( + x => x.GetStreamingResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny()), + Times.Once); + } + + [Fact] + public async Task RunStreamingAsync_WithStringMessageAndOptions_CallsBaseMethodAsync() + { + // Arrange + Mock mockChatClient = new(); + mockChatClient.Setup( + s => s.GetStreamingResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).Returns(GetAsyncUpdatesAsync()); + + ChatClientAgent agent = new(mockChatClient.Object); + AgentThread thread = agent.GetNewThread(); + ChatClientAgentRunOptions options = new(); + + // Act + var updates = new List(); + await foreach (var update in agent.RunStreamingAsync("Test message", thread, options)) + { + updates.Add(update); + } + + // Assert + Assert.NotEmpty(updates); + mockChatClient.Verify( + x => x.GetStreamingResponseAsync( + It.Is>(msgs => msgs.Any(m => m.Text == "Test message")), + It.IsAny(), + It.IsAny()), + Times.Once); + } + + [Fact] + public async Task RunStreamingAsync_WithChatMessageAndOptions_CallsBaseMethodAsync() + { + // Arrange + Mock mockChatClient = new(); + mockChatClient.Setup( + s => s.GetStreamingResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).Returns(GetAsyncUpdatesAsync()); + + ChatClientAgent agent = new(mockChatClient.Object); + AgentThread thread = agent.GetNewThread(); + ChatMessage message = new(ChatRole.User, "Test message"); + 
ChatClientAgentRunOptions options = new(); + + // Act + var updates = new List(); + await foreach (var update in agent.RunStreamingAsync(message, thread, options)) + { + updates.Add(update); + } + + // Assert + Assert.NotEmpty(updates); + mockChatClient.Verify( + x => x.GetStreamingResponseAsync( + It.Is>(msgs => msgs.Contains(message)), + It.IsAny(), + It.IsAny()), + Times.Once); + } + + [Fact] + public async Task RunStreamingAsync_WithMessagesCollectionAndOptions_CallsBaseMethodAsync() + { + // Arrange + Mock mockChatClient = new(); + mockChatClient.Setup( + s => s.GetStreamingResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).Returns(GetAsyncUpdatesAsync()); + + ChatClientAgent agent = new(mockChatClient.Object); + AgentThread thread = agent.GetNewThread(); + IEnumerable messages = [new ChatMessage(ChatRole.User, "Message 1"), new ChatMessage(ChatRole.User, "Message 2")]; + ChatClientAgentRunOptions options = new(); + + // Act + var updates = new List(); + await foreach (var update in agent.RunStreamingAsync(messages, thread, options)) + { + updates.Add(update); + } + + // Assert + Assert.NotEmpty(updates); + mockChatClient.Verify( + x => x.GetStreamingResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny()), + Times.Once); + } + + #endregion + + #region Helper Methods + + private static async IAsyncEnumerable GetAsyncUpdatesAsync() + { + yield return new ChatResponseUpdate { Contents = new[] { new TextContent("Hello") } }; + yield return new ChatResponseUpdate { Contents = new[] { new TextContent(" World") } }; + await Task.CompletedTask; + } + + #endregion + + #region RunAsync{T} Tests + + [Fact] + public async Task RunAsyncOfT_WithThreadAndOptions_CallsBaseMethodAsync() + { + // Arrange + Mock mockChatClient = new(); + mockChatClient.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, """{"id":2, "fullName":"Tigger", "species":"Tiger"}""")])); + + ChatClientAgent agent = 
new(mockChatClient.Object); + AgentThread thread = agent.GetNewThread(); + ChatClientAgentRunOptions options = new(); + + // Act + AgentRunResponse agentRunResponse = await agent.RunAsync(thread, JsonContext_WithCustomRunOptions.Default.Options, options); + + // Assert + Assert.NotNull(agentRunResponse); + Assert.Single(agentRunResponse.Messages); + Assert.Equal("Tigger", agentRunResponse.Result.FullName); + mockChatClient.Verify( + x => x.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny()), + Times.Once); + } + + [Fact] + public async Task RunAsyncOfT_WithStringMessageAndOptions_CallsBaseMethodAsync() + { + // Arrange + Mock mockChatClient = new(); + mockChatClient.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, """{"id":2, "fullName":"Tigger", "species":"Tiger"}""")])); + + ChatClientAgent agent = new(mockChatClient.Object); + AgentThread thread = agent.GetNewThread(); + ChatClientAgentRunOptions options = new(); + + // Act + AgentRunResponse agentRunResponse = await agent.RunAsync("Test message", thread, JsonContext_WithCustomRunOptions.Default.Options, options); + + // Assert + Assert.NotNull(agentRunResponse); + Assert.Single(agentRunResponse.Messages); + Assert.Equal("Tigger", agentRunResponse.Result.FullName); + mockChatClient.Verify( + x => x.GetResponseAsync( + It.Is>(msgs => msgs.Any(m => m.Text == "Test message")), + It.IsAny(), + It.IsAny()), + Times.Once); + } + + [Fact] + public async Task RunAsyncOfT_WithChatMessageAndOptions_CallsBaseMethodAsync() + { + // Arrange + Mock mockChatClient = new(); + mockChatClient.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, """{"id":2, "fullName":"Tigger", "species":"Tiger"}""")])); + + ChatClientAgent agent = new(mockChatClient.Object); + AgentThread thread = agent.GetNewThread(); + ChatMessage message = new(ChatRole.User, "Test 
message"); + ChatClientAgentRunOptions options = new(); + + // Act + AgentRunResponse agentRunResponse = await agent.RunAsync(message, thread, JsonContext_WithCustomRunOptions.Default.Options, options); + + // Assert + Assert.NotNull(agentRunResponse); + Assert.Single(agentRunResponse.Messages); + Assert.Equal("Tigger", agentRunResponse.Result.FullName); + mockChatClient.Verify( + x => x.GetResponseAsync( + It.Is>(msgs => msgs.Contains(message)), + It.IsAny(), + It.IsAny()), + Times.Once); + } + + [Fact] + public async Task RunAsyncOfT_WithMessagesCollectionAndOptions_CallsBaseMethodAsync() + { + // Arrange + Mock mockChatClient = new(); + mockChatClient.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, """{"id":2, "fullName":"Tigger", "species":"Tiger"}""")])); + + ChatClientAgent agent = new(mockChatClient.Object); + AgentThread thread = agent.GetNewThread(); + IEnumerable messages = [new(ChatRole.User, "Message 1"), new(ChatRole.User, "Message 2")]; + ChatClientAgentRunOptions options = new(); + + // Act + AgentRunResponse agentRunResponse = await agent.RunAsync(messages, thread, JsonContext_WithCustomRunOptions.Default.Options, options); + + // Assert + Assert.NotNull(agentRunResponse); + Assert.Single(agentRunResponse.Messages); + Assert.Equal("Tigger", agentRunResponse.Result.FullName); + mockChatClient.Verify( + x => x.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny()), + Times.Once); + } + + #endregion + + private sealed class Animal + { + public int Id { get; set; } + public string? 
FullName { get; set; } + public Species Species { get; set; } + } + + private enum Species + { + Bear, + Tiger, + Walrus, + } + + [JsonSourceGenerationOptions(UseStringEnumConverter = true, PropertyNamingPolicy = JsonKnownNamingPolicy.CamelCase)] + [JsonSerializable(typeof(Animal))] + private sealed partial class JsonContext_WithCustomRunOptions : JsonSerializerContext; +} diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/WorkflowHostSmokeTests.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/WorkflowHostSmokeTests.cs new file mode 100644 index 0000000000..be3c96d9f5 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/WorkflowHostSmokeTests.cs @@ -0,0 +1,114 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI.Workflows.UnitTests; + +public sealed class ExpectedException : Exception +{ + public ExpectedException(string message) + : base(message) + { + } + + public ExpectedException() : base() + { + } + + public ExpectedException(string? message, Exception? innerException) : base(message, innerException) + { + } +} + +public class WorkflowHostSmokeTests +{ + private sealed class AlwaysFailsAIAgent(bool failByThrowing) : AIAgent + { + private sealed class Thread : InMemoryAgentThread + { + public Thread() { } + + public Thread(JsonElement serializedThread, JsonSerializerOptions? jsonSerializerOptions = null) + : base(serializedThread, jsonSerializerOptions) + { } + } + + public override AgentThread DeserializeThread(JsonElement serializedThread, JsonSerializerOptions? 
jsonSerializerOptions = null) + { + return new Thread(serializedThread, jsonSerializerOptions); + } + + public override AgentThread GetNewThread() + { + return new Thread(); + } + + protected override async Task RunCoreAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + { + return await this.RunStreamingAsync(messages, thread, options, cancellationToken) + .ToAgentRunResponseAsync(cancellationToken); + } + + protected override async IAsyncEnumerable RunCoreStreamingAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + const string ErrorMessage = "Simulated agent failure."; + if (failByThrowing) + { + throw new ExpectedException(ErrorMessage); + } + + yield return new AgentRunResponseUpdate(ChatRole.Assistant, [new ErrorContent(ErrorMessage)]); + } + } + + private static Workflow CreateWorkflow(bool failByThrowing) + { + ExecutorBinding agent = new AlwaysFailsAIAgent(failByThrowing).BindAsExecutor(emitEvents: true); + + return new WorkflowBuilder(agent).Build(); + } + + [Theory] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public async Task Test_AsAgent_ErrorContentStreamedOutAsync(bool includeExceptionDetails, bool failByThrowing) + { + string expectedMessage = !failByThrowing || includeExceptionDetails + ? "Simulated agent failure." + : "An error occurred while executing the workflow."; + + // Arrange is done by the caller. 
+ Workflow workflow = CreateWorkflow(failByThrowing); + + // Act + List updates = await workflow.AsAgent("WorkflowAgent", includeExceptionDetails: includeExceptionDetails) + .RunStreamingAsync(new ChatMessage(ChatRole.User, "Hello")) + .ToListAsync(); + + // Assert + bool hadErrorContent = false; + foreach (AgentRunResponseUpdate update in updates) + { + if (update.Contents.Any()) + { + // We should expect a single update which contains the error content. + update.Contents.Should().ContainSingle() + .Which.Should().BeOfType() + .Which.Message.Should().Be(expectedMessage); + hadErrorContent = true; + } + } + + hadErrorContent.Should().BeTrue(); + } +} diff --git a/dotnet/tests/OpenAIChatCompletion.IntegrationTests/OpenAIChatCompletionFixture.cs b/dotnet/tests/OpenAIChatCompletion.IntegrationTests/OpenAIChatCompletionFixture.cs index 656d310ddf..0fb9745d2d 100644 --- a/dotnet/tests/OpenAIChatCompletion.IntegrationTests/OpenAIChatCompletionFixture.cs +++ b/dotnet/tests/OpenAIChatCompletion.IntegrationTests/OpenAIChatCompletionFixture.cs @@ -32,7 +32,12 @@ public async Task> GetChatHistoryAsync(AgentThread thread) { var typedThread = (ChatClientAgentThread)thread; - return typedThread.MessageStore is null ? [] : (await typedThread.MessageStore.GetMessagesAsync()).ToList(); + if (typedThread.MessageStore is null) + { + return []; + } + + return (await typedThread.MessageStore.InvokingAsync(new([]))).ToList(); } public Task CreateChatClientAgentAsync( diff --git a/dotnet/tests/OpenAIResponse.IntegrationTests/OpenAIResponseFixture.cs b/dotnet/tests/OpenAIResponse.IntegrationTests/OpenAIResponseFixture.cs index c6c84db569..c57e1c460d 100644 --- a/dotnet/tests/OpenAIResponse.IntegrationTests/OpenAIResponseFixture.cs +++ b/dotnet/tests/OpenAIResponse.IntegrationTests/OpenAIResponseFixture.cs @@ -50,7 +50,12 @@ public async Task> GetChatHistoryAsync(AgentThread thread) return [.. previousMessages, responseMessage]; } - return typedThread.MessageStore is null ? 
[] : (await typedThread.MessageStore.GetMessagesAsync()).ToList(); + if (typedThread.MessageStore is null) + { + return []; + } + + return (await typedThread.MessageStore.InvokingAsync(new([]))).ToList(); } private static ChatMessage ConvertToChatMessage(ResponseItem item) diff --git a/python/CHANGELOG.md b/python/CHANGELOG.md index 18ba979fb8..c1099e7ba7 100644 --- a/python/CHANGELOG.md +++ b/python/CHANGELOG.md @@ -7,6 +7,41 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [1.0.0b260107] - 2026-01-07 + +### Added + +- **agent-framework-devui**: Improve DevUI and add Context Inspector view as a new tab under traces ([#2742](https://github.com/microsoft/agent-framework/pull/2742)) by @victordibia +- **samples**: Add streaming sample for Azure Functions ([#3057](https://github.com/microsoft/agent-framework/pull/3057)) by @gavin-aguiar + +### Changed + +- **repo**: Update templates ([#3106](https://github.com/microsoft/agent-framework/pull/3106)) by @eavanvalkenburg + +### Fixed + +- **agent-framework-ag-ui**: Fix MCP tool result serialization for list[TextContent] ([#2523](https://github.com/microsoft/agent-framework/pull/2523)) by @claude89757 +- **agent-framework-azure-ai**: Fix response_format handling for structured outputs ([#3114](https://github.com/microsoft/agent-framework/pull/3114)) by @moonbox3 + +## [1.0.0b260106] - 2026-01-06 + +### Added + +- **repo**: Add issue template and additional labeling ([#3006](https://github.com/microsoft/agent-framework/pull/3006)) by @eavanvalkenburg + +### Changed + +- None + +### Fixed + +- **agent-framework-core**: Fix max tokens translation and add extra integer test ([#3037](https://github.com/microsoft/agent-framework/pull/3037)) by @eavanvalkenburg +- **agent-framework-azure-ai**: Fix failure when conversation history contains assistant messages ([#3076](https://github.com/microsoft/agent-framework/pull/3076)) by @moonbox3 +- **agent-framework-core**: Use HTTP 
exporter for http/protobuf protocol ([#3070](https://github.com/microsoft/agent-framework/pull/3070)) by @takanori-terai +- **agent-framework-core**: Fix ExecutorInvokedEvent and ExecutorCompletedEvent observability data ([#3090](https://github.com/microsoft/agent-framework/pull/3090)) by @moonbox3 +- **agent-framework-core**: Honor tool_choice parameter passed to agent.run() and chat client methods ([#3095](https://github.com/microsoft/agent-framework/pull/3095)) by @moonbox3 +- **samples**: AzureAI SharePoint sample fix ([#3108](https://github.com/microsoft/agent-framework/pull/3108)) by @giles17 + ## [1.0.0b251223] - 2025-12-23 ### Added @@ -426,7 +461,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 For more information, see the [announcement blog post](https://devblogs.microsoft.com/foundry/introducing-microsoft-agent-framework-the-open-source-engine-for-agentic-ai-apps/). -[Unreleased]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251223...HEAD +[Unreleased]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b260107...HEAD +[1.0.0b260107]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b260106...python-1.0.0b260107 +[1.0.0b260106]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251223...python-1.0.0b260106 [1.0.0b251223]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251218...python-1.0.0b251223 [1.0.0b251218]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251216...python-1.0.0b251218 [1.0.0b251216]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251211...python-1.0.0b251216 diff --git a/python/packages/a2a/pyproject.toml b/python/packages/a2a/pyproject.toml index c37ba371e2..b9935fb9c3 100644 --- a/python/packages/a2a/pyproject.toml +++ b/python/packages/a2a/pyproject.toml @@ -4,7 +4,7 @@ description = "A2A integration for Microsoft Agent Framework." 
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251223" +version = "1.0.0b260107" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_confirmation_strategies.py b/python/packages/ag-ui/agent_framework_ag_ui/_confirmation_strategies.py index 8bba842705..35e648c100 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_confirmation_strategies.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_confirmation_strategies.py @@ -11,21 +11,61 @@ class ConfirmationStrategy(ABC): - """Strategy for generating confirmation messages during human-in-the-loop flows.""" + """Strategy for generating confirmation messages during human-in-the-loop flows. + Subclasses must define the message properties. The methods use those properties + by default, but can be overridden for complete customization. + """ + + @property + @abstractmethod + def approval_header(self) -> str: + """Header for approval accepted message. Must be overridden.""" + ... + + @property + @abstractmethod + def approval_footer(self) -> str: + """Footer for approval accepted message. Must be overridden.""" + ... + + @property + @abstractmethod + def rejection_message(self) -> str: + """Message when user rejects. Must be overridden.""" + ... + + @property @abstractmethod + def state_confirmed_message(self) -> str: + """Message when state is confirmed. Must be overridden.""" + ... + + @property + @abstractmethod + def state_rejected_message(self) -> str: + """Message when state is rejected. Must be overridden.""" + ... + def on_approval_accepted(self, steps: list[dict[str, Any]]) -> str: """Generate message when user approves function execution. + Default implementation uses header/footer properties. + Override for complete customization. 
+ Args: steps: List of approved steps with 'description', 'status', etc. Returns: Message to display to user """ - ... + enabled_steps = [s for s in steps if s.get("status") == "enabled"] + message_parts = [self.approval_header.format(count=len(enabled_steps))] + for i, step in enumerate(enabled_steps, 1): + message_parts.append(f"{i}. {step['description']}\n") + message_parts.append(self.approval_footer) + return "".join(message_parts) - @abstractmethod def on_approval_rejected(self, steps: list[dict[str, Any]]) -> str: """Generate message when user rejects function execution. @@ -35,141 +75,143 @@ def on_approval_rejected(self, steps: list[dict[str, Any]]) -> str: Returns: Message to display to user """ - ... + return self.rejection_message - @abstractmethod def on_state_confirmed(self) -> str: """Generate message when user confirms predictive state changes. Returns: Message to display to user """ - ... + return self.state_confirmed_message - @abstractmethod def on_state_rejected(self) -> str: """Generate message when user rejects predictive state changes. Returns: Message to display to user """ - ... + return self.state_rejected_message class DefaultConfirmationStrategy(ConfirmationStrategy): - """Generic confirmation messages suitable for most agents. - - This preserves the original behavior from v1. - """ + """Generic confirmation messages suitable for most agents.""" - def on_approval_accepted(self, steps: list[dict[str, Any]]) -> str: - """Generate generic approval message with step list.""" - enabled_steps = [s for s in steps if s.get("status") == "enabled"] - - message_parts = [f"Executing {len(enabled_steps)} approved steps:\n\n"] - - for i, step in enumerate(enabled_steps, 1): - message_parts.append(f"{i}. 
{step['description']}\n") - - message_parts.append("\nAll steps completed successfully!") + @property + def approval_header(self) -> str: + return "Executing {count} approved steps:\n\n" - return "".join(message_parts) + @property + def approval_footer(self) -> str: + return "\nAll steps completed successfully!" - def on_approval_rejected(self, steps: list[dict[str, Any]]) -> str: - """Generate generic rejection message.""" + @property + def rejection_message(self) -> str: return "No problem! What would you like me to change about the plan?" - def on_state_confirmed(self) -> str: - """Generate generic state confirmation message.""" + @property + def state_confirmed_message(self) -> str: return "Changes confirmed and applied successfully!" - def on_state_rejected(self) -> str: - """Generate generic state rejection message.""" + @property + def state_rejected_message(self) -> str: return "No problem! What would you like me to change?" class TaskPlannerConfirmationStrategy(ConfirmationStrategy): """Domain-specific confirmation messages for task planning agents.""" - def on_approval_accepted(self, steps: list[dict[str, Any]]) -> str: - """Generate task-specific approval message.""" - enabled_steps = [s for s in steps if s.get("status") == "enabled"] - - message_parts = ["Executing your requested tasks:\n\n"] - - for i, step in enumerate(enabled_steps, 1): - message_parts.append(f"{i}. {step['description']}\n") + @property + def approval_header(self) -> str: + return "Executing your requested tasks:\n\n" - message_parts.append("\nAll tasks completed successfully!") + @property + def approval_footer(self) -> str: + return "\nAll tasks completed successfully!" - return "".join(message_parts) - - def on_approval_rejected(self, steps: list[dict[str, Any]]) -> str: - """Generate task-specific rejection message.""" + @property + def rejection_message(self) -> str: return "No problem! Let me revise the plan. What would you like me to change?" 
- def on_state_confirmed(self) -> str: - """Task planners typically don't use state confirmation.""" + @property + def state_confirmed_message(self) -> str: return "Tasks confirmed and ready to execute!" - def on_state_rejected(self) -> str: - """Task planners typically don't use state confirmation.""" + @property + def state_rejected_message(self) -> str: return "No problem! How should I adjust the task list?" class RecipeConfirmationStrategy(ConfirmationStrategy): """Domain-specific confirmation messages for recipe agents.""" - def on_approval_accepted(self, steps: list[dict[str, Any]]) -> str: - """Generate recipe-specific approval message.""" - enabled_steps = [s for s in steps if s.get("status") == "enabled"] - - message_parts = ["Updating your recipe:\n\n"] + @property + def approval_header(self) -> str: + return "Updating your recipe:\n\n" - for i, step in enumerate(enabled_steps, 1): - message_parts.append(f"{i}. {step['description']}\n") - - message_parts.append("\nRecipe updated successfully!") - - return "".join(message_parts) + @property + def approval_footer(self) -> str: + return "\nRecipe updated successfully!" - def on_approval_rejected(self, steps: list[dict[str, Any]]) -> str: - """Generate recipe-specific rejection message.""" + @property + def rejection_message(self) -> str: return "No problem! What ingredients or steps should I change?" - def on_state_confirmed(self) -> str: - """Generate recipe-specific state confirmation message.""" + @property + def state_confirmed_message(self) -> str: return "Recipe changes applied successfully!" - def on_state_rejected(self) -> str: - """Generate recipe-specific state rejection message.""" + @property + def state_rejected_message(self) -> str: return "No problem! What would you like me to adjust in the recipe?" 
class DocumentWriterConfirmationStrategy(ConfirmationStrategy): """Domain-specific confirmation messages for document writing agents.""" - def on_approval_accepted(self, steps: list[dict[str, Any]]) -> str: - """Generate document-specific approval message.""" - enabled_steps = [s for s in steps if s.get("status") == "enabled"] - - message_parts = ["Applying your edits:\n\n"] - - for i, step in enumerate(enabled_steps, 1): - message_parts.append(f"{i}. {step['description']}\n") - - message_parts.append("\nDocument updated successfully!") + @property + def approval_header(self) -> str: + return "Applying your edits:\n\n" - return "".join(message_parts) + @property + def approval_footer(self) -> str: + return "\nDocument updated successfully!" - def on_approval_rejected(self, steps: list[dict[str, Any]]) -> str: - """Generate document-specific rejection message.""" + @property + def rejection_message(self) -> str: return "No problem! Which changes should I keep or modify?" - def on_state_confirmed(self) -> str: - """Generate document-specific state confirmation message.""" + @property + def state_confirmed_message(self) -> str: return "Document edits applied!" - def on_state_rejected(self) -> str: - """Generate document-specific state rejection message.""" + @property + def state_rejected_message(self) -> str: return "No problem! What should I change about the document?" + + +def apply_confirmation_strategy( + strategy: ConfirmationStrategy | None, + accepted: bool, + steps: list[dict[str, Any]], +) -> str: + """Apply a confirmation strategy to generate a message. + + This helper consolidates the pattern used in multiple orchestrators. 
+ + Args: + strategy: Strategy to use, or None for default + accepted: Whether the user approved + steps: List of steps (may be empty for state confirmations) + + Returns: + Generated message string + """ + if strategy is None: + strategy = DefaultConfirmationStrategy() + + if not steps: + # State confirmation (no steps) + return strategy.on_state_confirmed() if accepted else strategy.on_state_rejected() + # Step-based approval + return strategy.on_approval_accepted(steps) if accepted else strategy.on_approval_rejected(steps) diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_events.py b/python/packages/ag-ui/agent_framework_ag_ui/_events.py index 184da0239e..812c99064d 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_events.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_events.py @@ -11,8 +11,6 @@ from ag_ui.core import ( BaseEvent, CustomEvent, - EventType, - MessagesSnapshotEvent, RunFinishedEvent, RunStartedEvent, StateDeltaEvent, @@ -31,9 +29,10 @@ FunctionCallContent, FunctionResultContent, TextContent, + prepare_function_call_results, ) -from ._utils import generate_event_id +from ._utils import extract_state_from_tool_args, generate_event_id, safe_json_parse logger = logging.getLogger(__name__) @@ -48,8 +47,8 @@ def __init__( predict_state_config: dict[str, dict[str, str]] | None = None, current_state: dict[str, Any] | None = None, skip_text_content: bool = False, - input_messages: list[Any] | None = None, require_confirmation: bool = True, + approval_tool_name: str | None = None, ) -> None: """ Initialize the event bridge. @@ -61,7 +60,6 @@ def __init__( Format: {"state_key": {"tool": "tool_name", "tool_argument": "arg_name"}} current_state: Reference to the current state dict for tracking updates. skip_text_content: If True, skip emitting TextMessageContentEvents (for structured outputs). - input_messages: The input messages from the conversation history. 
require_confirmation: Whether predictive state updates require user confirmation. """ self.run_id = run_id @@ -74,6 +72,7 @@ def __init__( self.pending_state_updates: dict[str, Any] = {} # Track updates from tool calls self.skip_text_content = skip_text_content self.require_confirmation = require_confirmation + self.approval_tool_name = approval_tool_name # For predictive state updates: accumulate streaming arguments self.streaming_tool_args: str = "" # Accumulated JSON string @@ -82,13 +81,6 @@ def __init__( self.should_stop_after_confirm: bool = False # Flag to stop run after confirm_changes self.suppressed_summary: str = "" # Store LLM summary to show after confirmation - # For MessagesSnapshotEvent: track tool calls and results - self.input_messages = input_messages or [] - self.pending_tool_calls: list[dict[str, Any]] = [] # Track tool calls for assistant message - self.tool_results: list[dict[str, Any]] = [] # Track tool results - self.tool_calls_ended: set[str] = set() # Track which tool calls have had ToolCallEndEvent emitted - self.accumulated_text_content: str = "" # Track accumulated text for final MessagesSnapshotEvent - async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[BaseEvent]: """ Convert an AgentRunResponseUpdate to AG-UI events. @@ -154,7 +146,6 @@ def _handle_text_content(self, content: TextContent) -> list[BaseEvent]: message_id=self.current_message_id, delta=content.text, ) - self.accumulated_text_content += content.text logger.info(f" EMITTING TextMessageContentEvent with text_len={len(content.text)}") events.append(event) return events @@ -169,10 +160,10 @@ def _handle_function_call_content(self, content: FunctionCallContent) -> list[Ba logger.warning(f"FunctionCallContent missing name and call_id. 
args_length={args_length}") tool_call_id = self._coalesce_tool_call_id(content) + # Only emit ToolCallStartEvent once per tool call (when it's a new tool call) if content.name and tool_call_id != self.current_tool_call_id: self.streaming_tool_args = "" self.state_delta_count = 0 - if content.name: self.current_tool_call_id = tool_call_id self.current_tool_call_name = content.name @@ -183,17 +174,6 @@ def _handle_function_call_content(self, content: FunctionCallContent) -> list[Ba ) logger.info(f"Emitting ToolCallStartEvent with name='{content.name}', id='{tool_call_id}'") events.append(tool_start_event) - - self.pending_tool_calls.append( - { - "id": tool_call_id, - "type": "function", - "function": { - "name": content.name, - "arguments": "", - }, - } - ) elif tool_call_id: self.current_tool_call_id = tool_call_id @@ -206,13 +186,7 @@ def _handle_function_call_content(self, content: FunctionCallContent) -> list[Ba ) events.append(args_event) - for tool_call in self.pending_tool_calls: - if tool_call["id"] == tool_call_id: - tool_call["function"]["arguments"] += delta_str - break - events.extend(self._emit_predictive_state_deltas(delta_str)) - events.extend(self._legacy_predictive_state(content)) return events @@ -235,10 +209,8 @@ def _emit_predictive_state_deltas(self, argument_chunk: str) -> list[BaseEvent]: self.current_tool_call_name, ) - parsed_args = None - try: - parsed_args = json.loads(self.streaming_tool_args) - except json.JSONDecodeError: + parsed_args = safe_json_parse(self.streaming_tool_args) + if parsed_args is None: for state_key, config in self.predict_state_config.items(): if config["tool"] != self.current_tool_call_name: continue @@ -282,11 +254,8 @@ def _emit_predictive_state_deltas(self, argument_chunk: str) -> list[BaseEvent]: continue tool_arg_name = config["tool_argument"] - if tool_arg_name == "*": - state_value = parsed_args - elif tool_arg_name in parsed_args: - state_value = parsed_args[tool_arg_name] - else: + state_value = 
extract_state_from_tool_args(parsed_args, tool_arg_name) + if state_value is None: continue if state_key not in self.last_emitted_state or self.last_emitted_state[state_key] != state_value: @@ -317,59 +286,6 @@ def _emit_predictive_state_deltas(self, argument_chunk: str) -> list[BaseEvent]: self.pending_state_updates[state_key] = state_value return events - def _legacy_predictive_state(self, content: FunctionCallContent) -> list[BaseEvent]: - events: list[BaseEvent] = [] - if not (content.name and content.arguments): - return events - parsed_args = content.parse_arguments() - if not parsed_args: - return events - - logger.info( - "Checking predict_state_config keys: %s", - list(self.predict_state_config.keys()) if self.predict_state_config else "None", - ) - for state_key, config in self.predict_state_config.items(): - logger.info(f"Checking state_key='{state_key}'") - if config["tool"] != content.name: - continue - tool_arg_name = config["tool_argument"] - logger.info(f"MATCHED tool '{content.name}' for state key '{state_key}', arg='{tool_arg_name}'") - - state_value: Any - if tool_arg_name == "*": - state_value = parsed_args - logger.info(f"Using all args as state value, keys: {list(state_value.keys())}") - elif tool_arg_name in parsed_args: - state_value = parsed_args[tool_arg_name] - logger.info(f"Using specific arg '{tool_arg_name}' as state value") - else: - logger.warning(f"Tool argument '{tool_arg_name}' not found in parsed args") - continue - - previous_value = self.last_emitted_state.get(state_key, object()) - if previous_value == state_value: - logger.info( - "Skipping duplicate StateDeltaEvent for key '%s' - value unchanged", - state_key, - ) - continue - - state_delta_event = StateDeltaEvent( - delta=[ - { - "op": "replace", - "path": f"/{state_key}", - "value": state_value, - } - ], - ) - logger.info(f"Emitting StateDeltaEvent for key '{state_key}', value type: {type(state_value)}") # type: ignore - events.append(state_delta_event) - 
self.pending_state_updates[state_key] = state_value - self.last_emitted_state[state_key] = state_value - return events - def _handle_function_result_content(self, content: FunctionResultContent) -> list[BaseEvent]: events: list[BaseEvent] = [] if content.call_id: @@ -378,7 +294,6 @@ def _handle_function_result_content(self, content: FunctionResultContent) -> lis ) logger.info(f"Emitting ToolCallEndEvent for completed tool call '{content.call_id}'") events.append(end_event) - self.tool_calls_ended.add(content.call_id) if self.state_delta_count > 0: logger.info( @@ -391,12 +306,7 @@ def _handle_function_result_content(self, content: FunctionResultContent) -> lis self.state_delta_count = 0 result_message_id = generate_event_id() - if isinstance(content.result, dict): - result_content = json.dumps(content.result) # type: ignore[arg-type] - elif content.result is not None: - result_content = str(content.result) - else: - result_content = "" + result_content = prepare_function_call_results(content.result) result_event = ToolCallResultEvent( message_id=result_message_id, @@ -405,55 +315,10 @@ def _handle_function_result_content(self, content: FunctionResultContent) -> lis role="tool", ) events.append(result_event) - - self.tool_results.append( - { - "id": result_message_id, - "role": "tool", - "toolCallId": content.call_id, - "content": result_content, - } - ) - - events.extend(self._emit_snapshot_for_tool_result()) events.extend(self._emit_state_snapshot_and_confirmation()) return events - def _emit_snapshot_for_tool_result(self) -> list[BaseEvent]: - events: list[BaseEvent] = [] - should_emit_snapshot = self.pending_tool_calls and self.tool_results - - is_predictive_without_confirmation = False - if should_emit_snapshot and self.current_tool_call_name and self.predict_state_config: - for _, config in self.predict_state_config.items(): - if config["tool"] == self.current_tool_call_name and not self.require_confirmation: - is_predictive_without_confirmation = True - 
logger.info( - "Skipping intermediate MessagesSnapshotEvent for predictive tool '%s' - delaying until summary", - self.current_tool_call_name, - ) - break - - if should_emit_snapshot and not is_predictive_without_confirmation: - from ._message_adapters import agent_framework_messages_to_agui - - assistant_message = { - "id": generate_event_id(), - "role": "assistant", - "tool_calls": self.pending_tool_calls.copy(), - } - converted_input_messages = agent_framework_messages_to_agui(self.input_messages) - all_messages = converted_input_messages + [assistant_message] + self.tool_results.copy() - - messages_snapshot_event = MessagesSnapshotEvent( - type=EventType.MESSAGES_SNAPSHOT, - messages=all_messages, # type: ignore[arg-type] - ) - logger.info(f"Emitting MessagesSnapshotEvent with {len(all_messages)} messages") - events.append(messages_snapshot_event) - return events - def _emit_state_snapshot_and_confirmation(self) -> list[BaseEvent]: events: list[BaseEvent] = [] if self.pending_state_updates: @@ -502,31 +367,46 @@ def _emit_state_snapshot_and_confirmation(self) -> list[BaseEvent]: self.current_tool_call_name = None return events - def _emit_confirm_changes_tool_call(self) -> list[BaseEvent]: + def _emit_confirm_changes_tool_call(self, function_call: FunctionCallContent | None = None) -> list[BaseEvent]: + """Emit a confirm_changes tool call for Dojo UI compatibility. + + Args: + function_call: Optional function call that needs confirmation. + If provided, includes function info in the confirm_changes args + so Dojo UI can display what's being confirmed. 
+ """ events: list[BaseEvent] = [] confirm_call_id = generate_event_id() logger.info("Emitting confirm_changes tool call for predictive update") - self.pending_tool_calls.append( - { - "id": confirm_call_id, - "type": "function", - "function": { - "name": "confirm_changes", - "arguments": "{}", - }, - } - ) - confirm_start = ToolCallStartEvent( tool_call_id=confirm_call_id, tool_call_name="confirm_changes", + parent_message_id=self.current_message_id, ) events.append(confirm_start) + # Include function info if this is for a function approval + # This helps Dojo UI display meaningful confirmation info + if function_call: + args_dict = { + "function_name": function_call.name, + "function_call_id": function_call.call_id, + "function_arguments": function_call.parse_arguments() or {}, + "steps": [ + { + "description": f"Execute {function_call.name}", + "status": "enabled", + } + ], + } + args_json = json.dumps(args_dict) + else: + args_json = "{}" + confirm_args = ToolCallArgsEvent( tool_call_id=confirm_call_id, - delta="{}", + delta=args_json, ) events.append(confirm_args) @@ -535,23 +415,48 @@ def _emit_confirm_changes_tool_call(self) -> list[BaseEvent]: ) events.append(confirm_end) - from ._message_adapters import agent_framework_messages_to_agui + self.should_stop_after_confirm = True + logger.info("Set flag to stop run after confirm_changes") + return events - assistant_message = { - "id": generate_event_id(), - "role": "assistant", - "tool_calls": self.pending_tool_calls.copy(), - } + def _emit_function_approval_tool_call(self, function_call: FunctionCallContent) -> list[BaseEvent]: + """Emit a tool call that can drive UI approval for function requests.""" + tool_call_name = "confirm_changes" + if self.approval_tool_name and self.approval_tool_name != function_call.name: + tool_call_name = self.approval_tool_name + + tool_call_id = generate_event_id() + tool_start = ToolCallStartEvent( + tool_call_id=tool_call_id, + tool_call_name=tool_call_name, + 
parent_message_id=self.current_message_id, + ) + events: list[BaseEvent] = [tool_start] - converted_input_messages = agent_framework_messages_to_agui(self.input_messages) - all_messages = converted_input_messages + [assistant_message] + self.tool_results.copy() + args_dict = { + "function_name": function_call.name, + "function_call_id": function_call.call_id, + "function_arguments": function_call.parse_arguments() or {}, + "steps": [ + { + "description": f"Execute {function_call.name}", + "status": "enabled", + } + ], + } + args_json = json.dumps(args_dict) - messages_snapshot_event = MessagesSnapshotEvent( - type=EventType.MESSAGES_SNAPSHOT, - messages=all_messages, # type: ignore[arg-type] + events.append( + ToolCallArgsEvent( + tool_call_id=tool_call_id, + delta=args_json, + ) + ) + events.append( + ToolCallEndEvent( + tool_call_id=tool_call_id, + ) ) - logger.info(f"Emitting MessagesSnapshotEvent for confirm_changes with {len(all_messages)} messages") - events.append(messages_snapshot_event) self.should_stop_after_confirm = True logger.info("Set flag to stop run after confirm_changes") @@ -583,12 +488,8 @@ def _handle_function_approval_request_content(self, content: FunctionApprovalReq tool_arg_name, ) - state_value: Any - if tool_arg_name == "*": - state_value = parsed_args - elif tool_arg_name in parsed_args: - state_value = parsed_args[tool_arg_name] - else: + state_value = extract_state_from_tool_args(parsed_args, tool_arg_name) + if state_value is None: logger.warning(f" Tool argument '{tool_arg_name}' not found in parsed args") continue @@ -605,8 +506,8 @@ def _handle_function_approval_request_content(self, content: FunctionApprovalReq ) logger.info(f"Emitting ToolCallEndEvent for approval-required tool '{content.function_call.call_id}'") events.append(end_event) - self.tool_calls_ended.add(content.function_call.call_id) + # Emit the function_approval_request custom event for UI implementations that support it approval_event = CustomEvent( 
name="function_approval_request", value={ @@ -620,6 +521,14 @@ def _handle_function_approval_request_content(self, content: FunctionApprovalReq ) logger.info(f"Emitting function_approval_request custom event for '{content.function_call.name}'") events.append(approval_event) + + # Emit a UI-friendly approval tool call for function approvals. + if self.require_confirmation: + events.extend(self._emit_function_approval_tool_call(content.function_call)) + + # Signal orchestrator to stop the run and wait for user approval response + self.should_stop_after_confirm = True + logger.info("Set flag to stop run - waiting for function approval response") return events def create_run_started_event(self) -> RunStartedEvent: diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py b/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py index 11d2977f90..1ff858e9f5 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py @@ -2,6 +2,8 @@ """Message format conversion between AG-UI and Agent Framework.""" +import json +import logging from typing import Any, cast from agent_framework import ( @@ -11,20 +13,229 @@ FunctionResultContent, Role, TextContent, + prepare_function_call_results, ) -# Role mapping constants -_AGUI_TO_FRAMEWORK_ROLE = { - "user": Role.USER, - "assistant": Role.ASSISTANT, - "system": Role.SYSTEM, -} +from ._utils import ( + AGUI_TO_FRAMEWORK_ROLE, + FRAMEWORK_TO_AGUI_ROLE, + get_role_value, + normalize_agui_role, + safe_json_parse, +) + +logger = logging.getLogger(__name__) + + +def _sanitize_tool_history(messages: list[ChatMessage]) -> list[ChatMessage]: + """Normalize tool ordering and inject synthetic results for AG-UI edge cases.""" + sanitized: list[ChatMessage] = [] + pending_tool_call_ids: set[str] | None = None + pending_confirm_changes_id: str | None = None + + for msg in messages: + role_value = get_role_value(msg) + + if 
role_value == "assistant": + tool_ids = { + str(content.call_id) + for content in msg.contents or [] + if isinstance(content, FunctionCallContent) and content.call_id + } + confirm_changes_call = None + for content in msg.contents or []: + if isinstance(content, FunctionCallContent) and content.name == "confirm_changes": + confirm_changes_call = content + break + + sanitized.append(msg) + pending_tool_call_ids = tool_ids if tool_ids else None + pending_confirm_changes_id = ( + str(confirm_changes_call.call_id) if confirm_changes_call and confirm_changes_call.call_id else None + ) + continue + + if role_value == "user": + approval_call_ids: set[str] = set() + approval_accepted: bool | None = None + for content in msg.contents or []: + if type(content) is FunctionApprovalResponseContent: + if content.function_call and content.function_call.call_id: + approval_call_ids.add(str(content.function_call.call_id)) + if approval_accepted is None: + approval_accepted = bool(content.approved) + else: + approval_accepted = approval_accepted and bool(content.approved) + + if approval_call_ids and pending_tool_call_ids: + pending_tool_call_ids -= approval_call_ids + logger.info( + f"FunctionApprovalResponseContent found for call_ids={sorted(approval_call_ids)} - " + "framework will handle execution" + ) + + if pending_confirm_changes_id and approval_accepted is not None: + logger.info(f"Injecting synthetic tool result for confirm_changes call_id={pending_confirm_changes_id}") + synthetic_result = ChatMessage( + role="tool", + contents=[ + FunctionResultContent( + call_id=pending_confirm_changes_id, + result="Confirmed" if approval_accepted else "Rejected", + ) + ], + ) + sanitized.append(synthetic_result) + if pending_tool_call_ids: + pending_tool_call_ids.discard(pending_confirm_changes_id) + pending_confirm_changes_id = None + + if pending_confirm_changes_id: + user_text = "" + for content in msg.contents or []: + if isinstance(content, TextContent): + user_text = content.text 
+ break + + try: + parsed = json.loads(user_text) + if "accepted" in parsed: + logger.info( + f"Injecting synthetic tool result for confirm_changes call_id={pending_confirm_changes_id}" + ) + synthetic_result = ChatMessage( + role="tool", + contents=[ + FunctionResultContent( + call_id=pending_confirm_changes_id, + result="Confirmed" if parsed.get("accepted") else "Rejected", + ) + ], + ) + sanitized.append(synthetic_result) + if pending_tool_call_ids: + pending_tool_call_ids.discard(pending_confirm_changes_id) + pending_confirm_changes_id = None + continue + except (json.JSONDecodeError, KeyError) as exc: + logger.debug(f"Could not parse user message as confirm_changes response: {type(exc).__name__}") + + if pending_tool_call_ids: + logger.info( + f"User message arrived with {len(pending_tool_call_ids)} pending tool calls - " + "injecting synthetic results" + ) + for pending_call_id in pending_tool_call_ids: + logger.info(f"Injecting synthetic tool result for pending call_id={pending_call_id}") + synthetic_result = ChatMessage( + role="tool", + contents=[ + FunctionResultContent( + call_id=pending_call_id, + result="Tool execution skipped - user provided follow-up message", + ) + ], + ) + sanitized.append(synthetic_result) + pending_tool_call_ids = None + pending_confirm_changes_id = None + + sanitized.append(msg) + pending_confirm_changes_id = None + continue + + if role_value == "tool": + if not pending_tool_call_ids: + continue + keep = False + for content in msg.contents or []: + if isinstance(content, FunctionResultContent): + call_id = str(content.call_id) + if call_id in pending_tool_call_ids: + keep = True + if call_id == pending_confirm_changes_id: + pending_confirm_changes_id = None + break + if keep: + sanitized.append(msg) + continue + + sanitized.append(msg) + pending_tool_call_ids = None + pending_confirm_changes_id = None + + return sanitized + + +def _deduplicate_messages(messages: list[ChatMessage]) -> list[ChatMessage]: + """Remove duplicate 
messages while preserving order.""" + seen_keys: dict[Any, int] = {} + unique_messages: list[ChatMessage] = [] -_FRAMEWORK_TO_AGUI_ROLE = { - Role.USER: "user", - Role.ASSISTANT: "assistant", - Role.SYSTEM: "system", -} + for idx, msg in enumerate(messages): + role_value = get_role_value(msg) + + if role_value == "tool" and msg.contents and isinstance(msg.contents[0], FunctionResultContent): + call_id = str(msg.contents[0].call_id) + key: Any = (role_value, call_id) + + if key in seen_keys: + existing_idx = seen_keys[key] + existing_msg = unique_messages[existing_idx] + + existing_result = None + if existing_msg.contents and isinstance(existing_msg.contents[0], FunctionResultContent): + existing_result = existing_msg.contents[0].result + new_result = msg.contents[0].result + + if (not existing_result or existing_result == "") and new_result: + logger.info(f"Replacing empty tool result at index {existing_idx} with data from index {idx}") + unique_messages[existing_idx] = msg + else: + logger.info(f"Skipping duplicate tool result at index {idx}: call_id={call_id}") + continue + + seen_keys[key] = len(unique_messages) + unique_messages.append(msg) + + elif ( + role_value == "assistant" and msg.contents and any(isinstance(c, FunctionCallContent) for c in msg.contents) + ): + tool_call_ids = tuple( + sorted(str(c.call_id) for c in msg.contents if isinstance(c, FunctionCallContent) and c.call_id) + ) + key = (role_value, tool_call_ids) + + if key in seen_keys: + logger.info(f"Skipping duplicate assistant tool call at index {idx}") + continue + + seen_keys[key] = len(unique_messages) + unique_messages.append(msg) + + else: + content_str = str([str(c) for c in msg.contents]) if msg.contents else "" + key = (role_value, hash(content_str)) + + if key in seen_keys: + logger.info(f"Skipping duplicate message at index {idx}: role={role_value}") + continue + + seen_keys[key] = len(unique_messages) + unique_messages.append(msg) + + return unique_messages + + +def 
normalize_agui_input_messages( + messages: list[dict[str, Any]], +) -> tuple[list[ChatMessage], list[dict[str, Any]]]: + """Normalize raw AG-UI messages into provider and snapshot formats.""" + provider_messages = agui_messages_to_agent_framework(messages) + provider_messages = _sanitize_tool_history(provider_messages) + provider_messages = _deduplicate_messages(provider_messages) + snapshot_messages = agui_messages_to_snapshot_format(messages) + return provider_messages, snapshot_messages def agui_messages_to_agent_framework(messages: list[dict[str, Any]]) -> list[ChatMessage]: @@ -36,11 +247,108 @@ def agui_messages_to_agent_framework(messages: list[dict[str, Any]]) -> list[Cha Returns: List of Agent Framework ChatMessage objects """ + + def _update_tool_call_arguments( + raw_messages: list[dict[str, Any]], + tool_call_id: str, + modified_args: dict[str, Any], + ) -> None: + for raw_msg in raw_messages: + tool_calls = raw_msg.get("tool_calls") or raw_msg.get("toolCalls") + if not isinstance(tool_calls, list): + continue + tool_calls_list = cast(list[Any], tool_calls) + for tool_call in tool_calls_list: + if not isinstance(tool_call, dict): + continue + tool_call_dict = cast(dict[str, Any], tool_call) + if str(tool_call_dict.get("id", "")) != tool_call_id: + continue + function_payload = tool_call_dict.get("function") + if not isinstance(function_payload, dict): + return + function_payload_dict = cast(dict[str, Any], function_payload) + existing_args = function_payload_dict.get("arguments") + if isinstance(existing_args, str): + function_payload_dict["arguments"] = json.dumps(modified_args) + else: + function_payload_dict["arguments"] = modified_args + return + + def _find_matching_func_call(call_id: str) -> FunctionCallContent | None: + for prev_msg in result: + role_val = prev_msg.role.value if hasattr(prev_msg.role, "value") else str(prev_msg.role) + if role_val != "assistant": + continue + for content in prev_msg.contents or []: + if isinstance(content, 
FunctionCallContent): + if content.call_id == call_id and content.name != "confirm_changes": + return content + return None + + def _parse_arguments(arguments: Any) -> dict[str, Any] | None: + return safe_json_parse(arguments) + + def _resolve_approval_call_id(tool_call_id: str, parsed_payload: dict[str, Any] | None) -> str | None: + if parsed_payload: + explicit_call_id = parsed_payload.get("function_call_id") + if explicit_call_id: + return str(explicit_call_id) + + for prev_msg in result: + role_val = prev_msg.role.value if hasattr(prev_msg.role, "value") else str(prev_msg.role) + if role_val != "assistant": + continue + direct_call = None + confirm_call = None + sibling_calls: list[FunctionCallContent] = [] + for content in prev_msg.contents or []: + if not isinstance(content, FunctionCallContent): + continue + if content.call_id == tool_call_id: + direct_call = content + if content.name == "confirm_changes" and content.call_id == tool_call_id: + confirm_call = content + elif content.name != "confirm_changes": + sibling_calls.append(content) + + if direct_call: + direct_args = direct_call.parse_arguments() or {} + if isinstance(direct_args, dict): + explicit_call_id = direct_args.get("function_call_id") + if explicit_call_id: + return str(explicit_call_id) + + if not confirm_call: + continue + + confirm_args = confirm_call.parse_arguments() or {} + if isinstance(confirm_args, dict): + explicit_call_id = confirm_args.get("function_call_id") + if explicit_call_id: + return str(explicit_call_id) + + if len(sibling_calls) == 1 and sibling_calls[0].call_id: + return str(sibling_calls[0].call_id) + + return None + + def _filter_modified_args( + modified_args: dict[str, Any], + original_args: dict[str, Any] | None, + ) -> dict[str, Any]: + if not modified_args: + return {} + if not isinstance(original_args, dict) or not original_args: + return {} + allowed_keys = set(original_args.keys()) + return {key: value for key, value in modified_args.items() if key in 
allowed_keys} + result: list[ChatMessage] = [] for msg in messages: # Handle standard tool result messages early (role="tool") to preserve provider invariants # This path maps AG‑UI tool messages to FunctionResultContent with the correct tool_call_id - role_str = msg.get("role", "user") + role_str = normalize_agui_role(msg.get("role", "user")) if role_str == "tool": # Prefer explicit tool_call_id fields; fall back to backend fields only if necessary tool_call_id = msg.get("tool_call_id") or msg.get("toolCallId") @@ -57,31 +365,153 @@ def agui_messages_to_agent_framework(messages: list[dict[str, Any]]) -> list[Cha result_content = msg.get("result", "") # Distinguish approval payloads from actual tool results - is_approval = False + parsed: dict[str, Any] | None = None if isinstance(result_content, str) and result_content: - import json as _json - try: - parsed = _json.loads(result_content) - is_approval = isinstance(parsed, dict) and "accepted" in parsed + parsed_candidate = json.loads(result_content) except Exception: - is_approval = False + parsed_candidate = None + if isinstance(parsed_candidate, dict): + parsed = cast(dict[str, Any], parsed_candidate) + elif isinstance(result_content, dict): + parsed = cast(dict[str, Any], result_content) + + is_approval = parsed is not None and "accepted" in parsed if is_approval: - # Approval responses should be treated as user messages to trigger human-in-the-loop flow - chat_msg = ChatMessage( - role=Role.USER, - contents=[TextContent(text=str(result_content))], - additional_properties={"is_tool_result": True, "tool_call_id": str(tool_call_id or "")}, - ) + # Look for the matching function call in previous messages to create + # a proper FunctionApprovalResponseContent. This enables the agent framework + # to execute the approved tool (fix for GitHub issue #3034). 
+ accepted = parsed.get("accepted", False) if parsed is not None else False + approval_payload_text = result_content if isinstance(result_content, str) else json.dumps(parsed) + + # Log the full approval payload to debug modified arguments + import logging + + logger = logging.getLogger(__name__) + logger.info(f"Approval payload received: {parsed}") + + approval_call_id = tool_call_id + resolved_call_id = _resolve_approval_call_id(tool_call_id, parsed) + if resolved_call_id: + approval_call_id = resolved_call_id + matching_func_call = _find_matching_func_call(approval_call_id) + + if matching_func_call: + # Remove any existing tool result for this call_id since the framework + # will re-execute the tool after approval. Keeping old results causes + # OpenAI API errors ("tool message must follow assistant with tool_calls"). + result = [ + m + for m in result + if not ( + (m.role.value if hasattr(m.role, "value") else str(m.role)) == "tool" + and any( + isinstance(c, FunctionResultContent) and c.call_id == approval_call_id + for c in (m.contents or []) + ) + ) + ] + + # Check if the approval payload contains modified arguments + # The UI sends back the modified state (e.g., deselected steps) in the approval payload + modified_args = {k: v for k, v in parsed.items() if k != "accepted"} if parsed else {} + original_args = matching_func_call.parse_arguments() + filtered_args = _filter_modified_args(modified_args, original_args) + state_args: dict[str, Any] | None = None + if filtered_args: + original_args = original_args or {} + merged_args: dict[str, Any] + if isinstance(original_args, dict) and original_args: + merged_args = {**original_args, **filtered_args} + else: + merged_args = dict(filtered_args) + + if isinstance(filtered_args.get("steps"), list): + original_steps = original_args.get("steps") if isinstance(original_args, dict) else None + if isinstance(original_steps, list): + approved_steps_list = list(filtered_args.get("steps") or []) + 
approved_by_description: dict[str, dict[str, Any]] = {} + for step_item in approved_steps_list: + if isinstance(step_item, dict): + step_item_dict = cast(dict[str, Any], step_item) + desc = step_item_dict.get("description") + if desc: + approved_by_description[str(desc)] = step_item_dict + merged_steps: list[Any] = [] + original_steps_list = cast(list[Any], original_steps) + for orig_step in original_steps_list: + if not isinstance(orig_step, dict): + merged_steps.append(orig_step) + continue + orig_step_dict = cast(dict[str, Any], orig_step) + description = str(orig_step_dict.get("description", "")) + approved_step = approved_by_description.get(description) + status: str = ( + str(approved_step.get("status")) + if approved_step is not None and approved_step.get("status") + else "disabled" + ) + updated_step: dict[str, Any] = orig_step_dict.copy() + updated_step["status"] = status + merged_steps.append(updated_step) + merged_args["steps"] = merged_steps + state_args = merged_args + + # Keep the original tool call and AG-UI snapshot in sync with approved args. 
+ updated_args = ( + json.dumps(merged_args) if isinstance(matching_func_call.arguments, str) else merged_args + ) + matching_func_call.arguments = updated_args + _update_tool_call_arguments(messages, str(approval_call_id), merged_args) + # Create a new FunctionCallContent with the modified arguments + func_call_for_approval = FunctionCallContent( + call_id=matching_func_call.call_id, + name=matching_func_call.name, + arguments=json.dumps(filtered_args), + ) + logger.info(f"Using modified arguments from approval: {filtered_args}") + else: + # No modified arguments - use the original function call + func_call_for_approval = matching_func_call + + # Create FunctionApprovalResponseContent for the agent framework + approval_response = FunctionApprovalResponseContent( + approved=accepted, + id=str(approval_call_id), + function_call=func_call_for_approval, + additional_properties={"ag_ui_state_args": state_args} if state_args else None, + ) + chat_msg = ChatMessage( + role=Role.USER, + contents=[approval_response], + ) + else: + # No matching function call found - this is likely a confirm_changes approval + # Keep the old behavior for backwards compatibility + chat_msg = ChatMessage( + role=Role.USER, + contents=[TextContent(text=approval_payload_text)], + additional_properties={"is_tool_result": True, "tool_call_id": str(tool_call_id or "")}, + ) if "id" in msg: chat_msg.message_id = msg["id"] result.append(chat_msg) continue + # Cast result_content to acceptable type for FunctionResultContent + func_result: str | dict[str, Any] | list[Any] + if isinstance(result_content, str): + func_result = result_content + elif isinstance(result_content, dict): + func_result = cast(dict[str, Any], result_content) + elif isinstance(result_content, list): + func_result = cast(list[Any], result_content) + else: + func_result = str(result_content) chat_msg = ChatMessage( role=Role.TOOL, - contents=[FunctionResultContent(call_id=str(tool_call_id), result=result_content)], + 
contents=[FunctionResultContent(call_id=str(tool_call_id), result=func_result)], ) if "id" in msg: chat_msg.message_id = msg["id"] @@ -142,7 +572,7 @@ def agui_messages_to_agent_framework(messages: list[dict[str, Any]]) -> list[Cha # No special handling required for assistant/plain messages here - role = _AGUI_TO_FRAMEWORK_ROLE.get(role_str, Role.USER) + role = AGUI_TO_FRAMEWORK_ROLE.get(role_str, Role.USER) # Check if this message contains function approvals if "function_approvals" in msg and msg["function_approvals"]: @@ -198,6 +628,7 @@ def agent_framework_messages_to_agui(messages: list[ChatMessage] | list[dict[str if isinstance(msg, dict): # Always work on a copy to avoid mutating input normalized_msg = msg.copy() + normalized_msg["role"] = normalize_agui_role(normalized_msg.get("role")) # Ensure ID exists if "id" not in normalized_msg: normalized_msg["id"] = generate_event_id() @@ -214,7 +645,7 @@ def agent_framework_messages_to_agui(messages: list[ChatMessage] | list[dict[str continue # Convert ChatMessage to AG-UI format - role = _FRAMEWORK_TO_AGUI_ROLE.get(msg.role, "user") + role = FRAMEWORK_TO_AGUI_ROLE.get(msg.role, "user") content_text = "" tool_calls: list[dict[str, Any]] = [] @@ -237,13 +668,8 @@ def agent_framework_messages_to_agui(messages: list[ChatMessage] | list[dict[str elif isinstance(content, FunctionResultContent): # Tool result content - extract call_id and result tool_result_call_id = content.call_id - # Serialize result to string - if isinstance(content.result, dict): - import json - - content_text = json.dumps(content.result) # type: ignore - elif content.result is not None: - content_text = str(content.result) + # Serialize result to string using core utility + content_text = prepare_function_call_results(content.result) agui_msg: dict[str, Any] = { "id": msg.message_id if msg.message_id else generate_event_id(), # Always include id @@ -308,22 +734,44 @@ def agui_messages_to_snapshot_format(messages: list[dict[str, Any]]) -> list[dic 
content = normalized_msg.get("content") if isinstance(content, list): # Convert content array format to simple string - text_parts = [] - for item in content: + text_parts: list[str] = [] + content_list = cast(list[Any], content) + for item in content_list: if isinstance(item, dict): + item_dict = cast(dict[str, Any], item) # Convert 'input_text' to 'text' type - if item.get("type") == "input_text": - text_parts.append(item.get("text", "")) - elif item.get("type") == "text": - text_parts.append(item.get("text", "")) + if item_dict.get("type") == "input_text": + text_parts.append(str(item_dict.get("text", ""))) + elif item_dict.get("type") == "text": + text_parts.append(str(item_dict.get("text", ""))) else: # Other types - just extract text field if present - text_parts.append(item.get("text", "")) + text_parts.append(str(item_dict.get("text", ""))) normalized_msg["content"] = "".join(text_parts) elif content is None: normalized_msg["content"] = "" + tool_calls = normalized_msg.get("tool_calls") or normalized_msg.get("toolCalls") + if isinstance(tool_calls, list): + tool_calls_list = cast(list[Any], tool_calls) + for tool_call in tool_calls_list: + if not isinstance(tool_call, dict): + continue + tool_call_dict = cast(dict[str, Any], tool_call) + function_payload = tool_call_dict.get("function") + if not isinstance(function_payload, dict): + continue + function_payload_dict = cast(dict[str, Any], function_payload) + if "arguments" not in function_payload_dict: + continue + arguments = function_payload_dict.get("arguments") + if arguments is None: + function_payload_dict["arguments"] = "" + elif not isinstance(arguments, str): + function_payload_dict["arguments"] = json.dumps(arguments) + # Normalize tool_call_id to toolCallId for tool messages + normalized_msg["role"] = normalize_agui_role(normalized_msg.get("role")) if normalized_msg.get("role") == "tool": if "tool_call_id" in normalized_msg: normalized_msg["toolCallId"] = normalized_msg["tool_call_id"] diff --git 
# Copyright (c) Microsoft. All rights reserved.

"""Helper functions for orchestration logic."""

import json
import logging
from typing import TYPE_CHECKING, Any

from ag_ui.core import StateSnapshotEvent
from agent_framework import (
    ChatMessage,
    FunctionApprovalResponseContent,
    FunctionCallContent,
    FunctionResultContent,
    TextContent,
)

from .._utils import get_role_value, safe_json_parse

if TYPE_CHECKING:
    from .._events import AgentFrameworkEventBridge
    from ._state_manager import StateManager

logger = logging.getLogger(__name__)


def pending_tool_call_ids(messages: list[ChatMessage]) -> set[str]:
    """Get IDs of tool calls without corresponding results.

    A call is "pending" when its call_id appears in a FunctionCallContent
    but never in any FunctionResultContent across the given messages.

    Args:
        messages: List of messages to scan

    Returns:
        Set of pending tool call IDs
    """
    pending_ids: set[str] = set()
    resolved_ids: set[str] = set()
    for msg in messages:
        for content in msg.contents:
            if isinstance(content, FunctionCallContent) and content.call_id:
                pending_ids.add(str(content.call_id))
            elif isinstance(content, FunctionResultContent) and content.call_id:
                resolved_ids.add(str(content.call_id))
    return pending_ids - resolved_ids


def is_state_context_message(message: ChatMessage) -> bool:
    """Check if a message is a state context system message.

    Identified purely by role == "system" and a TextContent whose text
    starts with the fixed marker prefix used when injecting state context.

    Args:
        message: Message to check

    Returns:
        True if this is a state context message
    """
    if get_role_value(message) != "system":
        return False
    for content in message.contents:
        if isinstance(content, TextContent) and content.text.startswith("Current state of the application:"):
            return True
    return False


def ensure_tool_call_entry(
    tool_call_id: str,
    tool_calls_by_id: dict[str, dict[str, Any]],
    pending_tool_calls: list[dict[str, Any]],
) -> dict[str, Any]:
    """Get or create a tool call entry in the tracking dicts.

    New entries are registered in BOTH containers (index dict and
    pending list); callers mutate the returned dict in place as
    streaming chunks arrive.

    Args:
        tool_call_id: The tool call ID
        tool_calls_by_id: Dict mapping IDs to tool call entries
        pending_tool_calls: List of pending tool calls

    Returns:
        The tool call entry dict
    """
    entry = tool_calls_by_id.get(tool_call_id)
    if entry is None:
        entry = {
            "id": tool_call_id,
            "type": "function",
            "function": {
                "name": "",
                "arguments": "",
            },
        }
        tool_calls_by_id[tool_call_id] = entry
        pending_tool_calls.append(entry)
    return entry


def tool_name_for_call_id(
    tool_calls_by_id: dict[str, dict[str, Any]],
    tool_call_id: str,
) -> str | None:
    """Get the tool name for a given call ID.

    Args:
        tool_calls_by_id: Dict mapping IDs to tool call entries
        tool_call_id: The tool call ID to look up

    Returns:
        Tool name or None if not found (or name is empty/unset)
    """
    entry = tool_calls_by_id.get(tool_call_id)
    if not entry:
        return None
    function = entry.get("function")
    if not isinstance(function, dict):
        return None
    name = function.get("name")
    return str(name) if name else None


def tool_calls_match_state(
    provider_messages: list[ChatMessage],
    state_manager: "StateManager",
) -> bool:
    """Check if tool calls in messages match current state.

    For every configured state key, the MOST RECENT assistant call to the
    configured tool must exist and its configured argument must equal the
    value currently held in state. Any miss returns False.

    Args:
        provider_messages: Messages to check
        state_manager: State manager with config and current state

    Returns:
        True if tool calls match state configuration
    """
    if not state_manager.predict_state_config or not state_manager.current_state:
        return False

    for state_key, config in state_manager.predict_state_config.items():
        tool_name = config["tool"]
        tool_arg_name = config["tool_argument"]
        tool_args: dict[str, Any] | None = None

        # Scan newest-to-oldest for the latest matching tool call.
        for msg in reversed(provider_messages):
            if get_role_value(msg) != "assistant":
                continue
            for content in msg.contents:
                if isinstance(content, FunctionCallContent) and content.name == tool_name:
                    tool_args = safe_json_parse(content.arguments)
                    break
            if tool_args is not None:
                break

        if not tool_args:
            return False

        # "*" means the whole argument dict maps to the state key.
        if tool_arg_name == "*":
            state_value = tool_args
        elif tool_arg_name in tool_args:
            state_value = tool_args[tool_arg_name]
        else:
            return False

        if state_manager.current_state.get(state_key) != state_value:
            return False

    return True


def schema_has_steps(schema: Any) -> bool:
    """Check if a schema has a steps array property.

    Args:
        schema: JSON schema to check

    Returns:
        True if schema has a "steps" property of type "array"
    """
    if not isinstance(schema, dict):
        return False
    properties = schema.get("properties")
    if not isinstance(properties, dict):
        return False
    steps_schema = properties.get("steps")
    if not isinstance(steps_schema, dict):
        return False
    return steps_schema.get("type") == "array"


def select_approval_tool_name(client_tools: list[Any] | None) -> str | None:
    """Select appropriate approval tool from client tools.

    Picks the first client tool whose parameter schema declares a "steps"
    array (see schema_has_steps). Tools are duck-typed: they must expose
    a ``name`` attribute and a callable ``parameters`` returning a schema.

    Args:
        client_tools: List of client tool definitions

    Returns:
        Name of approval tool, or None if not found
    """
    if not client_tools:
        return None
    for tool in client_tools:
        tool_name = getattr(tool, "name", None)
        if not tool_name:
            continue
        params_fn = getattr(tool, "parameters", None)
        if not callable(params_fn):
            continue
        schema = params_fn()
        if schema_has_steps(schema):
            return str(tool_name)
    return None


def select_messages_to_run(
    provider_messages: list[ChatMessage],
    state_manager: "StateManager",
) -> list[ChatMessage]:
    """Select and prepare messages for agent execution.

    Injects state context message when appropriate. Any previously injected
    state context messages are stripped first so at most one is present.
    If unresolved tool calls remain, no context is injected (inserting a
    system message between a tool call and its result breaks provider
    ordering invariants).

    Args:
        provider_messages: Original messages from client
        state_manager: State manager instance

    Returns:
        Messages ready for agent execution
    """
    if not provider_messages:
        return []

    is_new_user_turn = get_role_value(provider_messages[-1]) == "user"
    conversation_has_tool_calls = tool_calls_match_state(provider_messages, state_manager)
    state_context_msg = state_manager.state_context_message(
        is_new_user_turn=is_new_user_turn, conversation_has_tool_calls=conversation_has_tool_calls
    )
    if not state_context_msg:
        return list(provider_messages)

    messages_to_run = [msg for msg in provider_messages if not is_state_context_message(msg)]
    if pending_tool_call_ids(messages_to_run):
        return messages_to_run

    # On a new user turn the context goes just BEFORE the final user
    # message; otherwise it is appended at the end.
    insert_index = len(messages_to_run) - 1 if is_new_user_turn else len(messages_to_run)
    if insert_index < 0:
        insert_index = 0
    messages_to_run.insert(insert_index, state_context_msg)
    return messages_to_run


def build_safe_metadata(thread_metadata: dict[str, Any] | None) -> dict[str, Any]:
    """Build metadata dict with truncated string values.

    Non-string values are JSON-serialized first, then every value is
    truncated to 512 characters.

    Args:
        thread_metadata: Raw metadata dict

    Returns:
        Metadata with string values truncated to 512 chars
    """
    if not thread_metadata:
        return {}
    safe_metadata: dict[str, Any] = {}
    for key, value in thread_metadata.items():
        value_str = value if isinstance(value, str) else json.dumps(value)
        if len(value_str) > 512:
            value_str = value_str[:512]
        safe_metadata[key] = value_str
    return safe_metadata


def collect_approved_state_snapshots(
    provider_messages: list[ChatMessage],
    predict_state_config: dict[str, dict[str, str]] | None,
    current_state: dict[str, Any],
    event_bridge: "AgentFrameworkEventBridge",
) -> list[StateSnapshotEvent]:
    """Collect state snapshots from approved function calls.

    Prefers the "ag_ui_state_args" payload (set by the message converter
    when the user modified arguments during approval) over the function
    call's own arguments.

    Args:
        provider_messages: Messages containing approvals
        predict_state_config: Predictive state configuration
        current_state: Current state dict (will be mutated)
        event_bridge: Event bridge for creating events

    Returns:
        List of state snapshot events
    """
    if not predict_state_config:
        return []

    events: list[StateSnapshotEvent] = []
    for msg in provider_messages:
        if get_role_value(msg) != "user":
            continue
        for content in msg.contents:
            # Exact type check (not isinstance) — subclasses are excluded.
            if type(content) is FunctionApprovalResponseContent:
                if not content.function_call or not content.approved:
                    continue
                parsed_args = content.function_call.parse_arguments()
                state_args = None
                if content.additional_properties:
                    state_args = content.additional_properties.get("ag_ui_state_args")
                if not isinstance(state_args, dict):
                    state_args = parsed_args
                if not state_args:
                    continue
                for state_key, config in predict_state_config.items():
                    if config["tool"] != content.function_call.name:
                        continue
                    tool_arg_name = config["tool_argument"]
                    if tool_arg_name == "*":
                        state_value = state_args
                    elif isinstance(state_args, dict) and tool_arg_name in state_args:
                        state_value = state_args[tool_arg_name]
                    else:
                        continue
                    current_state[state_key] = state_value
                    event_bridge.current_state[state_key] = state_value
                    logger.info(
                        f"Emitting StateSnapshotEvent for approved state key '{state_key}' "
                        f"with {len(state_value) if isinstance(state_value, list) else 'N/A'} items"
                    )
                    # NOTE(review): the live current_state dict is passed to each
                    # event; if StateSnapshotEvent does not copy on construction,
                    # later mutations would leak into earlier snapshots — confirm.
                    events.append(StateSnapshotEvent(snapshot=current_state))
                    break
    return events


def latest_approval_response(messages: list[ChatMessage]) -> FunctionApprovalResponseContent | None:
    """Get the latest approval response from messages.

    Only the LAST message is inspected; earlier approvals are ignored.

    Args:
        messages: Messages to search

    Returns:
        Latest approval response or None
    """
    if not messages:
        return None
    last_message = messages[-1]
    for content in last_message.contents:
        if type(content) is FunctionApprovalResponseContent:
            return content
    return None


def approval_steps(approval: FunctionApprovalResponseContent) -> list[Any]:
    """Extract steps from an approval response.

    Resolution order: "ag_ui_state_args" in additional_properties first,
    then the function call's own parsed arguments.

    Args:
        approval: Approval response content

    Returns:
        List of steps, or empty list if none
    """
    state_args: Any | None = None
    if approval.additional_properties:
        state_args = approval.additional_properties.get("ag_ui_state_args")
    if isinstance(state_args, dict):
        steps = state_args.get("steps")
        if isinstance(steps, list):
            return steps

    if approval.function_call:
        parsed_args = approval.function_call.parse_arguments()
        if isinstance(parsed_args, dict):
            steps = parsed_args.get("steps")
            if isinstance(steps, list):
                return steps

    return []


def is_step_based_approval(
    approval: FunctionApprovalResponseContent,
    predict_state_config: dict[str, dict[str, str]] | None,
) -> bool:
    """Check if an approval is step-based.

    True when the approval carries explicit steps, or when its tool is
    configured with tool_argument == "steps" in the predictive state config.

    Args:
        approval: Approval response to check
        predict_state_config: Predictive state configuration

    Returns:
        True if this is a step-based approval
    """
    steps = approval_steps(approval)
    if steps:
        return True
    if not approval.function_call:
        return False
    if not predict_state_config:
        return False
    tool_name = approval.function_call.name
    for config in predict_state_config.values():
        if config.get("tool") == tool_name and config.get("tool_argument") == "steps":
            return True
    return False
tool_ids else None - pending_confirm_changes_id = ( - str(confirm_changes_call.call_id) if confirm_changes_call and confirm_changes_call.call_id else None - ) - continue - - if role_value == "user": - if pending_confirm_changes_id: - user_text = "" - for content in msg.contents or []: - if isinstance(content, TextContent): - user_text = content.text - break - - try: - parsed = json.loads(user_text) - if "accepted" in parsed: - logger.info( - f"Injecting synthetic tool result for confirm_changes call_id={pending_confirm_changes_id}" - ) - synthetic_result = ChatMessage( - role="tool", - contents=[ - FunctionResultContent( - call_id=pending_confirm_changes_id, - result="Confirmed" if parsed.get("accepted") else "Rejected", - ) - ], - ) - sanitized.append(synthetic_result) - if pending_tool_call_ids: - pending_tool_call_ids.discard(pending_confirm_changes_id) - pending_confirm_changes_id = None - continue - except (json.JSONDecodeError, KeyError) as exc: - logger.debug("Could not parse user message as confirm_changes response: %s", type(exc).__name__) - - if pending_tool_call_ids: - logger.info( - f"User message arrived with {len(pending_tool_call_ids)} pending tool calls - injecting synthetic results" - ) - for pending_call_id in pending_tool_call_ids: - logger.info(f"Injecting synthetic tool result for pending call_id={pending_call_id}") - synthetic_result = ChatMessage( - role="tool", - contents=[ - FunctionResultContent( - call_id=pending_call_id, - result="Tool execution skipped - user provided follow-up message", - ) - ], - ) - sanitized.append(synthetic_result) - pending_tool_call_ids = None - pending_confirm_changes_id = None - - sanitized.append(msg) - pending_confirm_changes_id = None - continue - - if role_value == "tool": - if not pending_tool_call_ids: - continue - keep = False - for content in msg.contents or []: - if isinstance(content, FunctionResultContent): - call_id = str(content.call_id) - if call_id in pending_tool_call_ids: - keep = True - if 
call_id == pending_confirm_changes_id: - pending_confirm_changes_id = None - break - if keep: - sanitized.append(msg) - continue - - sanitized.append(msg) - pending_tool_call_ids = None - pending_confirm_changes_id = None - - return sanitized - - -def deduplicate_messages(messages: list[ChatMessage]) -> list[ChatMessage]: - """Remove duplicate messages while preserving order.""" - seen_keys: dict[Any, int] = {} - unique_messages: list[ChatMessage] = [] - - for idx, msg in enumerate(messages): - role_value = msg.role.value if hasattr(msg.role, "value") else str(msg.role) - - if role_value == "tool" and msg.contents and isinstance(msg.contents[0], FunctionResultContent): - call_id = str(msg.contents[0].call_id) - key: Any = (role_value, call_id) - - if key in seen_keys: - existing_idx = seen_keys[key] - existing_msg = unique_messages[existing_idx] - - existing_result = None - if existing_msg.contents and isinstance(existing_msg.contents[0], FunctionResultContent): - existing_result = existing_msg.contents[0].result - new_result = msg.contents[0].result - - if (not existing_result or existing_result == "") and new_result: - logger.info(f"Replacing empty tool result at index {existing_idx} with data from index {idx}") - unique_messages[existing_idx] = msg - else: - logger.info(f"Skipping duplicate tool result at index {idx}: call_id={call_id}") - continue - - seen_keys[key] = len(unique_messages) - unique_messages.append(msg) - - elif ( - role_value == "assistant" and msg.contents and any(isinstance(c, FunctionCallContent) for c in msg.contents) - ): - tool_call_ids = tuple( - sorted(str(c.call_id) for c in msg.contents if isinstance(c, FunctionCallContent) and c.call_id) - ) - key = (role_value, tool_call_ids) - - if key in seen_keys: - logger.info(f"Skipping duplicate assistant tool call at index {idx}") - continue - - seen_keys[key] = len(unique_messages) - unique_messages.append(msg) - - else: - content_str = str([str(c) for c in msg.contents]) if msg.contents else 
# Copyright (c) Microsoft. All rights reserved.

"""Predictive state handling utilities."""

import json
import logging
import re
from typing import Any

from ag_ui.core import StateDeltaEvent

from .._utils import safe_json_parse

logger = logging.getLogger(__name__)


class PredictiveStateHandler:
    """Handles predictive state updates from streaming tool calls."""

    def __init__(
        self,
        predict_state_config: dict[str, dict[str, str]] | None = None,
        current_state: dict[str, Any] | None = None,
    ) -> None:
        """Initialize the handler.

        Args:
            predict_state_config: Configuration mapping state keys to tool/argument pairs
            current_state: Reference to current state dict
        """
        self.predict_state_config = predict_state_config or {}
        self.current_state = current_state or {}
        # Accumulated JSON argument text for the tool call currently streaming.
        self.streaming_tool_args: str = ""
        # Last value emitted per state key, used to suppress duplicate deltas.
        self.last_emitted_state: dict[str, Any] = {}
        # Count of deltas emitted for the current stream (drives log throttling).
        self.state_delta_count: int = 0
        # Values to fold into current_state once the stream completes.
        self.pending_state_updates: dict[str, Any] = {}

    def reset_streaming(self) -> None:
        """Reset streaming state for a new tool call."""
        self.streaming_tool_args = ""
        self.state_delta_count = 0

    def extract_state_value(
        self,
        tool_name: str,
        args: dict[str, Any] | str | None,
    ) -> tuple[str, Any] | None:
        """Extract state value from tool arguments based on config.

        Args:
            tool_name: Name of the tool being called
            args: Tool arguments (dict or JSON string)

        Returns:
            Tuple of (state_key, state_value) or None if no match
        """
        if not self.predict_state_config:
            return None

        parsed_args = safe_json_parse(args) if isinstance(args, str) else args
        if not parsed_args:
            return None

        for state_key, config in self.predict_state_config.items():
            if config["tool"] != tool_name:
                continue
            tool_arg_name = config["tool_argument"]
            # "*" maps the entire argument dict onto the state key.
            if tool_arg_name == "*":
                return (state_key, parsed_args)
            if tool_arg_name in parsed_args:
                return (state_key, parsed_args[tool_arg_name])

        return None

    def is_predictive_tool(self, tool_name: str | None) -> bool:
        """Check if a tool is configured for predictive state.

        Args:
            tool_name: Name of the tool to check

        Returns:
            True if tool is in predictive state config
        """
        if not tool_name or not self.predict_state_config:
            return False
        for config in self.predict_state_config.values():
            if config["tool"] == tool_name:
                return True
        return False

    def emit_streaming_deltas(
        self,
        tool_name: str | None,
        argument_chunk: str,
    ) -> list[StateDeltaEvent]:
        """Process streaming argument chunk and emit state deltas.

        Args:
            tool_name: Name of the current tool
            argument_chunk: New chunk of JSON arguments

        Returns:
            List of state delta events to emit
        """
        events: list[StateDeltaEvent] = []
        if not tool_name or not self.predict_state_config:
            return events

        self.streaming_tool_args += argument_chunk
        logger.debug(
            "Predictive state: accumulated %s chars for tool '%s'",
            len(self.streaming_tool_args),
            tool_name,
        )

        # Try to parse complete JSON first
        parsed_args = None
        try:
            parsed_args = json.loads(self.streaming_tool_args)
        except json.JSONDecodeError:
            # Fall back to regex matching for partial JSON
            events.extend(self._emit_partial_deltas(tool_name))

        if parsed_args:
            events.extend(self._emit_complete_deltas(tool_name, parsed_args))

        return events

    def _emit_partial_deltas(self, tool_name: str) -> list[StateDeltaEvent]:
        """Emit deltas from partial JSON using regex matching.

        Best-effort heuristic: scrapes a string value for the configured
        argument out of incomplete JSON so the UI can render progress before
        the arguments finish streaming.

        Args:
            tool_name: Name of the current tool

        Returns:
            List of state delta events
        """
        events: list[StateDeltaEvent] = []

        for state_key, config in self.predict_state_config.items():
            if config["tool"] != tool_name:
                continue
            tool_arg_name = config["tool_argument"]
            # Capture everything after `"arg": "` up to (not including) the
            # next double quote; works only for string-valued arguments.
            pattern = rf'"{re.escape(tool_arg_name)}":\s*"([^"]*)'
            match = re.search(pattern, self.streaming_tool_args)

            if match:
                # NOTE(review): manual unescape applies \n and \" before \\,
                # which can mis-decode sequences like a literal backslash
                # followed by 'n'. Acceptable as a preview heuristic — the
                # complete-JSON path supersedes it — but confirm intent.
                partial_value = match.group(1).replace("\\n", "\n").replace('\\"', '"').replace("\\\\", "\\")

                if state_key not in self.last_emitted_state or self.last_emitted_state[state_key] != partial_value:
                    event = self._create_delta_event(state_key, partial_value)
                    events.append(event)
                    self.last_emitted_state[state_key] = partial_value
                    self.pending_state_updates[state_key] = partial_value

        return events

    def _emit_complete_deltas(
        self,
        tool_name: str,
        parsed_args: dict[str, Any],
    ) -> list[StateDeltaEvent]:
        """Emit deltas from complete parsed JSON.

        Args:
            tool_name: Name of the current tool
            parsed_args: Fully parsed arguments dict

        Returns:
            List of state delta events
        """
        events: list[StateDeltaEvent] = []

        for state_key, config in self.predict_state_config.items():
            if config["tool"] != tool_name:
                continue
            tool_arg_name = config["tool_argument"]

            if tool_arg_name == "*":
                state_value = parsed_args
            elif tool_arg_name in parsed_args:
                state_value = parsed_args[tool_arg_name]
            else:
                continue

            # Only emit when the value actually changed since the last emit.
            if state_key not in self.last_emitted_state or self.last_emitted_state[state_key] != state_value:
                event = self._create_delta_event(state_key, state_value)
                events.append(event)
                self.last_emitted_state[state_key] = state_value
                self.pending_state_updates[state_key] = state_value

        return events

    def _create_delta_event(self, state_key: str, value: Any) -> StateDeltaEvent:
        """Create a state delta event with logging.

        Logging is throttled: the 1st, 11th, 21st, ... deltas log details,
        and every 100th logs a counter, to avoid flooding during streaming.

        Args:
            state_key: The state key being updated
            value: The new value

        Returns:
            StateDeltaEvent instance
        """
        self.state_delta_count += 1
        if self.state_delta_count % 10 == 1:
            logger.info(
                "StateDeltaEvent #%s for '%s': op=replace, path=/%s, value_length=%s",
                self.state_delta_count,
                state_key,
                state_key,
                len(str(value)),
            )
        elif self.state_delta_count % 100 == 0:
            logger.info(f"StateDeltaEvent #{self.state_delta_count} emitted")

        # JSON Patch (RFC 6902) "replace" on the top-level state key.
        return StateDeltaEvent(
            delta=[
                {
                    "op": "replace",
                    "path": f"/{state_key}",
                    "value": value,
                }
            ],
        )

    def apply_pending_updates(self) -> None:
        """Apply pending updates to current state and clear them."""
        for key, value in self.pending_state_updates.items():
            self.current_state[key] = value
        self.pending_state_updates.clear()
a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_state_manager.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_state_manager.py @@ -22,9 +22,11 @@ def __init__( self.predict_state_config = predict_state_config or {} self.require_confirmation = require_confirmation self.current_state: dict[str, Any] = {} + self._state_from_input: bool = False def initialize(self, initial_state: dict[str, Any] | None) -> dict[str, Any]: """Initialize state with schema defaults.""" + self._state_from_input = initial_state is not None self.current_state = (initial_state or {}).copy() self._apply_schema_defaults() return self.current_state @@ -60,7 +62,9 @@ def state_context_message(self, is_new_user_turn: bool, conversation_has_tool_ca """Inject state context only when starting a new user turn.""" if not self.current_state or not self.state_schema: return None - if not is_new_user_turn or conversation_has_tool_calls: + if not is_new_user_turn: + return None + if conversation_has_tool_calls and not self._state_from_input: return None state_json = json.dumps(self.current_state, indent=2) diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py b/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py index 6bdff552b6..3067e3e4a7 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py @@ -16,6 +16,10 @@ TextMessageContentEvent, TextMessageEndEvent, TextMessageStartEvent, + ToolCallArgsEvent, + ToolCallEndEvent, + ToolCallResultEvent, + ToolCallStartEvent, ) from agent_framework import ( AgentProtocol, @@ -25,8 +29,31 @@ FunctionResultContent, TextContent, ) +from agent_framework._middleware import extract_and_merge_function_middleware +from agent_framework._tools import ( + FunctionInvocationConfiguration, + _collect_approval_responses, # type: ignore + _replace_approval_contents_with_results, # type: ignore + _try_execute_function_calls, # type: 
ignore +) -from ._utils import convert_agui_tools_to_agent_framework, generate_event_id +from ._orchestration._helpers import ( + approval_steps, + build_safe_metadata, + collect_approved_state_snapshots, + ensure_tool_call_entry, + is_step_based_approval, + latest_approval_response, + select_approval_tool_name, + select_messages_to_run, + tool_name_for_call_id, +) +from ._orchestration._tooling import ( + collect_server_tools, + merge_tools, + register_additional_client_tools, +) +from ._utils import convert_agui_tools_to_agent_framework, generate_event_id, get_role_value if TYPE_CHECKING: from ._agent import AgentConfig @@ -61,6 +88,7 @@ def __init__( # Lazy-loaded properties self._messages = None + self._snapshot_messages = None self._last_message = None self._run_id: str | None = None self._thread_id: str | None = None @@ -69,12 +97,27 @@ def __init__( def messages(self): """Get converted Agent Framework messages (lazy loaded).""" if self._messages is None: - from ._message_adapters import agui_messages_to_agent_framework + from ._message_adapters import normalize_agui_input_messages raw = self.input_data.get("messages", []) - self._messages = agui_messages_to_agent_framework(raw) + if not isinstance(raw, list): + raw = [] + self._messages, self._snapshot_messages = normalize_agui_input_messages(raw) return self._messages + @property + def snapshot_messages(self) -> list[dict[str, Any]]: + """Get normalized AG-UI snapshot messages (lazy loaded).""" + if self._snapshot_messages is None: + if self._messages is None: + _ = self.messages + else: + from ._message_adapters import agent_framework_messages_to_agui, agui_messages_to_snapshot_format + + raw_snapshot = agent_framework_messages_to_agui(self._messages) + self._snapshot_messages = agui_messages_to_snapshot_format(raw_snapshot) + return self._snapshot_messages or [] + @property def last_message(self): """Get the last message in the conversation (lazy loaded).""" @@ -270,14 +313,7 @@ async def run( AG-UI 
events """ from ._events import AgentFrameworkEventBridge - from ._message_adapters import agui_messages_to_snapshot_format - from ._orchestration._message_hygiene import deduplicate_messages, sanitize_tool_history from ._orchestration._state_manager import StateManager - from ._orchestration._tooling import ( - collect_server_tools, - merge_tools, - register_additional_client_tools, - ) logger.info(f"Starting default agent run for thread_id={context.thread_id}, run_id={context.run_id}") @@ -286,12 +322,15 @@ async def run( response_format = context.agent.chat_options.response_format skip_text_content = response_format is not None + client_tools = convert_agui_tools_to_agent_framework(context.input_data.get("tools")) + approval_tool_name = select_approval_tool_name(client_tools) + state_manager = StateManager( state_schema=context.config.state_schema, predict_state_config=context.config.predict_state_config, require_confirmation=context.config.require_confirmation, ) - current_state = state_manager.initialize(context.input_data.get("state", {})) + current_state = state_manager.initialize(context.input_data.get("state")) event_bridge = AgentFrameworkEventBridge( run_id=context.run_id, @@ -299,8 +338,8 @@ async def run( predict_state_config=context.config.predict_state_config, current_state=current_state, skip_text_content=skip_text_content, - input_messages=context.input_data.get("messages", []), require_confirmation=context.config.require_confirmation, + approval_tool_name=approval_tool_name, ) yield event_bridge.create_run_started_event() @@ -321,17 +360,18 @@ async def run( if current_state: thread.metadata["current_state"] = current_state # type: ignore[attr-defined] - raw_messages = context.messages or [] - if not raw_messages: + provider_messages = context.messages or [] + snapshot_messages = context.snapshot_messages + if not provider_messages: logger.warning("No messages provided in AG-UI input") yield event_bridge.create_run_finished_event() return - 
logger.info(f"Received {len(raw_messages)} raw messages from client") - for i, msg in enumerate(raw_messages): - role = msg.role.value if hasattr(msg.role, "value") else str(msg.role) + logger.info(f"Received {len(provider_messages)} provider messages from client") + for i, msg in enumerate(provider_messages): + role = get_role_value(msg) msg_id = getattr(msg, "message_id", None) - logger.info(f" Raw message {i}: role={role}, id={msg_id}") + logger.info(f" Message {i}: role={role}, id={msg_id}") if hasattr(msg, "contents") and msg.contents: for j, content in enumerate(msg.contents): content_type = type(content).__name__ @@ -354,62 +394,26 @@ async def run( else: logger.debug(f" Content {j}: {content_type}") - sanitized_messages = sanitize_tool_history(raw_messages) - provider_messages = deduplicate_messages(sanitized_messages) - - if not provider_messages: - logger.info("No provider-eligible messages after filtering; finishing run without invoking agent.") - yield event_bridge.create_run_finished_event() - return - - logger.info(f"Processing {len(provider_messages)} provider messages after sanitization/deduplication") - for i, msg in enumerate(provider_messages): - role = msg.role.value if hasattr(msg.role, "value") else str(msg.role) - logger.info(f" Message {i}: role={role}") - if hasattr(msg, "contents") and msg.contents: - for j, content in enumerate(msg.contents): - content_type = type(content).__name__ - if isinstance(content, TextContent): - logger.info(f" Content {j}: {content_type} - text_length={len(content.text)}") - elif isinstance(content, FunctionCallContent): - arg_length = len(str(content.arguments)) if content.arguments else 0 - logger.info(" Content %s: %s - %s args_length=%s", j, content_type, content.name, arg_length) - elif isinstance(content, FunctionResultContent): - result_preview = type(content.result).__name__ if content.result is not None else "None" - logger.info( - " Content %s: %s - call_id=%s, result_type=%s", - j, - content_type, - 
content.call_id, - result_preview, - ) - else: - logger.info(f" Content {j}: {content_type}") - - messages_to_run: list[Any] = [] - is_new_user_turn = False - if provider_messages: - last_msg = provider_messages[-1] - role_value = last_msg.role.value if hasattr(last_msg.role, "value") else str(last_msg.role) - is_new_user_turn = role_value == "user" - - conversation_has_tool_calls = False - for msg in provider_messages: - role_value = msg.role.value if hasattr(msg.role, "value") else str(msg.role) - if role_value == "assistant" and hasattr(msg, "contents") and msg.contents: - if any(isinstance(content, FunctionCallContent) for content in msg.contents): - conversation_has_tool_calls = True - break - - state_context_msg = state_manager.state_context_message( - is_new_user_turn=is_new_user_turn, conversation_has_tool_calls=conversation_has_tool_calls - ) - if state_context_msg: - messages_to_run.append(state_context_msg) - - messages_to_run.extend(provider_messages) + pending_tool_calls: list[dict[str, Any]] = [] + tool_calls_by_id: dict[str, dict[str, Any]] = {} + tool_results: list[dict[str, Any]] = [] + tool_calls_ended: set[str] = set() + messages_snapshot_emitted = False + accumulated_text_content = "" + active_message_id: str | None = None + + # Check for FunctionApprovalResponseContent and emit updated state snapshot + # This ensures the UI shows the approved state (e.g., 2 steps) not the original (3 steps) + for snapshot_evt in collect_approved_state_snapshots( + provider_messages, + context.config.predict_state_config, + current_state, + event_bridge, + ): + yield snapshot_evt + + messages_to_run = select_messages_to_run(provider_messages, state_manager) - client_tools = convert_agui_tools_to_agent_framework(context.input_data.get("tools")) logger.info(f"[TOOLS] Client sent {len(client_tools) if client_tools else 0} tools") if client_tools: for tool in client_tools: @@ -421,17 +425,11 @@ async def run( register_additional_client_tools(context.agent, 
client_tools) tools_param = merge_tools(server_tools, client_tools) - all_updates: list[Any] = [] + collect_updates = response_format is not None + all_updates: list[Any] | None = [] if collect_updates else None update_count = 0 # Prepare metadata for chat client (Azure requires string values) - safe_metadata: dict[str, Any] = {} - thread_metadata = getattr(thread, "metadata", None) - if thread_metadata: - for key, value in thread_metadata.items(): - value_str = value if isinstance(value, str) else json.dumps(value) - if len(value_str) > 512: - value_str = value_str[:512] - safe_metadata[key] = value_str + safe_metadata = build_safe_metadata(getattr(thread, "metadata", None)) run_kwargs: dict[str, Any] = { "thread": thread, @@ -441,27 +439,200 @@ async def run( if safe_metadata: run_kwargs["store"] = True + async def _resolve_approval_responses( + messages: list[Any], + tools_for_execution: list[Any], + ) -> None: + fcc_todo = _collect_approval_responses(messages) + if not fcc_todo: + return + + approved_responses = [resp for resp in fcc_todo.values() if resp.approved] + approved_function_results: list[Any] = [] + if approved_responses and tools_for_execution: + chat_client = getattr(context.agent, "chat_client", None) + config = ( + getattr(chat_client, "function_invocation_configuration", None) or FunctionInvocationConfiguration() + ) + middleware_pipeline = extract_and_merge_function_middleware(chat_client, run_kwargs) + try: + results, _ = await _try_execute_function_calls( + custom_args=run_kwargs, + attempt_idx=0, + function_calls=approved_responses, + tools=tools_for_execution, + middleware_pipeline=middleware_pipeline, + config=config, + ) + approved_function_results = list(results) + except Exception: + logger.error("Failed to execute approved tool calls; injecting error results.") + approved_function_results = [] + + normalized_results: list[FunctionResultContent] = [] + for idx, approval in enumerate(approved_responses): + if idx < 
len(approved_function_results) and isinstance( + approved_function_results[idx], FunctionResultContent + ): + normalized_results.append(approved_function_results[idx]) + continue + call_id = approval.function_call.call_id or approval.id + normalized_results.append( + FunctionResultContent(call_id=call_id, result="Error: Tool call invocation failed.") + ) + + _replace_approval_contents_with_results(messages, fcc_todo, normalized_results) # type: ignore + + def _should_emit_tool_snapshot(tool_name: str | None) -> bool: + if not pending_tool_calls or not tool_results: + return False + if tool_name and context.config.predict_state_config and not context.config.require_confirmation: + for config in context.config.predict_state_config.values(): + if config["tool"] == tool_name: + logger.info( + f"Skipping intermediate MessagesSnapshotEvent for predictive tool '{tool_name}' " + " - delaying until summary" + ) + return False + return True + + def _build_messages_snapshot(tool_message_id: str | None = None) -> MessagesSnapshotEvent: + has_text_content = bool(accumulated_text_content) + all_messages = snapshot_messages.copy() + + if pending_tool_calls: + if tool_message_id and not has_text_content: + tool_call_message_id = tool_message_id + else: + tool_call_message_id = ( + active_message_id if not has_text_content and active_message_id else generate_event_id() + ) + tool_call_message = { + "id": tool_call_message_id, + "role": "assistant", + "tool_calls": pending_tool_calls.copy(), + } + all_messages.append(tool_call_message) + + all_messages.extend(tool_results) + + if has_text_content and active_message_id: + assistant_text_message = { + "id": active_message_id, + "role": "assistant", + "content": accumulated_text_content, + } + all_messages.append(assistant_text_message) + + return MessagesSnapshotEvent( + messages=all_messages, # type: ignore[arg-type] + ) + + # Use tools_param if available (includes client tools), otherwise fall back to server_tools + # This ensures 
both server tools AND client tools can be executed after approval + tools_for_approval = tools_param if tools_param is not None else server_tools + latest_approval = latest_approval_response(messages_to_run) + await _resolve_approval_responses(messages_to_run, tools_for_approval) + + if latest_approval and is_step_based_approval(latest_approval, context.config.predict_state_config): + from ._confirmation_strategies import DefaultConfirmationStrategy + + strategy = context.confirmation_strategy + if strategy is None: + strategy = DefaultConfirmationStrategy() + + steps = approval_steps(latest_approval) + if steps: + if latest_approval.approved: + confirmation_message = strategy.on_approval_accepted(steps) + else: + confirmation_message = strategy.on_approval_rejected(steps) + else: + if latest_approval.approved: + confirmation_message = strategy.on_state_confirmed() + else: + confirmation_message = strategy.on_state_rejected() + + message_id = generate_event_id() + yield TextMessageStartEvent(message_id=message_id, role="assistant") + yield TextMessageContentEvent(message_id=message_id, delta=confirmation_message) + yield TextMessageEndEvent(message_id=message_id) + yield event_bridge.create_run_finished_event() + return + async for update in context.agent.run_stream(messages_to_run, **run_kwargs): update_count += 1 logger.info(f"[STREAM] Received update #{update_count} from agent") - all_updates.append(update) + if all_updates is not None: + all_updates.append(update) + if event_bridge.current_message_id is None and update.contents: + has_tool_call = any(isinstance(content, FunctionCallContent) for content in update.contents) + has_text = any(isinstance(content, TextContent) for content in update.contents) + if has_tool_call and not has_text: + tool_message_id = generate_event_id() + event_bridge.current_message_id = tool_message_id + active_message_id = tool_message_id + accumulated_text_content = "" + logger.info( + "[STREAM] Emitting TextMessageStartEvent for 
tool-only response message_id=%s", + tool_message_id, + ) + yield TextMessageStartEvent(message_id=tool_message_id, role="assistant") events = await event_bridge.from_agent_run_update(update) logger.info(f"[STREAM] Update #{update_count} produced {len(events)} events") for event in events: + if isinstance(event, TextMessageStartEvent): + active_message_id = event.message_id + accumulated_text_content = "" + elif isinstance(event, TextMessageContentEvent): + accumulated_text_content += event.delta + elif isinstance(event, ToolCallStartEvent): + tool_call_entry = ensure_tool_call_entry(event.tool_call_id, tool_calls_by_id, pending_tool_calls) + tool_call_entry["function"]["name"] = event.tool_call_name + elif isinstance(event, ToolCallArgsEvent): + tool_call_entry = ensure_tool_call_entry(event.tool_call_id, tool_calls_by_id, pending_tool_calls) + tool_call_entry["function"]["arguments"] += event.delta + elif isinstance(event, ToolCallEndEvent): + tool_calls_ended.add(event.tool_call_id) + elif isinstance(event, ToolCallResultEvent): + tool_results.append( + { + "id": event.message_id, + "role": "tool", + "toolCallId": event.tool_call_id, + "content": event.content, + } + ) logger.info(f"[STREAM] Yielding event: {type(event).__name__}") yield event + if isinstance(event, ToolCallResultEvent): + tool_name = tool_name_for_call_id(tool_calls_by_id, event.tool_call_id) + if _should_emit_tool_snapshot(tool_name): + messages_snapshot_emitted = True + messages_snapshot = _build_messages_snapshot() + logger.info(f"[STREAM] Yielding event: {type(messages_snapshot).__name__}") + yield messages_snapshot + elif isinstance(event, ToolCallEndEvent): + tool_name = tool_name_for_call_id(tool_calls_by_id, event.tool_call_id) + if tool_name == "confirm_changes": + messages_snapshot_emitted = True + messages_snapshot = _build_messages_snapshot() + logger.info(f"[STREAM] Yielding event: {type(messages_snapshot).__name__}") + yield messages_snapshot logger.info(f"[STREAM] Agent stream 
completed. Total updates: {update_count}") if event_bridge.should_stop_after_confirm: - logger.info("Stopping run after confirm_changes - waiting for user response") + logger.info("Stopping run - waiting for user approval/confirmation response") + if event_bridge.current_message_id: + logger.info(f"[CONFIRM] Emitting TextMessageEndEvent for message_id={event_bridge.current_message_id}") + yield event_bridge.create_message_end_event(event_bridge.current_message_id) + event_bridge.current_message_id = None yield event_bridge.create_run_finished_event() return - if event_bridge.pending_tool_calls: - pending_without_end = [ - tc for tc in event_bridge.pending_tool_calls if tc.get("id") not in event_bridge.tool_calls_ended - ] + if pending_tool_calls: + pending_without_end = [tc for tc in pending_tool_calls if tc.get("id") not in tool_calls_ended] if pending_without_end: logger.info( "Found %s pending tool calls without end event - emitting ToolCallEndEvent", @@ -470,13 +641,11 @@ async def run( for tool_call in pending_without_end: tool_call_id = tool_call.get("id") if tool_call_id: - from ag_ui.core import ToolCallEndEvent - end_event = ToolCallEndEvent(tool_call_id=tool_call_id) logger.info(f"Emitting ToolCallEndEvent for declaration-only tool call '{tool_call_id}'") yield end_event - if all_updates and response_format: + if response_format and all_updates: from agent_framework import AgentRunResponse from pydantic import BaseModel @@ -508,37 +677,22 @@ async def run( logger.info(f"[FINALIZE] Emitting TextMessageEndEvent for message_id={event_bridge.current_message_id}") yield event_bridge.create_message_end_event(event_bridge.current_message_id) - assistant_text_message = { - "id": event_bridge.current_message_id, - "role": "assistant", - "content": event_bridge.accumulated_text_content, - } - - converted_input_messages = agui_messages_to_snapshot_format(event_bridge.input_messages) - all_messages = converted_input_messages.copy() - - if 
event_bridge.pending_tool_calls: - tool_call_message = { - "id": generate_event_id(), - "role": "assistant", - "tool_calls": event_bridge.pending_tool_calls.copy(), - } - all_messages.append(tool_call_message) - - all_messages.extend(event_bridge.tool_results.copy()) - all_messages.append(assistant_text_message) - - messages_snapshot = MessagesSnapshotEvent( - messages=all_messages, # type: ignore[arg-type] - ) + messages_snapshot = _build_messages_snapshot(tool_message_id=event_bridge.current_message_id) + messages_snapshot_emitted = True logger.info( - "[FINALIZE] Emitting MessagesSnapshotEvent with %s messages (text content length: %s)", - len(all_messages), - len(event_bridge.accumulated_text_content), + f"[FINALIZE] Emitting MessagesSnapshotEvent with {len(messages_snapshot.messages)} messages " + f"(text content length: {len(accumulated_text_content)})" ) yield messages_snapshot else: logger.info("[FINALIZE] No current_message_id - skipping TextMessageEndEvent") + if not messages_snapshot_emitted and (pending_tool_calls or tool_results): + messages_snapshot = _build_messages_snapshot() + messages_snapshot_emitted = True + logger.info( + f"[FINALIZE] Emitting MessagesSnapshotEvent with {len(messages_snapshot.messages)} messages" + ) + yield messages_snapshot logger.info("[FINALIZE] Emitting RUN_FINISHED event") yield event_bridge.create_run_finished_event() diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_utils.py b/python/packages/ag-ui/agent_framework_ag_ui/_utils.py index 8b271988dc..c0da986308 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_utils.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_utils.py @@ -3,13 +3,29 @@ """Utility functions for AG-UI integration.""" import copy +import json import uuid from collections.abc import Callable, MutableMapping, Sequence from dataclasses import asdict, is_dataclass from datetime import date, datetime from typing import Any -from agent_framework import AIFunction, ToolProtocol +from 
agent_framework import AIFunction, Role, ToolProtocol + +# Role mapping constants +AGUI_TO_FRAMEWORK_ROLE: dict[str, Role] = { + "user": Role.USER, + "assistant": Role.ASSISTANT, + "system": Role.SYSTEM, +} + +FRAMEWORK_TO_AGUI_ROLE: dict[Role, str] = { + Role.USER: "user", + Role.ASSISTANT: "assistant", + Role.SYSTEM: "system", +} + +ALLOWED_AGUI_ROLES: set[str] = {"user", "assistant", "system", "tool"} def generate_event_id() -> str: @@ -17,6 +33,85 @@ def generate_event_id() -> str: return str(uuid.uuid4()) +def safe_json_parse(value: Any) -> dict[str, Any] | None: + """Safely parse a value as JSON dict. + + Args: + value: String or dict to parse + + Returns: + Parsed dict or None if parsing fails + """ + if isinstance(value, dict): + return value + if isinstance(value, str): + try: + parsed = json.loads(value) + if isinstance(parsed, dict): + return parsed + except json.JSONDecodeError: + pass + return None + + +def get_role_value(message: Any) -> str: + """Extract role string from a message object. + + Handles both enum roles (with .value) and string roles. + + Args: + message: Message object with role attribute + + Returns: + Role as lowercase string, or empty string if not found + """ + role = getattr(message, "role", None) + if role is None: + return "" + if hasattr(role, "value"): + return str(role.value) + return str(role) + + +def normalize_agui_role(raw_role: Any) -> str: + """Normalize an AG-UI role to a standard role string. + + Args: + raw_role: Raw role value from AG-UI message + + Returns: + Normalized role string (user, assistant, system, or tool) + """ + if not isinstance(raw_role, str): + return "user" + role = raw_role.lower() + if role == "developer": + return "system" + if role in ALLOWED_AGUI_ROLES: + return role + return "user" + + +def extract_state_from_tool_args( + args: dict[str, Any] | None, + tool_arg_name: str, +) -> Any: + """Extract state value from tool arguments based on config. 
+ + Args: + args: Parsed tool arguments dict + tool_arg_name: Name of the argument to extract, or "*" for entire args + + Returns: + Extracted state value, or None if not found + """ + if not args: + return None + if tool_arg_name == "*": + return args + return args.get(tool_arg_name) + + def merge_state(current: dict[str, Any], update: dict[str, Any]) -> dict[str, Any]: """Merge state updates. diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py index abbd113418..ab7a3533cd 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/human_in_the_loop_agent.py @@ -75,8 +75,10 @@ def human_in_the_loop_agent(chat_client: ChatClientProtocol) -> ChatAgent: 9. "Calibrate systems" 10. "Final testing" - After calling the function, provide a brief acknowledgment like: - "I've created a plan with 10 steps. You can customize which steps to enable before I proceed." + IMPORTANT: When you call generate_task_steps, the user will be shown the steps and asked to approve. + Do NOT output any text along with the function call - just call the function. + After the user approves and the function executes, THEN provide a brief acknowledgment like: + "The plan has been created with X steps selected." 
""", chat_client=chat_client, tools=[generate_task_steps], diff --git a/python/packages/ag-ui/pyproject.toml b/python/packages/ag-ui/pyproject.toml index 97694a9bb2..10f3e19e40 100644 --- a/python/packages/ag-ui/pyproject.toml +++ b/python/packages/ag-ui/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "agent-framework-ag-ui" -version = "1.0.0b251223" +version = "1.0.0b260107" description = "AG-UI protocol integration for Agent Framework" readme = "README.md" license-files = ["LICENSE"] diff --git a/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py b/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py index beb6f8af2c..281b81c968 100644 --- a/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py +++ b/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py @@ -630,3 +630,179 @@ async def stream_fn( # Should contain some reference to the document full_text = "".join(e.delta for e in text_events) assert "written" in full_text.lower() or "document" in full_text.lower() + + +async def test_function_approval_mode_executes_tool(): + """Test that function approval with approval_mode='always_require' sends the correct messages.""" + from agent_framework import FunctionResultContent, ai_function + from agent_framework.ag_ui import AgentFrameworkAgent + + messages_received: list[Any] = [] + + @ai_function( + name="get_datetime", + description="Get the current date and time", + approval_mode="always_require", + ) + def get_datetime() -> str: + return "2025/12/01 12:00:00" + + async def stream_fn( + messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + ) -> AsyncIterator[ChatResponseUpdate]: + # Capture the messages received by the chat client + messages_received.clear() + messages_received.extend(messages) + yield ChatResponseUpdate(contents=[TextContent(text="Processing completed")]) + + agent = ChatAgent( + name="test_agent", + instructions="Test", + chat_client=StreamingChatClientStub(stream_fn), + 
tools=[get_datetime], + ) + wrapper = AgentFrameworkAgent(agent=agent) + + # Simulate the conversation history with: + # 1. User message asking for time + # 2. Assistant message with the function call that needs approval + # 3. Tool approval message from user + tool_result: dict[str, Any] = {"accepted": True} + input_data: dict[str, Any] = { + "messages": [ + { + "role": "user", + "content": "What time is it?", + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_get_datetime_123", + "type": "function", + "function": { + "name": "get_datetime", + "arguments": "{}", + }, + } + ], + }, + { + "role": "tool", + "content": json.dumps(tool_result), + "toolCallId": "call_get_datetime_123", + }, + ], + } + + events: list[Any] = [] + async for event in wrapper.run_agent(input_data): + events.append(event) + + # Verify the run completed successfully + run_started = [e for e in events if e.type == "RUN_STARTED"] + run_finished = [e for e in events if e.type == "RUN_FINISHED"] + assert len(run_started) == 1 + assert len(run_finished) == 1 + + # Verify that a FunctionResultContent was created and sent to the agent + # Approved tool calls are resolved before the model run. + tool_result_found = False + for msg in messages_received: + for content in msg.contents: + if isinstance(content, FunctionResultContent): + tool_result_found = True + assert content.call_id == "call_get_datetime_123" + assert content.result == "2025/12/01 12:00:00" + break + + assert tool_result_found, ( + "FunctionResultContent should be included in messages sent to agent. " + "This is required for the model to see the approved tool execution result." 
+ ) + + +async def test_function_approval_mode_rejection(): + """Test that function approval rejection creates a rejection response.""" + from agent_framework import FunctionResultContent, ai_function + from agent_framework.ag_ui import AgentFrameworkAgent + + messages_received: list[Any] = [] + + @ai_function( + name="delete_all_data", + description="Delete all user data", + approval_mode="always_require", + ) + def delete_all_data() -> str: + return "All data deleted" + + async def stream_fn( + messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + ) -> AsyncIterator[ChatResponseUpdate]: + # Capture the messages received by the chat client + messages_received.clear() + messages_received.extend(messages) + yield ChatResponseUpdate(contents=[TextContent(text="Operation cancelled")]) + + agent = ChatAgent( + name="test_agent", + instructions="Test", + chat_client=StreamingChatClientStub(stream_fn), + tools=[delete_all_data], + ) + wrapper = AgentFrameworkAgent(agent=agent) + + # Simulate rejection + tool_result: dict[str, Any] = {"accepted": False} + input_data: dict[str, Any] = { + "messages": [ + { + "role": "user", + "content": "Delete all my data", + }, + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_delete_123", + "type": "function", + "function": { + "name": "delete_all_data", + "arguments": "{}", + }, + } + ], + }, + { + "role": "tool", + "content": json.dumps(tool_result), + "toolCallId": "call_delete_123", + }, + ], + } + + events: list[Any] = [] + async for event in wrapper.run_agent(input_data): + events.append(event) + + # Verify the run completed + run_finished = [e for e in events if e.type == "RUN_FINISHED"] + assert len(run_finished) == 1 + + # Verify that a FunctionResultContent with rejection payload was created + rejection_found = False + for msg in messages_received: + for content in msg.contents: + if isinstance(content, FunctionResultContent): + rejection_found = True + assert 
content.call_id == "call_delete_123" + assert content.result == "Error: Tool call invocation was rejected by user." + break + + assert rejection_found, ( + "FunctionResultContent with rejection details should be included in messages sent to agent. " + "This tells the model that the tool was rejected." + ) diff --git a/python/packages/ag-ui/tests/test_backend_tool_rendering.py b/python/packages/ag-ui/tests/test_backend_tool_rendering.py index 6fefc14665..97654182cf 100644 --- a/python/packages/ag-ui/tests/test_backend_tool_rendering.py +++ b/python/packages/ag-ui/tests/test_backend_tool_rendering.py @@ -52,8 +52,8 @@ async def test_tool_call_flow(): update2 = AgentRunResponseUpdate(contents=[tool_result]) events2 = await bridge.from_agent_run_update(update2) - # Should have: ToolCallEndEvent, ToolCallResultEvent, MessagesSnapshotEvent - assert len(events2) == 3 + # Should have: ToolCallEndEvent, ToolCallResultEvent + assert len(events2) == 2 assert isinstance(events2[0], ToolCallEndEvent) assert isinstance(events2[1], ToolCallResultEvent) diff --git a/python/packages/ag-ui/tests/test_events_comprehensive.py b/python/packages/ag-ui/tests/test_events_comprehensive.py index a51d1f382a..cfd45ea5c8 100644 --- a/python/packages/ag-ui/tests/test_events_comprehensive.py +++ b/python/packages/ag-ui/tests/test_events_comprehensive.py @@ -201,7 +201,8 @@ async def test_tool_result_with_none(): assert len(events) == 2 assert events[0].type == "TOOL_CALL_END" assert events[1].type == "TOOL_CALL_RESULT" - assert events[1].content == "" + # prepare_function_call_results serializes None as JSON "null" + assert events[1].content == "null" async def test_multiple_tool_results_in_sequence(): @@ -230,7 +231,12 @@ async def test_function_approval_request_basic(): """Test FunctionApprovalRequestContent conversion.""" from agent_framework_ag_ui._events import AgentFrameworkEventBridge - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") + # Set 
require_confirmation=False to test just the function_approval_request event + bridge = AgentFrameworkEventBridge( + run_id="test_run", + thread_id="test_thread", + require_confirmation=False, + ) func_call = FunctionCallContent( call_id="call_123", @@ -283,14 +289,12 @@ async def test_empty_predict_state_config(): assert "STATE_DELTA" not in event_types assert "STATE_SNAPSHOT" not in event_types - # Should have: ToolCallStart, ToolCallArgs, ToolCallEnd, ToolCallResult, MessagesSnapshot - # MessagesSnapshotEvent is emitted after tool results to track the conversation + # Should have: ToolCallStart, ToolCallArgs, ToolCallEnd, ToolCallResult assert event_types == [ "TOOL_CALL_START", "TOOL_CALL_ARGS", "TOOL_CALL_END", "TOOL_CALL_RESULT", - "MESSAGES_SNAPSHOT", ] @@ -688,3 +692,97 @@ async def test_state_delta_count_logging(): # State delta count should have incremented (one per unique state update) assert bridge.state_delta_count >= 1 + + +# Tests for list type tool results (MCP tool serialization) + + +async def test_tool_result_with_empty_list(): + """Test FunctionResultContent with empty list result.""" + from agent_framework_ag_ui._events import AgentFrameworkEventBridge + + bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") + + update = AgentRunResponseUpdate(contents=[FunctionResultContent(call_id="call_123", result=[])]) + events = await bridge.from_agent_run_update(update) + + assert len(events) == 2 + assert events[0].type == "TOOL_CALL_END" + assert events[1].type == "TOOL_CALL_RESULT" + # Empty list serializes as JSON empty array + assert events[1].content == "[]" + + +async def test_tool_result_with_single_text_content(): + """Test FunctionResultContent with single TextContent-like item (MCP tool result).""" + from dataclasses import dataclass + + from agent_framework_ag_ui._events import AgentFrameworkEventBridge + + @dataclass + class MockTextContent: + text: str + + bridge = AgentFrameworkEventBridge(run_id="test_run", 
thread_id="test_thread") + + update = AgentRunResponseUpdate( + contents=[FunctionResultContent(call_id="call_123", result=[MockTextContent("Hello from MCP tool!")])] + ) + events = await bridge.from_agent_run_update(update) + + assert len(events) == 2 + assert events[0].type == "TOOL_CALL_END" + assert events[1].type == "TOOL_CALL_RESULT" + # TextContent text is extracted and serialized as JSON array + assert events[1].content == '["Hello from MCP tool!"]' + + +async def test_tool_result_with_multiple_text_contents(): + """Test FunctionResultContent with multiple TextContent-like items (MCP tool result).""" + from dataclasses import dataclass + + from agent_framework_ag_ui._events import AgentFrameworkEventBridge + + @dataclass + class MockTextContent: + text: str + + bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") + + update = AgentRunResponseUpdate( + contents=[ + FunctionResultContent( + call_id="call_123", + result=[MockTextContent("First result"), MockTextContent("Second result")], + ) + ] + ) + events = await bridge.from_agent_run_update(update) + + assert len(events) == 2 + assert events[0].type == "TOOL_CALL_END" + assert events[1].type == "TOOL_CALL_RESULT" + # Multiple TextContent items should return JSON array + assert events[1].content == '["First result", "Second result"]' + + +async def test_tool_result_with_model_dump_objects(): + """Test FunctionResultContent with Pydantic BaseModel objects.""" + from pydantic import BaseModel + + from agent_framework_ag_ui._events import AgentFrameworkEventBridge + + class MockModel(BaseModel): + value: int + + bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") + + update = AgentRunResponseUpdate( + contents=[FunctionResultContent(call_id="call_123", result=[MockModel(value=1), MockModel(value=2)])] + ) + events = await bridge.from_agent_run_update(update) + + assert len(events) == 2 + assert events[1].type == "TOOL_CALL_RESULT" + # Should be properly 
serialized JSON array without double escaping + assert events[1].content == '[{"value": 1}, {"value": 2}]' diff --git a/python/packages/ag-ui/tests/test_helpers_ag_ui.py b/python/packages/ag-ui/tests/test_helpers_ag_ui.py index bfb528511e..fc82b11510 100644 --- a/python/packages/ag-ui/tests/test_helpers_ag_ui.py +++ b/python/packages/ag-ui/tests/test_helpers_ag_ui.py @@ -18,6 +18,7 @@ from agent_framework._clients import BaseChatClient from agent_framework._types import ChatResponse, ChatResponseUpdate +from agent_framework_ag_ui._message_adapters import _deduplicate_messages, _sanitize_tool_history from agent_framework_ag_ui._orchestrators import ExecutionContext StreamFn = Callable[..., AsyncIterator[ChatResponseUpdate]] @@ -134,5 +135,9 @@ def get_new_thread(self, **kwargs: Any) -> AgentThread: class TestExecutionContext(ExecutionContext): """ExecutionContext helper that allows setting messages for tests.""" - def set_messages(self, messages: list[ChatMessage]) -> None: - self._messages = messages + def set_messages(self, messages: list[ChatMessage], *, normalize: bool = True) -> None: + if normalize: + self._messages = _deduplicate_messages(_sanitize_tool_history(messages)) + else: + self._messages = messages + self._snapshot_messages = None diff --git a/python/packages/ag-ui/tests/test_human_in_the_loop.py b/python/packages/ag-ui/tests/test_human_in_the_loop.py index 92f6d69926..55a2869c91 100644 --- a/python/packages/ag-ui/tests/test_human_in_the_loop.py +++ b/python/packages/ag-ui/tests/test_human_in_the_loop.py @@ -10,9 +10,11 @@ async def test_function_approval_request_emission(): """Test that CustomEvent is emitted for FunctionApprovalRequestContent.""" + # Set require_confirmation=False to test just the function_approval_request event bridge = AgentFrameworkEventBridge( run_id="test_run", thread_id="test_thread", + require_confirmation=False, ) # Create approval request @@ -47,11 +49,65 @@ async def test_function_approval_request_emission(): assert 
event.value["function_call"]["arguments"]["subject"] == "Test" +async def test_function_approval_request_with_confirm_changes(): + """Test that confirm_changes is also emitted when require_confirmation=True.""" + bridge = AgentFrameworkEventBridge( + run_id="test_run", + thread_id="test_thread", + require_confirmation=True, + ) + + func_call = FunctionCallContent( + call_id="call_456", + name="delete_file", + arguments={"path": "/tmp/test.txt"}, + ) + approval_request = FunctionApprovalRequestContent( + id="approval_002", + function_call=func_call, + ) + + update = AgentRunResponseUpdate(contents=[approval_request]) + events = await bridge.from_agent_run_update(update) + + # Should emit: ToolCallEndEvent, CustomEvent, and confirm_changes (Start, Args, End) = 5 events + assert len(events) == 5 + + # Check ToolCallEndEvent + assert events[0].type == "TOOL_CALL_END" + assert events[0].tool_call_id == "call_456" + + # Check function_approval_request CustomEvent + assert events[1].type == "CUSTOM" + assert events[1].name == "function_approval_request" + + # Check confirm_changes tool call events + assert events[2].type == "TOOL_CALL_START" + assert events[2].tool_call_name == "confirm_changes" + assert events[3].type == "TOOL_CALL_ARGS" + # Verify confirm_changes includes function info for Dojo UI + import json + + args = json.loads(events[3].delta) + assert args["function_name"] == "delete_file" + assert args["function_call_id"] == "call_456" + assert args["function_arguments"] == {"path": "/tmp/test.txt"} + assert args["steps"] == [ + { + "description": "Execute delete_file", + "status": "enabled", + } + ] + assert events[4].type == "TOOL_CALL_END" + + async def test_multiple_approval_requests(): """Test handling multiple approval requests in one update.""" + # Set require_confirmation=False to simplify the test bridge = AgentFrameworkEventBridge( run_id="test_run", thread_id="test_thread", + require_confirmation=False, ) func_call_1 = FunctionCallContent( @@ -94,3 
+150,32 @@ async def test_multiple_approval_requests(): assert events[3].type == "CUSTOM" assert events[3].name == "function_approval_request" assert events[3].value["id"] == "approval_2" + + +async def test_function_approval_request_sets_stop_flag(): + """Test that function approval request sets should_stop_after_confirm flag. + + This ensures the orchestrator stops the run after emitting the approval request, + allowing the UI to send back an approval response. + """ + bridge = AgentFrameworkEventBridge( + run_id="test_run", + thread_id="test_thread", + ) + + assert bridge.should_stop_after_confirm is False + + func_call = FunctionCallContent( + call_id="call_stop_test", + name="get_datetime", + arguments={}, + ) + approval_request = FunctionApprovalRequestContent( + id="approval_stop_test", + function_call=func_call, + ) + + update = AgentRunResponseUpdate(contents=[approval_request]) + await bridge.from_agent_run_update(update) + + assert bridge.should_stop_after_confirm is True diff --git a/python/packages/ag-ui/tests/test_message_adapters.py b/python/packages/ag-ui/tests/test_message_adapters.py index a21375b87b..9173314a28 100644 --- a/python/packages/ag-ui/tests/test_message_adapters.py +++ b/python/packages/ag-ui/tests/test_message_adapters.py @@ -2,12 +2,15 @@ """Tests for message adapters.""" +import json + import pytest -from agent_framework import ChatMessage, FunctionCallContent, Role, TextContent +from agent_framework import ChatMessage, FunctionCallContent, FunctionResultContent, Role, TextContent from agent_framework_ag_ui._message_adapters import ( agent_framework_messages_to_agui, agui_messages_to_agent_framework, + agui_messages_to_snapshot_format, extract_text_from_contents, ) @@ -43,6 +46,32 @@ def test_agent_framework_to_agui_basic(sample_agent_framework_message): assert messages[0]["id"] == "msg-123" +def test_agent_framework_to_agui_normalizes_dict_roles(): + """Dict inputs normalize unknown roles for UI compatibility.""" + messages = [ + 
{"role": "developer", "content": "policy"}, + {"role": "weird_role", "content": "payload"}, + ] + + converted = agent_framework_messages_to_agui(messages) + + assert converted[0]["role"] == "system" + assert converted[1]["role"] == "user" + + +def test_agui_snapshot_format_normalizes_roles(): + """Snapshot normalization coerces roles into supported AG-UI values.""" + messages = [ + {"role": "Developer", "content": "policy"}, + {"role": "unknown", "content": "payload"}, + ] + + normalized = agui_messages_to_snapshot_format(messages) + + assert normalized[0]["role"] == "system" + assert normalized[1]["role"] == "user" + + def test_agui_tool_result_to_agent_framework(): """Test converting AG-UI tool result message to Agent Framework.""" tool_result_message = { @@ -68,6 +97,237 @@ def test_agui_tool_result_to_agent_framework(): assert message.additional_properties.get("tool_call_id") == "call_123" +def test_agui_tool_approval_updates_tool_call_arguments(): + """Tool approval updates matching tool call arguments for snapshots and agent context.""" + messages_input = [ + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_123", + "type": "function", + "function": { + "name": "generate_task_steps", + "arguments": { + "steps": [ + {"description": "Boil water", "status": "enabled"}, + {"description": "Brew coffee", "status": "enabled"}, + {"description": "Serve coffee", "status": "enabled"}, + ] + }, + }, + } + ], + "id": "msg_1", + }, + { + "role": "tool", + "content": json.dumps( + { + "accepted": True, + "steps": [ + {"description": "Boil water", "status": "enabled"}, + {"description": "Serve coffee", "status": "enabled"}, + ], + } + ), + "toolCallId": "call_123", + "id": "msg_2", + }, + ] + + messages = agui_messages_to_agent_framework(messages_input) + + assert len(messages) == 2 + assistant_msg = messages[0] + func_call = next(content for content in assistant_msg.contents if isinstance(content, FunctionCallContent)) + assert 
func_call.arguments == { + "steps": [ + {"description": "Boil water", "status": "enabled"}, + {"description": "Brew coffee", "status": "disabled"}, + {"description": "Serve coffee", "status": "enabled"}, + ] + } + assert messages_input[0]["tool_calls"][0]["function"]["arguments"] == { + "steps": [ + {"description": "Boil water", "status": "enabled"}, + {"description": "Brew coffee", "status": "disabled"}, + {"description": "Serve coffee", "status": "enabled"}, + ] + } + + from agent_framework import FunctionApprovalResponseContent + + approval_msg = messages[1] + approval_content = next( + content for content in approval_msg.contents if isinstance(content, FunctionApprovalResponseContent) + ) + assert approval_content.function_call.parse_arguments() == { + "steps": [ + {"description": "Boil water", "status": "enabled"}, + {"description": "Serve coffee", "status": "enabled"}, + ] + } + assert approval_content.additional_properties is not None + assert approval_content.additional_properties.get("ag_ui_state_args") == { + "steps": [ + {"description": "Boil water", "status": "enabled"}, + {"description": "Brew coffee", "status": "disabled"}, + {"description": "Serve coffee", "status": "enabled"}, + ] + } + + +def test_agui_tool_approval_from_confirm_changes_maps_to_function_call(): + """Confirm_changes approvals map back to the original tool call when metadata is present.""" + messages_input = [ + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_tool", + "type": "function", + "function": {"name": "get_datetime", "arguments": {}}, + }, + { + "id": "call_confirm", + "type": "function", + "function": { + "name": "confirm_changes", + "arguments": {"function_call_id": "call_tool"}, + }, + }, + ], + "id": "msg_1", + }, + { + "role": "tool", + "content": json.dumps({"accepted": True, "function_call_id": "call_tool"}), + "toolCallId": "call_confirm", + "id": "msg_2", + }, + ] + + messages = agui_messages_to_agent_framework(messages_input) + + from 
agent_framework import FunctionApprovalResponseContent + + approval_msg = messages[1] + approval_content = next( + content for content in approval_msg.contents if isinstance(content, FunctionApprovalResponseContent) + ) + + assert approval_content.function_call.call_id == "call_tool" + assert approval_content.function_call.name == "get_datetime" + assert approval_content.function_call.parse_arguments() == {} + assert messages_input[0]["tool_calls"][0]["function"]["arguments"] == {} + + +def test_agui_tool_approval_from_confirm_changes_falls_back_to_sibling_call(): + """Confirm_changes approvals map to the only sibling tool call when metadata is missing.""" + messages_input = [ + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_tool", + "type": "function", + "function": {"name": "get_datetime", "arguments": {}}, + }, + { + "id": "call_confirm", + "type": "function", + "function": {"name": "confirm_changes", "arguments": {}}, + }, + ], + "id": "msg_1", + }, + { + "role": "tool", + "content": json.dumps( + { + "accepted": True, + "steps": [{"description": "Approve get_datetime", "status": "enabled"}], + } + ), + "toolCallId": "call_confirm", + "id": "msg_2", + }, + ] + + messages = agui_messages_to_agent_framework(messages_input) + + from agent_framework import FunctionApprovalResponseContent + + approval_msg = messages[1] + approval_content = next( + content for content in approval_msg.contents if isinstance(content, FunctionApprovalResponseContent) + ) + + assert approval_content.function_call.call_id == "call_tool" + assert approval_content.function_call.name == "get_datetime" + assert approval_content.function_call.parse_arguments() == {} + assert messages_input[0]["tool_calls"][0]["function"]["arguments"] == {} + + +def test_agui_tool_approval_from_generate_task_steps_maps_to_function_call(): + """Approval tool payloads map to the referenced function call when function_call_id is present.""" + messages_input = [ + { + "role": 
"assistant", + "content": "", + "tool_calls": [ + { + "id": "call_tool", + "type": "function", + "function": {"name": "get_datetime", "arguments": {}}, + }, + { + "id": "call_steps", + "type": "function", + "function": { + "name": "generate_task_steps", + "arguments": { + "function_name": "get_datetime", + "function_call_id": "call_tool", + "function_arguments": {}, + "steps": [{"description": "Execute get_datetime", "status": "enabled"}], + }, + }, + }, + ], + "id": "msg_1", + }, + { + "role": "tool", + "content": json.dumps( + { + "accepted": True, + "steps": [{"description": "Execute get_datetime", "status": "enabled"}], + } + ), + "toolCallId": "call_steps", + "id": "msg_2", + }, + ] + + messages = agui_messages_to_agent_framework(messages_input) + + from agent_framework import FunctionApprovalResponseContent + + approval_msg = messages[1] + approval_content = next( + content for content in approval_msg.contents if isinstance(content, FunctionApprovalResponseContent) + ) + + assert approval_content.function_call.call_id == "call_tool" + assert approval_content.function_call.name == "get_datetime" + assert approval_content.function_call.parse_arguments() == {} + + def test_agui_multiple_messages_to_agent_framework(): """Test converting multiple AG-UI messages.""" messages_input = [ @@ -278,3 +538,119 @@ def test_extract_text_from_custom_contents(): result = extract_text_from_contents(contents) assert result == "Custom Mixed" + + +# Tests for FunctionResultContent serialization in agent_framework_messages_to_agui + + +def test_agent_framework_to_agui_function_result_dict(): + """Test converting FunctionResultContent with dict result to AG-UI.""" + msg = ChatMessage( + role=Role.TOOL, + contents=[FunctionResultContent(call_id="call-123", result={"key": "value", "count": 42})], + message_id="msg-789", + ) + + messages = agent_framework_messages_to_agui([msg]) + + assert len(messages) == 1 + agui_msg = messages[0] + assert agui_msg["role"] == "tool" + assert 
agui_msg["toolCallId"] == "call-123" + assert agui_msg["content"] == '{"key": "value", "count": 42}' + + +def test_agent_framework_to_agui_function_result_none(): + """Test converting FunctionResultContent with None result to AG-UI.""" + msg = ChatMessage( + role=Role.TOOL, + contents=[FunctionResultContent(call_id="call-123", result=None)], + message_id="msg-789", + ) + + messages = agent_framework_messages_to_agui([msg]) + + assert len(messages) == 1 + agui_msg = messages[0] + # None serializes as JSON null + assert agui_msg["content"] == "null" + + +def test_agent_framework_to_agui_function_result_string(): + """Test converting FunctionResultContent with string result to AG-UI.""" + msg = ChatMessage( + role=Role.TOOL, + contents=[FunctionResultContent(call_id="call-123", result="plain text result")], + message_id="msg-789", + ) + + messages = agent_framework_messages_to_agui([msg]) + + assert len(messages) == 1 + agui_msg = messages[0] + assert agui_msg["content"] == "plain text result" + + +def test_agent_framework_to_agui_function_result_empty_list(): + """Test converting FunctionResultContent with empty list result to AG-UI.""" + msg = ChatMessage( + role=Role.TOOL, + contents=[FunctionResultContent(call_id="call-123", result=[])], + message_id="msg-789", + ) + + messages = agent_framework_messages_to_agui([msg]) + + assert len(messages) == 1 + agui_msg = messages[0] + # Empty list serializes as JSON empty array + assert agui_msg["content"] == "[]" + + +def test_agent_framework_to_agui_function_result_single_text_content(): + """Test converting FunctionResultContent with single TextContent-like item.""" + from dataclasses import dataclass + + @dataclass + class MockTextContent: + text: str + + msg = ChatMessage( + role=Role.TOOL, + contents=[FunctionResultContent(call_id="call-123", result=[MockTextContent("Hello from MCP!")])], + message_id="msg-789", + ) + + messages = agent_framework_messages_to_agui([msg]) + + assert len(messages) == 1 + agui_msg = 
messages[0] + # TextContent text is extracted and serialized as JSON array + assert agui_msg["content"] == '["Hello from MCP!"]' + + +def test_agent_framework_to_agui_function_result_multiple_text_contents(): + """Test converting FunctionResultContent with multiple TextContent-like items.""" + from dataclasses import dataclass + + @dataclass + class MockTextContent: + text: str + + msg = ChatMessage( + role=Role.TOOL, + contents=[ + FunctionResultContent( + call_id="call-123", + result=[MockTextContent("First result"), MockTextContent("Second result")], + ) + ], + message_id="msg-789", + ) + + messages = agent_framework_messages_to_agui([msg]) + + assert len(messages) == 1 + agui_msg = messages[0] + # Multiple items should return JSON array + assert agui_msg["content"] == '["First result", "Second result"]' diff --git a/python/packages/ag-ui/tests/test_message_hygiene.py b/python/packages/ag-ui/tests/test_message_hygiene.py index ba775fa7d9..380ff438bd 100644 --- a/python/packages/ag-ui/tests/test_message_hygiene.py +++ b/python/packages/ag-ui/tests/test_message_hygiene.py @@ -2,10 +2,7 @@ from agent_framework import ChatMessage, FunctionCallContent, FunctionResultContent, TextContent -from agent_framework_ag_ui._orchestration._message_hygiene import ( - deduplicate_messages, - sanitize_tool_history, -) +from agent_framework_ag_ui._message_adapters import _deduplicate_messages, _sanitize_tool_history def test_sanitize_tool_history_injects_confirm_changes_result() -> None: @@ -26,7 +23,7 @@ def test_sanitize_tool_history_injects_confirm_changes_result() -> None: ), ] - sanitized = sanitize_tool_history(messages) + sanitized = _sanitize_tool_history(messages) tool_messages = [ msg for msg in sanitized if (msg.role.value if hasattr(msg.role, "value") else str(msg.role)) == "tool" @@ -48,6 +45,6 @@ def test_deduplicate_messages_prefers_non_empty_tool_results() -> None: ), ] - deduped = deduplicate_messages(messages) + deduped = _deduplicate_messages(messages) assert 
len(deduped) == 1 assert deduped[0].contents[0].result == "result data" diff --git a/python/packages/ag-ui/tests/test_orchestrators.py b/python/packages/ag-ui/tests/test_orchestrators.py index af90ea2e88..8c00602538 100644 --- a/python/packages/ag-ui/tests/test_orchestrators.py +++ b/python/packages/ag-ui/tests/test_orchestrators.py @@ -42,6 +42,29 @@ async def run_stream( yield AgentRunResponseUpdate(contents=[TextContent(text="ok")], role="assistant") +class RecordingAgent: + """Agent stub that captures messages passed to run_stream.""" + + def __init__(self) -> None: + self.chat_options = SimpleNamespace(tools=[], response_format=None) + self.tools: list[Any] = [] + self.chat_client = SimpleNamespace( + function_invocation_configuration=FunctionInvocationConfiguration(), + ) + self.seen_messages: list[Any] | None = None + + async def run_stream( + self, + messages: list[Any], + *, + thread: Any, + tools: list[Any] | None = None, + **kwargs: Any, + ) -> AsyncGenerator[AgentRunResponseUpdate, None]: + self.seen_messages = messages + yield AgentRunResponseUpdate(contents=[TextContent(text="ok")], role="assistant") + + async def test_default_orchestrator_merges_client_tools() -> None: """Client tool declarations are merged with server tools before running agent.""" @@ -151,3 +174,104 @@ async def test_default_orchestrator_with_snake_case_ids() -> None: last_event = events[-1] assert last_event.run_id == "test-snakecase-runid" assert last_event.thread_id == "test-snakecase-threadid" + + +async def test_state_context_injected_when_tool_call_state_mismatch() -> None: + """State context should be injected when current state differs from tool call args.""" + + agent = RecordingAgent() + orchestrator = DefaultOrchestrator() + + tool_recipe = {"title": "Salad", "special_preferences": []} + current_recipe = {"title": "Salad", "special_preferences": ["Vegetarian"]} + + input_data = { + "state": {"recipe": current_recipe}, + "messages": [ + {"role": "system", "content": 
"Instructions"}, + { + "role": "assistant", + "tool_calls": [ + { + "id": "call_1", + "type": "function", + "function": {"name": "update_recipe", "arguments": {"recipe": tool_recipe}}, + } + ], + }, + {"role": "user", "content": "What are the dietary preferences?"}, + ], + } + + context = ExecutionContext( + input_data=input_data, + agent=agent, + config=AgentConfig( + state_schema={"recipe": {"type": "object"}}, + predict_state_config={"recipe": {"tool": "update_recipe", "tool_argument": "recipe"}}, + require_confirmation=False, + ), + ) + + async for _event in orchestrator.run(context): + pass + + assert agent.seen_messages is not None + state_messages = [] + for msg in agent.seen_messages: + role_value = msg.role.value if hasattr(msg.role, "value") else str(msg.role) + if role_value != "system": + continue + for content in msg.contents or []: + if isinstance(content, TextContent) and content.text.startswith("Current state of the application:"): + state_messages.append(content.text) + assert state_messages + assert "Vegetarian" in state_messages[0] + + +async def test_state_context_not_injected_when_tool_call_matches_state() -> None: + """State context should be skipped when tool call args match current state.""" + + agent = RecordingAgent() + orchestrator = DefaultOrchestrator() + + input_data = { + "messages": [ + {"role": "system", "content": "Instructions"}, + { + "role": "assistant", + "tool_calls": [ + { + "id": "call_1", + "type": "function", + "function": {"name": "update_recipe", "arguments": {"recipe": {}}}, + } + ], + }, + {"role": "user", "content": "What are the dietary preferences?"}, + ], + } + + context = ExecutionContext( + input_data=input_data, + agent=agent, + config=AgentConfig( + state_schema={"recipe": {"type": "object"}}, + predict_state_config={"recipe": {"tool": "update_recipe", "tool_argument": "recipe"}}, + require_confirmation=False, + ), + ) + + async for _event in orchestrator.run(context): + pass + + assert agent.seen_messages is 
not None + state_messages = [] + for msg in agent.seen_messages: + role_value = msg.role.value if hasattr(msg.role, "value") else str(msg.role) + if role_value != "system": + continue + for content in msg.contents or []: + if isinstance(content, TextContent) and content.text.startswith("Current state of the application:"): + state_messages.append(content.text) + assert not state_messages diff --git a/python/packages/ag-ui/tests/test_orchestrators_coverage.py b/python/packages/ag-ui/tests/test_orchestrators_coverage.py index 1da11bffbc..041e25c3d2 100644 --- a/python/packages/ag-ui/tests/test_orchestrators_coverage.py +++ b/python/packages/ag-ui/tests/test_orchestrators_coverage.py @@ -62,7 +62,7 @@ async def test_human_in_the_loop_json_decode_error() -> None: agent=agent, config=AgentConfig(), ) - context.set_messages(messages) + context.set_messages(messages, normalize=False) assert orchestrator.can_handle(context) @@ -385,8 +385,8 @@ async def test_state_context_injection() -> None: assert "banana" in system_messages[0].contents[0].text -async def test_no_state_context_injection_with_tool_calls() -> None: - """Test state context is NOT injected if conversation has tool calls.""" +async def test_state_context_injection_with_tool_calls_and_input_state() -> None: + """Test state context is injected when state is provided, even with tool calls.""" from agent_framework import ChatMessage, FunctionCallContent, FunctionResultContent, TextContent messages = [ @@ -420,13 +420,13 @@ async def test_no_state_context_injection_with_tool_calls() -> None: async for event in orchestrator.run(context): events.append(event) - # Should NOT inject state context system message since conversation has tool calls + # Should inject state context system message because input state is provided system_messages = [ msg for msg in agent.messages_received if (msg.role.value if hasattr(msg.role, "value") else str(msg.role)) == "system" ] - assert len(system_messages) == 0 + assert 
len(system_messages) == 1 async def test_structured_output_processing() -> None: @@ -685,6 +685,54 @@ async def test_confirm_changes_with_invalid_json_fallback() -> None: assert len(user_messages) == 1 +async def test_confirm_changes_closes_active_message_before_finish() -> None: + """Confirm-changes flow closes any active text message before run finishes.""" + from ag_ui.core import TextMessageEndEvent, TextMessageStartEvent + from agent_framework import FunctionCallContent, FunctionResultContent + + updates = [ + AgentRunResponseUpdate( + contents=[ + FunctionCallContent( + name="write_document_local", + call_id="call_1", + arguments='{"document": "Draft"}', + ) + ] + ), + AgentRunResponseUpdate(contents=[FunctionResultContent(call_id="call_1", result="Done")]), + ] + + orchestrator = DefaultOrchestrator() + input_data: dict[str, Any] = {"messages": [{"role": "user", "content": "Start"}]} + agent = StubAgent( + chat_options=DEFAULT_CHAT_OPTIONS, + updates=updates, + ) + context = TestExecutionContext( + input_data=input_data, + agent=agent, + config=AgentConfig( + predict_state_config={"document": {"tool": "write_document_local", "tool_argument": "document"}}, + require_confirmation=True, + ), + ) + + events: list[Any] = [] + async for event in orchestrator.run(context): + events.append(event) + + start_events = [e for e in events if isinstance(e, TextMessageStartEvent)] + end_events = [e for e in events if isinstance(e, TextMessageEndEvent)] + assert len(start_events) == 1 + assert len(end_events) == 1 + assert end_events[0].message_id == start_events[0].message_id + + end_index = events.index(end_events[0]) + finished_index = events.index([e for e in events if e.type == "RUN_FINISHED"][0]) + assert end_index < finished_index + + async def test_tool_result_kept_when_call_id_matches() -> None: """Test tool result is kept when call_id matches pending tool calls.""" from agent_framework import ChatMessage, FunctionCallContent, FunctionResultContent diff --git 
a/python/packages/ag-ui/tests/test_utils.py b/python/packages/ag-ui/tests/test_utils.py index 4a6d0360bd..b077468b81 100644 --- a/python/packages/ag-ui/tests/test_utils.py +++ b/python/packages/ag-ui/tests/test_utils.py @@ -5,7 +5,11 @@ from dataclasses import dataclass from datetime import date, datetime -from agent_framework_ag_ui._utils import generate_event_id, make_json_safe, merge_state +from agent_framework_ag_ui._utils import ( + generate_event_id, + make_json_safe, + merge_state, +) def test_generate_event_id(): diff --git a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py index a5b169fbbf..b29b13fbd3 100644 --- a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py +++ b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py @@ -1,5 +1,4 @@ # Copyright (c) Microsoft. All rights reserved. - from collections.abc import AsyncIterable, MutableMapping, MutableSequence, Sequence from typing import Any, ClassVar, Final, TypeVar @@ -13,7 +12,10 @@ ChatResponse, ChatResponseUpdate, CitationAnnotation, + CodeInterpreterToolCallContent, + CodeInterpreterToolResultContent, Contents, + ErrorContent, FinishReason, FunctionCallContent, FunctionResultContent, @@ -21,6 +23,8 @@ HostedFileContent, HostedMCPTool, HostedWebSearchTool, + MCPServerToolCallContent, + MCPServerToolResultContent, Role, TextContent, TextReasoningContent, @@ -45,6 +49,8 @@ BetaTextBlock, BetaUsage, ) +from anthropic.types.beta.beta_bash_code_execution_tool_result_error import BetaBashCodeExecutionToolResultError +from anthropic.types.beta.beta_code_execution_tool_result_error import BetaCodeExecutionToolResultError from pydantic import SecretStr, ValidationError logger = get_logger("agent_framework.anthropic") @@ -505,7 +511,7 @@ def _process_message(self, message: BetaMessage) -> ChatResponse: usage_details=self._parse_usage_from_anthropic(message.usage), model_id=message.model, 
finish_reason=FINISH_REASON_MAP.get(message.stop_reason) if message.stop_reason else None, - raw_response=message, + raw_representation=message, ) def _process_stream_event(self, event: BetaRawMessageStreamEvent) -> ChatResponseUpdate | None: @@ -530,13 +536,14 @@ def _process_stream_event(self, event: BetaRawMessageStreamEvent) -> ChatRespons finish_reason=FINISH_REASON_MAP.get(event.message.stop_reason) if event.message.stop_reason else None, - raw_response=event, + raw_representation=event, ) case "message_delta": usage = self._parse_usage_from_anthropic(event.usage) return ChatResponseUpdate( contents=[UsageContent(details=usage, raw_representation=event.usage)] if usage else [], - raw_response=event, + finish_reason=FINISH_REASON_MAP.get(event.delta.stop_reason) if event.delta.stop_reason else None, + raw_representation=event, ) case "message_stop": logger.debug("Received message_stop event; no content to process.") @@ -544,13 +551,13 @@ def _process_stream_event(self, event: BetaRawMessageStreamEvent) -> ChatRespons contents = self._parse_contents_from_anthropic([event.content_block]) return ChatResponseUpdate( contents=contents, - raw_response=event, + raw_representation=event, ) case "content_block_delta": contents = self._parse_contents_from_anthropic([event.delta]) return ChatResponseUpdate( contents=contents, - raw_response=event, + raw_representation=event, ) case "content_block_stop": logger.debug("Received content_block_stop event; no content to process.") @@ -588,23 +595,49 @@ def _parse_contents_from_anthropic( ) case "tool_use" | "mcp_tool_use" | "server_tool_use": self._last_call_id_name = (content_block.id, content_block.name) - contents.append( - FunctionCallContent( - call_id=content_block.id, - name=content_block.name, - arguments=content_block.input, - raw_representation=content_block, + if content_block.type == "mcp_tool_use": + contents.append( + MCPServerToolCallContent( + call_id=content_block.id, + tool_name=content_block.name, + 
server_name=None, + arguments=content_block.input, + raw_representation=content_block, + ) + ) + elif "code_execution" in (content_block.name or ""): + contents.append( + CodeInterpreterToolCallContent( + call_id=content_block.id, + inputs=[TextContent(text=str(content_block.input), raw_representation=content_block)], + raw_representation=content_block, + ) + ) + else: + contents.append( + FunctionCallContent( + call_id=content_block.id, + name=content_block.name, + arguments=content_block.input, + raw_representation=content_block, + ) ) - ) case "mcp_tool_result": call_id, name = self._last_call_id_name or (None, None) + parsed_output: list[Contents] | None = None + if content_block.content: + if isinstance(content_block.content, list): + parsed_output = self._parse_contents_from_anthropic(content_block.content) + elif isinstance(content_block.content, (str, bytes)): + parsed_output = [ + TextContent(text=str(content_block.content), raw_representation=content_block) + ] + else: + parsed_output = self._parse_contents_from_anthropic([content_block.content]) contents.append( - FunctionResultContent( + MCPServerToolResultContent( call_id=content_block.tool_use_id, - name=name if name and call_id == content_block.tool_use_id else "mcp_tool", - result=self._parse_contents_from_anthropic(content_block.content) - if isinstance(content_block.content, list) - else content_block.content, + output=parsed_output, raw_representation=content_block, ) ) @@ -618,30 +651,183 @@ def _parse_contents_from_anthropic( raw_representation=content_block, ) ) - case ( - "code_execution_tool_result" - | "bash_code_execution_tool_result" - | "text_editor_code_execution_tool_result" - ): - call_id, name = self._last_call_id_name or (None, None) - if ( - content_block.content - and ( - content_block.content.type == "bash_code_execution_result" - or content_block.content.type == "code_execution_result" + case "code_execution_tool_result": + code_outputs: list[Contents] = [] + if 
content_block.content: + if isinstance(content_block.content, BetaCodeExecutionToolResultError): + code_outputs.append( + ErrorContent( + message=content_block.content.error_code, + raw_representation=content_block.content, + ) + ) + else: + if content_block.content.stdout: + code_outputs.append( + TextContent( + text=content_block.content.stdout, + raw_representation=content_block.content, + ) + ) + if content_block.content.stderr: + code_outputs.append( + ErrorContent( + message=content_block.content.stderr, + raw_representation=content_block.content, + ) + ) + for code_file_content in content_block.content.content: + code_outputs.append( + HostedFileContent( + file_id=code_file_content.file_id, raw_representation=code_file_content + ) + ) + contents.append( + CodeInterpreterToolResultContent( + call_id=content_block.tool_use_id, + raw_representation=content_block, + outputs=code_outputs, ) - and content_block.content.content - ): - for result_content in content_block.content.content: - if hasattr(result_content, "file_id"): + ) + case "bash_code_execution_tool_result": + bash_outputs: list[Contents] = [] + if content_block.content: + if isinstance( + content_block.content, + BetaBashCodeExecutionToolResultError, + ): + bash_outputs.append( + ErrorContent( + message=content_block.content.error_code, + raw_representation=content_block.content, + ) + ) + else: + if content_block.content.stdout: + bash_outputs.append( + TextContent( + text=content_block.content.stdout, + raw_representation=content_block.content, + ) + ) + if content_block.content.stderr: + bash_outputs.append( + ErrorContent( + message=content_block.content.stderr, + raw_representation=content_block.content, + ) + ) + for bash_file_content in content_block.content.content: contents.append( - HostedFileContent(file_id=result_content.file_id, raw_representation=result_content) + HostedFileContent( + file_id=bash_file_content.file_id, raw_representation=bash_file_content + ) ) contents.append( 
FunctionResultContent( call_id=content_block.tool_use_id, - name=name if name and call_id == content_block.tool_use_id else "code_execution_tool", - result=content_block.content, + name=content_block.type, + result=bash_outputs, + raw_representation=content_block, + ) + ) + case "text_editor_code_execution_tool_result": + text_editor_outputs: list[Contents] = [] + match content_block.content.type: + case "text_editor_code_execution_tool_result_error": + text_editor_outputs.append( + ErrorContent( + message=content_block.content.error_code + and getattr(content_block.content, "error_message", ""), + raw_representation=content_block.content, + ) + ) + case "text_editor_code_execution_view_result": + annotations = ( + [ + CitationAnnotation( + raw_representation=content_block.content, + annotated_regions=[ + TextSpanRegion( + start_index=content_block.content.start_line, + end_index=content_block.content.start_line + + (content_block.content.num_lines or 0), + ) + ], + ) + ] + if content_block.content.num_lines is not None + and content_block.content.start_line is not None + else None + ) + text_editor_outputs.append( + TextContent( + text=content_block.content.content, + annotations=annotations, + raw_representation=content_block.content, + ) + ) + case "text_editor_code_execution_str_replace_result": + old_annotation = ( + CitationAnnotation( + raw_representation=content_block.content, + annotated_regions=[ + TextSpanRegion( + start_index=content_block.content.old_start or 0, + end_index=( + (content_block.content.old_start or 0) + + (content_block.content.old_lines or 0) + ), + ) + ], + ) + if content_block.content.old_lines is not None + and content_block.content.old_start is not None + else None + ) + new_annotation = ( + CitationAnnotation( + raw_representation=content_block.content, + snippet="\n".join(content_block.content.lines) + if content_block.content.lines + else None, + annotated_regions=[ + TextSpanRegion( + start_index=content_block.content.new_start 
or 0, + end_index=( + (content_block.content.new_start or 0) + + (content_block.content.new_lines or 0) + ), + ) + ], + ) + if content_block.content.new_lines is not None + and content_block.content.new_start is not None + else None + ) + annotations = [ann for ann in [old_annotation, new_annotation] if ann is not None] + + text_editor_outputs.append( + TextContent( + text=( + "\n".join(content_block.content.lines) if content_block.content.lines else "" + ), + annotations=annotations or None, + raw_representation=content_block.content, + ) + ) + case "text_editor_code_execution_create_result": + text_editor_outputs.append( + TextContent( + text=f"File update: {content_block.content.is_file_update}", + raw_representation=content_block.content, + ) + ) + contents.append( + FunctionResultContent( + call_id=content_block.tool_use_id, + name=content_block.type, + result=text_editor_outputs, raw_representation=content_block, ) ) diff --git a/python/packages/anthropic/pyproject.toml b/python/packages/anthropic/pyproject.toml index 3488c05bca..73528ccfec 100644 --- a/python/packages/anthropic/pyproject.toml +++ b/python/packages/anthropic/pyproject.toml @@ -4,7 +4,7 @@ description = "Anthropic integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251223" +version = "1.0.0b260107" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/azure-ai-search/pyproject.toml b/python/packages/azure-ai-search/pyproject.toml index 7c14344ecc..3c782022a6 100644 --- a/python/packages/azure-ai-search/pyproject.toml +++ b/python/packages/azure-ai-search/pyproject.toml @@ -4,7 +4,7 @@ description = "Azure AI Search integration for Microsoft Agent Framework." 
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251223" +version = "1.0.0b260107" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_client.py index e10fc19068..2e8edc7e47 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_client.py @@ -2,7 +2,7 @@ import sys from collections.abc import Mapping, MutableSequence -from typing import Any, ClassVar, TypeVar +from typing import Any, ClassVar, TypeVar, cast from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, @@ -300,13 +300,26 @@ def _create_text_format_config( raise ServiceInvalidRequestError("response_format must be a Pydantic model or mapping.") async def _get_agent_reference_or_create( - self, run_options: dict[str, Any], messages_instructions: str | None + self, + run_options: dict[str, Any], + messages_instructions: str | None, + chat_options: ChatOptions | None = None, ) -> dict[str, str]: """Determine which agent to use and create if needed. + Args: + run_options: The prepared options for the API call. + messages_instructions: Instructions extracted from messages. + chat_options: The chat options containing response_format and other settings. + Returns: dict[str, str]: The agent reference to use. """ + # chat_options is needed separately because the base class excludes response_format + # from run_options (transforming it to text/text_format for OpenAI). Azure's agent + # creation API requires the original response_format to build its own config format. + if chat_options is None: + chat_options = ChatOptions() # Agent name must be explicitly provided by the user. 
if self.agent_name is None: raise ServiceInitializationError( @@ -341,8 +354,14 @@ async def _get_agent_reference_or_create( if "top_p" in run_options: args["top_p"] = run_options["top_p"] - if "response_format" in run_options: - response_format = run_options["response_format"] + # response_format is accessed from chat_options or additional_properties + # since the base class excludes it from run_options + response_format: Any = ( + chat_options.response_format + if chat_options.response_format is not None + else chat_options.additional_properties.get("response_format") + ) + if response_format: args["text"] = PromptAgentDefinitionText(format=self._create_text_format_config(response_format)) # Combine instructions from messages and options @@ -379,20 +398,67 @@ async def _prepare_options( """Take ChatOptions and create the specific options for Azure AI.""" prepared_messages, instructions = self._prepare_messages_for_azure_ai(messages) run_options = await super()._prepare_options(prepared_messages, chat_options, **kwargs) + + # WORKAROUND: Azure AI Projects 'create responses' API has schema divergence from OpenAI's + # Responses API. Azure requires 'type' at item level and 'annotations' in content items. + # See: https://github.com/Azure/azure-sdk-for-python/issues/44493 + # See: https://github.com/microsoft/agent-framework/issues/2926 + # TODO(agent-framework#2926): Remove this workaround when Azure SDK aligns with OpenAI schema. + if "input" in run_options and isinstance(run_options["input"], list): + run_options["input"] = self._transform_input_for_azure_ai(cast(list[dict[str, Any]], run_options["input"])) + if not self._is_application_endpoint: # Application-scoped response APIs do not support "agent" property. 
- agent_reference = await self._get_agent_reference_or_create(run_options, instructions) + agent_reference = await self._get_agent_reference_or_create(run_options, instructions, chat_options) run_options["extra_body"] = {"agent": agent_reference} # Remove properties that are not supported on request level # but were configured on agent level - exclude = ["model", "tools", "response_format", "temperature", "top_p"] + exclude = ["model", "tools", "response_format", "temperature", "top_p", "text", "text_format"] for property in exclude: run_options.pop(property, None) return run_options + def _transform_input_for_azure_ai(self, input_items: list[dict[str, Any]]) -> list[dict[str, Any]]: + """Transform input items to match Azure AI Projects expected schema. + + WORKAROUND: Azure AI Projects 'create responses' API expects a different schema than OpenAI's + Responses API. Azure requires 'type' at the item level, and requires 'annotations' + only for output_text content items (assistant messages), not for input_text content items + (user messages). This helper adapts the OpenAI-style input to the Azure schema. + + See: https://github.com/Azure/azure-sdk-for-python/issues/44493 + TODO(agent-framework#2926): Remove when Azure SDK aligns with OpenAI schema. 
+ """ + transformed: list[dict[str, Any]] = [] + for item in input_items: + new_item: dict[str, Any] = dict(item) + + # Add 'type': 'message' at item level for role-based items + if "role" in new_item and "type" not in new_item: + new_item["type"] = "message" + + # Add 'annotations' only to output_text content items (assistant messages) + # User messages (input_text) do NOT support annotations in Azure AI + if "content" in new_item and isinstance(new_item["content"], list): + new_content: list[dict[str, Any] | Any] = [] + for content_item in new_item["content"]: + if isinstance(content_item, dict): + new_content_item: dict[str, Any] = dict(content_item) + # Only add annotations to output_text (assistant content) + if new_content_item.get("type") == "output_text" and "annotations" not in new_content_item: + new_content_item["annotations"] = [] + new_content.append(new_content_item) + else: + new_content.append(content_item) + new_item["content"] = new_content + + transformed.append(new_item) + + return transformed + @override def _get_current_conversation_id(self, chat_options: ChatOptions, **kwargs: Any) -> str | None: """Get the current conversation ID from chat options or kwargs.""" diff --git a/python/packages/azure-ai/pyproject.toml b/python/packages/azure-ai/pyproject.toml index 67d89eb81b..37491b42e5 100644 --- a/python/packages/azure-ai/pyproject.toml +++ b/python/packages/azure-ai/pyproject.toml @@ -4,7 +4,7 @@ description = "Azure AI Foundry integration for Microsoft Agent Framework." 
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251223" +version = "1.0.0b260107" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/azure-ai/tests/test_azure_ai_client.py b/python/packages/azure-ai/tests/test_azure_ai_client.py index 028e8fbdb8..3b1b500ede 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_client.py @@ -286,6 +286,93 @@ async def test_azure_ai_client_prepare_messages_for_azure_ai_no_system_messages( assert instructions is None +def test_azure_ai_client_transform_input_for_azure_ai(mock_project_client: MagicMock) -> None: + """Test _transform_input_for_azure_ai adds required fields for Azure AI schema. + + WORKAROUND TEST: Azure AI Projects API requires 'type' at item level and + 'annotations' in output_text content items, which OpenAI's Responses API does not require. 
+ See: https://github.com/Azure/azure-sdk-for-python/issues/44493 + See: https://github.com/microsoft/agent-framework/issues/2926 + """ + client = create_test_azure_ai_client(mock_project_client) + + # Input in OpenAI Responses API format (what agent-framework generates) + openai_format_input = [ + { + "role": "user", + "content": [ + {"type": "input_text", "text": "Hello"}, + ], + }, + { + "role": "assistant", + "content": [ + {"type": "output_text", "text": "Hi there!"}, + ], + }, + ] + + result = client._transform_input_for_azure_ai(openai_format_input) # type: ignore + + # Verify 'type': 'message' added at item level + assert result[0]["type"] == "message" + assert result[1]["type"] == "message" + + # Verify 'annotations' added ONLY to output_text (assistant) content, NOT input_text (user) + assert result[0]["content"][0]["type"] == "input_text" # user content type preserved + assert "annotations" not in result[0]["content"][0] # user message - no annotations + assert result[1]["content"][0]["type"] == "output_text" # assistant content type preserved + assert result[1]["content"][0]["annotations"] == [] # assistant message - has annotations + + # Verify original fields preserved + assert result[0]["role"] == "user" + assert result[0]["content"][0]["text"] == "Hello" + assert result[1]["role"] == "assistant" + assert result[1]["content"][0]["text"] == "Hi there!" 
+ + +def test_azure_ai_client_transform_input_preserves_existing_fields(mock_project_client: MagicMock) -> None: + """Test _transform_input_for_azure_ai preserves existing type and annotations.""" + client = create_test_azure_ai_client(mock_project_client) + + # Input that already has the fields (shouldn't duplicate) + input_with_fields = [ + { + "type": "message", + "role": "assistant", + "content": [ + {"type": "output_text", "text": "Hello", "annotations": [{"some": "annotation"}]}, + ], + }, + ] + + result = client._transform_input_for_azure_ai(input_with_fields) # type: ignore + + # Should preserve existing values, not overwrite + assert result[0]["type"] == "message" + assert result[0]["content"][0]["annotations"] == [{"some": "annotation"}] + + +def test_azure_ai_client_transform_input_handles_non_dict_content(mock_project_client: MagicMock) -> None: + """Test _transform_input_for_azure_ai handles non-dict content items.""" + client = create_test_azure_ai_client(mock_project_client) + + # Input with string content (edge case) + input_with_string_content = [ + { + "role": "user", + "content": ["plain string content"], + }, + ] + + result = client._transform_input_for_azure_ai(input_with_string_content) # type: ignore + + # Should add 'type': 'message' at item level even with non-dict content + assert result[0]["type"] == "message" + # Non-dict content items should be preserved without modification + assert result[0]["content"] == ["plain string content"] + + async def test_azure_ai_client_prepare_options_basic(mock_project_client: MagicMock) -> None: """Test prepare_options basic functionality.""" client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") @@ -636,9 +723,10 @@ async def test_azure_ai_client_agent_creation_with_response_format( mock_agent.version = "1.0" mock_project_client.agents.create_version = AsyncMock(return_value=mock_agent) - run_options = {"model": "test-model", "response_format": 
ResponseFormatModel} + run_options = {"model": "test-model"} + chat_options = ChatOptions(response_format=ResponseFormatModel) - await client._get_agent_reference_or_create(run_options, None) # type: ignore + await client._get_agent_reference_or_create(run_options, None, chat_options) # type: ignore # Verify agent was created with response format configuration call_args = mock_project_client.agents.create_version.call_args @@ -689,19 +777,18 @@ async def test_azure_ai_client_agent_creation_with_mapping_response_format( "additionalProperties": False, } - run_options = { - "model": "test-model", - "response_format": { - "type": "json_schema", - "json_schema": { - "name": runtime_schema["title"], - "strict": True, - "schema": runtime_schema, - }, + run_options = {"model": "test-model"} + response_format_mapping = { + "type": "json_schema", + "json_schema": { + "name": runtime_schema["title"], + "strict": True, + "schema": runtime_schema, }, } + chat_options = ChatOptions(response_format=response_format_mapping) # type: ignore - await client._get_agent_reference_or_create(run_options, None) # type: ignore + await client._get_agent_reference_or_create(run_options, None, chat_options) # type: ignore call_args = mock_project_client.agents.create_version.call_args created_definition = call_args[1]["definition"] @@ -718,7 +805,7 @@ async def test_azure_ai_client_agent_creation_with_mapping_response_format( async def test_azure_ai_client_prepare_options_excludes_response_format( mock_project_client: MagicMock, ) -> None: - """Test that prepare_options excludes response_format from final run options.""" + """Test that prepare_options excludes response_format, text, and text_format from final run options.""" client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] @@ -728,7 +815,12 @@ async def 
test_azure_ai_client_prepare_options_excludes_response_format( patch.object( client.__class__.__bases__[0], "_prepare_options", - return_value={"model": "test-model", "response_format": ResponseFormatModel}, + return_value={ + "model": "test-model", + "response_format": ResponseFormatModel, + "text": {"format": {"type": "json_schema", "name": "test"}}, + "text_format": ResponseFormatModel, + }, ), patch.object( client, @@ -738,8 +830,11 @@ async def test_azure_ai_client_prepare_options_excludes_response_format( ): run_options = await client._prepare_options(messages, chat_options) - # response_format should be excluded from final run options + # response_format, text, and text_format should be excluded from final run options + # because they are configured at agent level, not request level assert "response_format" not in run_options + assert "text" not in run_options + assert "text_format" not in run_options # But extra_body should contain agent reference assert "extra_body" in run_options assert run_options["extra_body"]["agent"]["name"] == "test-agent" @@ -922,3 +1017,91 @@ async def test_azure_ai_chat_client_agent_with_tools() -> None: assert response.text is not None assert len(response.text) > 0 assert any(word in response.text.lower() for word in ["sunny", "25"]) + + +class ReleaseBrief(BaseModel): + """Structured output model for release brief.""" + + title: str = Field(description="A short title for the release.") + summary: str = Field(description="A brief summary of what was released.") + highlights: list[str] = Field(description="Key highlights from the release.") + model_config = ConfigDict(extra="forbid") + + +@pytest.mark.flaky +@skip_if_azure_ai_integration_tests_disabled +async def test_azure_ai_chat_client_agent_with_response_format() -> None: + """Test ChatAgent with response_format (structured output) using AzureAIClient.""" + async with ( + temporary_chat_client(agent_name="ResponseFormatAgent") as chat_client, + 
ChatAgent(chat_client=chat_client) as agent, + ): + response = await agent.run( + "Summarize the following release notes into a ReleaseBrief:\n\n" + "Version 2.0 Release Notes:\n" + "- Added new streaming API for real-time responses\n" + "- Improved error handling with detailed messages\n" + "- Performance boost of 50% in batch processing\n" + "- Fixed memory leak in connection pooling", + response_format=ReleaseBrief, + ) + + # Validate response + assert isinstance(response, AgentRunResponse) + assert response.value is not None + assert isinstance(response.value, ReleaseBrief) + + # Validate structured output fields + brief = response.value + assert len(brief.title) > 0 + assert len(brief.summary) > 0 + assert len(brief.highlights) > 0 + + +@pytest.mark.flaky +@skip_if_azure_ai_integration_tests_disabled +async def test_azure_ai_chat_client_agent_with_runtime_json_schema() -> None: + """Test ChatAgent with runtime JSON schema (structured output) using AzureAIClient.""" + runtime_schema = { + "title": "WeatherDigest", + "type": "object", + "properties": { + "location": {"type": "string"}, + "conditions": {"type": "string"}, + "temperature_c": {"type": "number"}, + "advisory": {"type": "string"}, + }, + "required": ["location", "conditions", "temperature_c", "advisory"], + "additionalProperties": False, + } + + async with ( + temporary_chat_client(agent_name="RuntimeSchemaAgent") as chat_client, + ChatAgent(chat_client=chat_client) as agent, + ): + response = await agent.run( + "Give a brief weather digest for Seattle.", + additional_chat_options={ + "response_format": { + "type": "json_schema", + "json_schema": { + "name": runtime_schema["title"], + "strict": True, + "schema": runtime_schema, + }, + }, + }, + ) + + # Validate response + assert isinstance(response, AgentRunResponse) + assert response.text is not None + + # Parse JSON and validate structure + import json + + parsed = json.loads(response.text) + assert "location" in parsed + assert "conditions" in 
parsed + assert "temperature_c" in parsed + assert "advisory" in parsed diff --git a/python/packages/azurefunctions/pyproject.toml b/python/packages/azurefunctions/pyproject.toml index d5bd833229..4a05986469 100644 --- a/python/packages/azurefunctions/pyproject.toml +++ b/python/packages/azurefunctions/pyproject.toml @@ -4,7 +4,7 @@ description = "Azure Functions integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251223" +version = "1.0.0b260107" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/azurefunctions/tests/integration_tests/test_03_callbacks.py b/python/packages/azurefunctions/tests/integration_tests/test_03_callbacks.py deleted file mode 100644 index 06414f993a..0000000000 --- a/python/packages/azurefunctions/tests/integration_tests/test_03_callbacks.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. -""" -Integration Tests for Callbacks Sample - -Tests the callbacks sample for event tracking and management. - -The function app is automatically started by the test fixture. 
- -Prerequisites: -- Azure OpenAI credentials configured (see packages/azurefunctions/tests/integration_tests/.env.example) -- Azurite or Azure Storage account configured - -Usage: - uv run pytest packages/azurefunctions/tests/integration_tests/test_03_callbacks.py -v -""" - -from typing import Any - -import pytest -import requests - -from .testutils import ( - TIMEOUT, - SampleTestHelper, - skip_if_azure_functions_integration_tests_disabled, -) - -# Module-level markers - applied to all tests in this file -pytestmark = [ - pytest.mark.sample("03_callbacks"), - pytest.mark.usefixtures("function_app_for_test"), - skip_if_azure_functions_integration_tests_disabled, -] - - -class TestSampleCallbacks: - """Tests for 03_callbacks sample.""" - - @pytest.fixture(autouse=True) - def _set_base_url(self, base_url: str) -> None: - """Provide the callback agent base URL for each test.""" - self.base_url = f"{base_url}/api/agents/CallbackAgent" - - @staticmethod - def _wait_for_callback_events(base_url: str, thread_id: str) -> list[dict[str, Any]]: - events: list[dict[str, Any]] = [] - response = SampleTestHelper.get(f"{base_url}/callbacks/{thread_id}") - if response.status_code == 200: - events = response.json() - return events - - def test_agent_with_callbacks(self) -> None: - """Test agent execution with callback tracking.""" - thread_id = "test-callback" - - response = SampleTestHelper.post_json( - f"{self.base_url}/run", - {"message": "Tell me about Python", "thread_id": thread_id}, - ) - assert response.status_code == 200 - data = response.json() - - assert data["status"] == "success" - - events = self._wait_for_callback_events(self.base_url, thread_id) - - assert events - assert any(event.get("event_type") == "final" for event in events) - - def test_get_callbacks(self) -> None: - """Test retrieving callback events.""" - thread_id = "test-callback-retrieve" - - # Send a message first - SampleTestHelper.post_json( - f"{self.base_url}/run", - {"message": "Hello", 
"thread_id": thread_id, "wait_for_response": False}, - ) - - # Get callbacks - response = SampleTestHelper.get(f"{self.base_url}/callbacks/{thread_id}") - assert response.status_code == 200 - data = response.json() - assert isinstance(data, list) - - def test_delete_callbacks(self) -> None: - """Test clearing callback events.""" - thread_id = "test-callback-delete" - - # Send a message first - SampleTestHelper.post_json( - f"{self.base_url}/run", - {"message": "Test", "thread_id": thread_id, "wait_for_response": False}, - ) - - # Delete callbacks - response = requests.delete(f"{self.base_url}/callbacks/{thread_id}", timeout=TIMEOUT) - assert response.status_code == 204 - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/python/packages/azurefunctions/tests/integration_tests/test_03_reliable_streaming.py b/python/packages/azurefunctions/tests/integration_tests/test_03_reliable_streaming.py new file mode 100644 index 0000000000..44fb8efb2f --- /dev/null +++ b/python/packages/azurefunctions/tests/integration_tests/test_03_reliable_streaming.py @@ -0,0 +1,125 @@ +# Copyright (c) Microsoft. All rights reserved. +""" +Integration Tests for Reliable Streaming Sample + +Tests the reliable streaming sample using Redis Streams for persistent message delivery. + +The function app is automatically started by the test fixture. 
+ +Prerequisites: +- Azure OpenAI credentials configured (see packages/azurefunctions/tests/integration_tests/.env.example) +- Azurite or Azure Storage account configured +- Redis running (docker run -d --name redis -p 6379:6379 redis:latest) + +Usage: + uv run pytest packages/azurefunctions/tests/integration_tests/test_03_reliable_streaming.py -v +""" + +import time + +import pytest +import requests + +from .testutils import ( + SampleTestHelper, + skip_if_azure_functions_integration_tests_disabled, +) + +# Module-level markers - applied to all tests in this file +pytestmark = [ + pytest.mark.sample("03_reliable_streaming"), + pytest.mark.usefixtures("function_app_for_test"), + skip_if_azure_functions_integration_tests_disabled, +] + + +class TestSampleReliableStreaming: + """Tests for 03_reliable_streaming sample.""" + + @pytest.fixture(autouse=True) + def _set_base_url(self, base_url: str) -> None: + """Provide the base URL for each test.""" + self.base_url = base_url + self.agent_url = f"{base_url}/api/agents/TravelPlanner" + self.stream_url = f"{base_url}/api/agent/stream" + + def test_agent_run_and_stream(self) -> None: + """Test agent execution with Redis streaming.""" + # Start agent run + response = SampleTestHelper.post_json( + f"{self.agent_url}/run", + {"message": "Plan a 1-day trip to Seattle in 1 sentence", "wait_for_response": False}, + ) + assert response.status_code == 202 + data = response.json() + + thread_id = data.get("thread_id") + + # Wait a moment for the agent to start writing to Redis + time.sleep(2) + + # Stream response from Redis with shorter timeout + # Note: We use text/plain to avoid SSE parsing complexity + stream_response = requests.get( + f"{self.stream_url}/{thread_id}", + headers={"Accept": "text/plain"}, + timeout=30, # Shorter timeout for test + ) + assert stream_response.status_code == 200 + + def test_stream_with_sse_format(self) -> None: + """Test streaming with Server-Sent Events format.""" + # Start agent run + response = 
SampleTestHelper.post_json( + f"{self.agent_url}/run", + {"message": "What's the weather like?", "wait_for_response": False}, + ) + assert response.status_code == 202 + data = response.json() + thread_id = data.get("thread_id") + + # Wait for agent to start writing + time.sleep(2) + + # Stream with SSE format + stream_response = requests.get( + f"{self.stream_url}/{thread_id}", + headers={"Accept": "text/event-stream"}, + timeout=30, # Shorter timeout + ) + assert stream_response.status_code == 200 + content_type = stream_response.headers.get("content-type", "") + assert "text/event-stream" in content_type + + # Check for SSE event markers if we got content + content = stream_response.text + if content: + assert "event:" in content or "data:" in content + + def test_stream_nonexistent_conversation(self) -> None: + """Test streaming from a non-existent conversation. + + The endpoint will wait for data in Redis, but since the conversation + doesn't exist, it will timeout. This is expected behavior. + """ + fake_id = "nonexistent-conversation-12345" + + # Should timeout since the conversation doesn't exist + with pytest.raises(requests.exceptions.ReadTimeout): + requests.get( + f"{self.stream_url}/{fake_id}", + headers={"Accept": "text/plain"}, + timeout=10, # Short timeout for non-existent ID + ) + + def test_health_endpoint(self) -> None: + """Test health check endpoint.""" + response = SampleTestHelper.get(f"{self.base_url}/api/health") + assert response.status_code == 200 + data = response.json() + assert data["status"] == "healthy" + assert "agents" in data + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/python/packages/bedrock/pyproject.toml b/python/packages/bedrock/pyproject.toml index ea6cffda42..2e60e11288 100644 --- a/python/packages/bedrock/pyproject.toml +++ b/python/packages/bedrock/pyproject.toml @@ -4,7 +4,7 @@ description = "Amazon Bedrock integration for Microsoft Agent Framework." 
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251120" +version = "1.0.0b260107" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/chatkit/pyproject.toml b/python/packages/chatkit/pyproject.toml index 7e6d6ab846..3fa92669f2 100644 --- a/python/packages/chatkit/pyproject.toml +++ b/python/packages/chatkit/pyproject.toml @@ -4,7 +4,7 @@ description = "OpenAI ChatKit integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251223" +version = "1.0.0b260107" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/copilotstudio/pyproject.toml b/python/packages/copilotstudio/pyproject.toml index af20dc4c01..0ead8b437d 100644 --- a/python/packages/copilotstudio/pyproject.toml +++ b/python/packages/copilotstudio/pyproject.toml @@ -4,7 +4,7 @@ description = "Copilot Studio integration for Microsoft Agent Framework." 
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251223" +version = "1.0.0b260107" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/core/agent_framework/_clients.py b/python/packages/core/agent_framework/_clients.py index bfb2c3f7d4..6743902475 100644 --- a/python/packages/core/agent_framework/_clients.py +++ b/python/packages/core/agent_framework/_clients.py @@ -101,7 +101,7 @@ async def get_response( stop: str | Sequence[str] | None = None, store: bool | None = None, temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = "auto", + tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] @@ -160,7 +160,7 @@ def get_streaming_response( stop: str | Sequence[str] | None = None, store: bool | None = None, temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = "auto", + tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] @@ -501,7 +501,7 @@ async def get_response( stop: str | Sequence[str] | None = None, store: bool | None = None, temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = "auto", + tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] @@ -596,7 +596,7 @@ async def get_streaming_response( stop: str | Sequence[str] | None = None, store: bool | None = None, temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", 
"required", "none"] | dict[str, Any] | None = "auto", + tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] @@ -688,12 +688,18 @@ def _prepare_tool_choice(self, chat_options: ChatOptions) -> None: chat_options: The chat options to prepare. """ chat_tool_mode = chat_options.tool_choice - if chat_tool_mode is None or chat_tool_mode == ToolMode.NONE or chat_tool_mode == "none": + # Explicitly disabled: clear tools and set to NONE + if chat_tool_mode == ToolMode.NONE or chat_tool_mode == "none": chat_options.tools = None chat_options.tool_choice = ToolMode.NONE return + # No tools available: set to NONE regardless of requested mode if not chat_options.tools: chat_options.tool_choice = ToolMode.NONE + # Tools available but no explicit mode: default to AUTO + elif chat_tool_mode is None: + chat_options.tool_choice = ToolMode.AUTO + # Tools available with explicit mode: preserve the mode else: chat_options.tool_choice = chat_tool_mode diff --git a/python/packages/core/agent_framework/_tools.py b/python/packages/core/agent_framework/_tools.py index 07b11811f3..24481c3b3b 100644 --- a/python/packages/core/agent_framework/_tools.py +++ b/python/packages/core/agent_framework/_tools.py @@ -16,6 +16,7 @@ Generic, Literal, Protocol, + TypedDict, TypeVar, cast, get_args, @@ -73,6 +74,7 @@ "FunctionInvocationConfiguration", "HostedCodeInterpreterTool", "HostedFileSearchTool", + "HostedImageGenerationTool", "HostedMCPSpecificApproval", "HostedMCPTool", "HostedWebSearchTool", @@ -324,6 +326,41 @@ def __init__( super().__init__(**args) +class HostedImageGenerationToolOptions(TypedDict, total=False): + """Options for HostedImageGenerationTool.""" + + count: int + image_size: str + media_type: str + model_id: str + response_format: Literal["uri", "data", "hosted"] + streaming_count: int + + +class HostedImageGenerationTool(BaseTool): + """Represents a hosted tool that can be 
specified to an AI service to enable it to perform image generation.""" + + def __init__( + self, + *, + options: HostedImageGenerationToolOptions | None = None, + description: str | None = None, + additional_properties: dict[str, Any] | None = None, + **kwargs: Any, + ): + """Initialize a HostedImageGenerationTool.""" + if "name" in kwargs: + raise ValueError("The 'name' argument is reserved for the HostedImageGenerationTool and cannot be set.") + + self.options = options + super().__init__( + name="image_generation", + description=description or "", + additional_properties=additional_properties, + **kwargs, + ) + + class HostedMCPSpecificApproval(TypedDict, total=False): """Represents the specific mode for a hosted tool. @@ -1419,14 +1456,11 @@ async def _auto_invoke_function( Raises: KeyError: If the requested function is not found in the tool map. """ - from ._types import ( - FunctionResultContent, - ) - # Note: The scenarios for approval_mode="always_require", declaration_only, and # terminate_on_unknown_calls are all handled in _try_execute_function_calls before # this function is called. This function only handles the actual execution of approved, # non-declaration-only functions. + from ._types import FunctionCallContent, FunctionResultContent tool: AIFunction[BaseModel, Any] | None = None if function_call_content.type == "function_call": @@ -1444,11 +1478,14 @@ async def _auto_invoke_function( else: # Note: Unapproved tools (approved=False) are handled in _replace_approval_contents_with_results # and never reach this function, so we only handle approved=True cases here. 
- tool = tool_map.get(function_call_content.function_call.name) + inner_call = function_call_content.function_call + if not isinstance(inner_call, FunctionCallContent): + return function_call_content + tool = tool_map.get(inner_call.name) if tool is None: # we assume it is a hosted tool return function_call_content - function_call_content = function_call_content.function_call + function_call_content = inner_call parsed_args: dict[str, Any] = dict(function_call_content.parse_arguments() or {}) diff --git a/python/packages/core/agent_framework/_types.py b/python/packages/core/agent_framework/_types.py index f804aae052..ebe3d23e6f 100644 --- a/python/packages/core/agent_framework/_types.py +++ b/python/packages/core/agent_framework/_types.py @@ -40,6 +40,8 @@ "ChatResponse", "ChatResponseUpdate", "CitationAnnotation", + "CodeInterpreterToolCallContent", + "CodeInterpreterToolResultContent", "Contents", "DataContent", "ErrorContent", @@ -50,6 +52,10 @@ "FunctionResultContent", "HostedFileContent", "HostedVectorStoreContent", + "ImageGenerationToolCallContent", + "ImageGenerationToolResultContent", + "MCPServerToolCallContent", + "MCPServerToolResultContent", "Role", "TextContent", "TextReasoningContent", @@ -121,6 +127,18 @@ def _parse_content(content_data: MutableMapping[str, Any]) -> "Contents": return HostedFileContent.from_dict(content_data) case "hosted_vector_store": return HostedVectorStoreContent.from_dict(content_data) + case "code_interpreter_tool_call": + return CodeInterpreterToolCallContent.from_dict(content_data) + case "code_interpreter_tool_result": + return CodeInterpreterToolResultContent.from_dict(content_data) + case "image_generation_tool_call": + return ImageGenerationToolCallContent.from_dict(content_data) + case "image_generation_tool_result": + return ImageGenerationToolResultContent.from_dict(content_data) + case "mcp_server_tool_call": + return MCPServerToolCallContent.from_dict(content_data) + case "mcp_server_tool_result": + return 
MCPServerToolResultContent.from_dict(content_data) case "function_approval_request": return FunctionApprovalRequestContent.from_dict(content_data) case "function_approval_response": @@ -1607,6 +1625,8 @@ def __init__( self, file_id: str, *, + media_type: str | None = None, + name: str | None = None, additional_properties: dict[str, Any] | None = None, raw_representation: Any | None = None, **kwargs: Any, @@ -1615,6 +1635,8 @@ def __init__( Args: file_id: The identifier of the hosted file. + media_type: Optional media type of the hosted file. + name: Optional display name of the hosted file. Keyword Args: additional_properties: Optional additional properties associated with the content. @@ -1627,8 +1649,14 @@ def __init__( **kwargs, ) self.file_id = file_id + self.media_type = media_type + self.name = name self.type: Literal["hosted_file"] = "hosted_file" + def has_top_level_media_type(self, top_level_media_type: Literal["application", "audio", "image", "text"]) -> bool: + """Returns a boolean indicating if the media type has the specified top-level media type.""" + return _has_top_level_media_type(self.media_type, top_level_media_type) + class HostedVectorStoreContent(BaseContent): """Represents a hosted vector store content. 
@@ -1676,6 +1704,234 @@ def __init__( self.type: Literal["hosted_vector_store"] = "hosted_vector_store" +class CodeInterpreterToolCallContent(BaseContent): + """Represents a code interpreter tool call invocation by a hosted service.""" + + def __init__( + self, + *, + call_id: str | None = None, + inputs: Sequence["Contents | MutableMapping[str, Any]"] | None = None, + annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, + additional_properties: dict[str, Any] | None = None, + raw_representation: Any | None = None, + **kwargs: Any, + ) -> None: + super().__init__( + annotations=annotations, + additional_properties=additional_properties, + raw_representation=raw_representation, + **kwargs, + ) + self.call_id = call_id + self.inputs: list["Contents"] | None = None + if inputs: + normalized_inputs: Sequence["Contents | MutableMapping[str, Any]"] = ( + inputs + if isinstance(inputs, Sequence) and not isinstance(inputs, (str, bytes, MutableMapping)) + else [inputs] + ) + self.inputs = _parse_content_list(list(normalized_inputs)) + self.type: Literal["code_interpreter_tool_call"] = "code_interpreter_tool_call" + + +class CodeInterpreterToolResultContent(BaseContent): + """Represents the result of a code interpreter tool invocation by a hosted service.""" + + def __init__( + self, + *, + call_id: str | None = None, + outputs: Sequence["Contents | MutableMapping[str, Any]"] | None = None, + annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, + additional_properties: dict[str, Any] | None = None, + raw_representation: Any | None = None, + **kwargs: Any, + ) -> None: + super().__init__( + annotations=annotations, + additional_properties=additional_properties, + raw_representation=raw_representation, + **kwargs, + ) + self.call_id = call_id + self.outputs: list["Contents"] | None = None + if outputs: + normalized_outputs: Sequence["Contents | MutableMapping[str, Any]"] = ( + outputs + if isinstance(outputs, Sequence) and not 
isinstance(outputs, (str, bytes, MutableMapping)) + else [outputs] + ) + self.outputs = _parse_content_list(list(normalized_outputs)) + self.type: Literal["code_interpreter_tool_result"] = "code_interpreter_tool_result" + + +class ImageGenerationToolCallContent(BaseContent): + """Represents the invocation of an image generation tool call by a hosted service.""" + + def __init__( + self, + *, + image_id: str | None = None, + annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, + additional_properties: dict[str, Any] | None = None, + raw_representation: Any | None = None, + **kwargs: Any, + ) -> None: + """Initializes an ImageGenerationToolCallContent instance. + + Keyword Args: + image_id: The identifier of the image to be generated. + annotations: Optional annotations associated with the content. + additional_properties: Optional additional properties associated with the content. + raw_representation: Optional raw representation of the content. + **kwargs: Any additional keyword arguments. + + """ + super().__init__( + annotations=annotations, + additional_properties=additional_properties, + raw_representation=raw_representation, + **kwargs, + ) + self.image_id = image_id + self.type: Literal["image_generation_tool_call"] = "image_generation_tool_call" + + +class ImageGenerationToolResultContent(BaseContent): + """Represents the result of an image generation tool call invocation by a hosted service.""" + + def __init__( + self, + *, + image_id: str | None = None, + outputs: DataContent | UriContent | None = None, + annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, + additional_properties: dict[str, Any] | None = None, + raw_representation: Any | None = None, + **kwargs: Any, + ) -> None: + """Initializes an ImageGenerationToolResultContent instance. + + Keyword Args: + image_id: The identifier of the generated image. + outputs: The outputs of the image generation tool call. 
+ annotations: Optional annotations associated with the content. + additional_properties: Optional additional properties associated with the content. + raw_representation: Optional raw representation of the content. + **kwargs: Any additional keyword arguments. + + """ + super().__init__( + annotations=annotations, + additional_properties=additional_properties, + raw_representation=raw_representation, + **kwargs, + ) + self.image_id = image_id + self.outputs: DataContent | UriContent | None = outputs + self.type: Literal["image_generation_tool_result"] = "image_generation_tool_result" + + +class MCPServerToolCallContent(BaseContent): + """Represents a tool call request to a MCP server.""" + + def __init__( + self, + call_id: str, + tool_name: str, + server_name: str | None = None, + *, + arguments: str | Mapping[str, Any] | None = None, + annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, + additional_properties: dict[str, Any] | None = None, + raw_representation: Any | None = None, + **kwargs: Any, + ) -> None: + """Initializes a MCPServerToolCallContent instance. + + Args: + call_id: The tool call identifier. + tool_name: The name of the tool requested. + server_name: The name of the MCP server where the tool is hosted. + + Keyword Args: + arguments: The arguments requested to be provided to the tool, + can be a string to allow gradual completion of the args. + annotations: Optional annotations associated with the content. + additional_properties: Optional additional properties associated with the content. + raw_representation: Optional raw representation of the content. + **kwargs: Any additional keyword arguments. 
+ """ + if not call_id: + raise ValueError("call_id must be a non-empty string.") + if not tool_name: + raise ValueError("tool_name must be a non-empty string.") + super().__init__( + annotations=annotations, + additional_properties=additional_properties, + raw_representation=raw_representation, + **kwargs, + ) + self.call_id = call_id + self.tool_name = tool_name + self.name = tool_name + self.server_name = server_name + self.arguments = arguments + self.type: Literal["mcp_server_tool_call"] = "mcp_server_tool_call" + + def parse_arguments(self) -> dict[str, Any] | None: + """Returns the parsed arguments for the MCP server tool call, if any.""" + if isinstance(self.arguments, str): + # If arguments are a string, try to parse it as JSON + try: + loaded = json.loads(self.arguments) + if isinstance(loaded, dict): + return loaded # type:ignore + return {"raw": loaded} + except (json.JSONDecodeError, TypeError): + return {"raw": self.arguments} + return cast(dict[str, Any] | None, self.arguments) + + +class MCPServerToolResultContent(BaseContent): + """Represents the result of a MCP server tool call.""" + + def __init__( + self, + call_id: str, + *, + output: Any | None = None, + annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, + additional_properties: dict[str, Any] | None = None, + raw_representation: Any | None = None, + **kwargs: Any, + ) -> None: + """Initializes a MCPServerToolResultContent instance. + + Args: + call_id: The identifier of the tool call for which this is the result. + + Keyword Args: + output: The output of the MCP server tool call. + annotations: Optional annotations associated with the content. + additional_properties: Optional additional properties associated with the content. + raw_representation: Optional raw representation of the content. + **kwargs: Any additional keyword arguments. 
+ """ + if not call_id: + raise ValueError("call_id must be a non-empty string.") + super().__init__( + annotations=annotations, + additional_properties=additional_properties, + raw_representation=raw_representation, + **kwargs, + ) + self.call_id = call_id + self.output: Any | None = output + self.type: Literal["mcp_server_tool_result"] = "mcp_server_tool_result" + + class BaseUserInputRequest(BaseContent): """Base class for all user requests.""" @@ -1736,7 +1992,7 @@ def __init__( approved: bool, *, id: str, - function_call: FunctionCallContent | MutableMapping[str, Any], + function_call: FunctionCallContent | MCPServerToolCallContent | MutableMapping[str, Any], annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, additional_properties: dict[str, Any] | None = None, raw_representation: Any | None = None, @@ -1764,8 +2020,12 @@ def __init__( self.id = id self.approved = approved # Convert dict to FunctionCallContent if needed (for SerializationMixin support) + self.function_call: FunctionCallContent | MCPServerToolCallContent if isinstance(function_call, MutableMapping): - self.function_call = FunctionCallContent.from_dict(function_call) + if function_call.get("type") == "mcp_server_tool_call": + self.function_call = MCPServerToolCallContent.from_dict(function_call) + else: + self.function_call = FunctionCallContent.from_dict(function_call) else: self.function_call = function_call # Override the type for this specific subclass @@ -1823,6 +2083,7 @@ def __init__( **kwargs, ) self.id = id + self.function_call: FunctionCallContent # Convert dict to FunctionCallContent if needed (for SerializationMixin support) if isinstance(function_call, MutableMapping): self.function_call = FunctionCallContent.from_dict(function_call) @@ -1854,6 +2115,12 @@ def create_response(self, approved: bool) -> "FunctionApprovalResponseContent": | UsageContent | HostedFileContent | HostedVectorStoreContent + | CodeInterpreterToolCallContent + | 
CodeInterpreterToolResultContent + | ImageGenerationToolCallContent + | ImageGenerationToolResultContent + | MCPServerToolCallContent + | MCPServerToolResultContent | FunctionApprovalRequestContent | FunctionApprovalResponseContent ) @@ -1869,6 +2136,9 @@ def _prepare_function_call_results_as_dumpable(content: Contents | Any | list[Co return content.model_dump() if hasattr(content, "to_dict"): return content.to_dict(exclude={"raw_representation", "additional_properties"}) + # Handle objects with text attribute (e.g., MCP TextContent) + if hasattr(content, "text") and isinstance(content.text, str): + return content.text return content diff --git a/python/packages/core/agent_framework/_workflows/_agent.py b/python/packages/core/agent_framework/_workflows/_agent.py index 3cf37c4b49..7eec2472f0 100644 --- a/python/packages/core/agent_framework/_workflows/_agent.py +++ b/python/packages/core/agent_framework/_workflows/_agent.py @@ -309,6 +309,9 @@ def _convert_workflow_event_to_agent_update( if isinstance(executor, AgentExecutor) and not executor.output_response: return None if update: + # Enrich with executor identity if author_name is not already set + if not update.author_name: + update.author_name = executor_id return update return None diff --git a/python/packages/core/agent_framework/_workflows/_executor.py b/python/packages/core/agent_framework/_workflows/_executor.py index 3624a7c267..fad1e5f15e 100644 --- a/python/packages/core/agent_framework/_workflows/_executor.py +++ b/python/packages/core/agent_framework/_workflows/_executor.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
import contextlib +import copy import functools import inspect import logging @@ -263,8 +264,9 @@ async def execute( ) # Invoke the handler with the message and context + # Use deepcopy to capture original input state before handler can mutate it with _framework_event_origin(): - invoke_event = ExecutorInvokedEvent(self.id, message) + invoke_event = ExecutorInvokedEvent(self.id, copy.deepcopy(message)) await context.add_event(invoke_event) try: await handler(message, context) @@ -275,9 +277,11 @@ async def execute( await context.add_event(failure_event) raise with _framework_event_origin(): - # Include sent messages as the completion data + # Include sent messages and yielded outputs as the completion data sent_messages = context.get_sent_messages() - completed_event = ExecutorCompletedEvent(self.id, sent_messages if sent_messages else None) + yielded_outputs = context.get_yielded_outputs() + completion_data = sent_messages + yielded_outputs + completed_event = ExecutorCompletedEvent(self.id, completion_data if completion_data else None) await context.add_event(completed_event) def _create_context_for_handler( diff --git a/python/packages/core/agent_framework/_workflows/_workflow_context.py b/python/packages/core/agent_framework/_workflows/_workflow_context.py index 9719ce164a..cffeb02aa0 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow_context.py +++ b/python/packages/core/agent_framework/_workflows/_workflow_context.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. 
+import copy import inspect import logging import uuid @@ -290,6 +291,9 @@ def __init__( # Track messages sent via send_message() for ExecutorCompletedEvent self._sent_messages: list[Any] = [] + # Track outputs yielded via yield_output() for ExecutorCompletedEvent + self._yielded_outputs: list[Any] = [] + # Store trace contexts and source span IDs for linking (supporting multiple sources) self._trace_contexts = trace_contexts or [] self._source_span_ids = source_span_ids or [] @@ -336,6 +340,9 @@ async def yield_output(self, output: T_W_Out) -> None: output: The output to yield. This must conform to the workflow output type(s) declared on this context. """ + # Track yielded output for ExecutorCompletedEvent (deepcopy to capture state at yield time) + self._yielded_outputs.append(copy.deepcopy(output)) + with _framework_event_origin(): event = WorkflowOutputEvent(data=output, source_executor_id=self._executor_id) await self._runner_context.add_event(event) @@ -424,6 +431,14 @@ def get_sent_messages(self) -> list[Any]: """ return self._sent_messages.copy() + def get_yielded_outputs(self) -> list[Any]: + """Get all outputs yielded via yield_output() during this handler execution. + + Returns: + A list of outputs that were yielded as workflow outputs. + """ + return self._yielded_outputs.copy() + @deprecated( "Override `on_checkpoint_save()` methods instead. " "For cross-executor state sharing, use set_shared_state() instead. 
" diff --git a/python/packages/core/agent_framework/observability.py b/python/packages/core/agent_framework/observability.py index 9dc6e4d4a9..26c261038b 100644 --- a/python/packages/core/agent_framework/observability.py +++ b/python/packages/core/agent_framework/observability.py @@ -321,7 +321,7 @@ def _create_otlp_exporters( if not actual_logs_endpoint and not actual_traces_endpoint and not actual_metrics_endpoint: return exporters - if protocol in ("grpc", "http/protobuf"): + if protocol == "grpc": # Import all gRPC exporters try: from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter as GRPCLogExporter @@ -357,7 +357,7 @@ def _create_otlp_exporters( ) ) - elif protocol == "http": + elif protocol in ("http/protobuf", "http"): # Import all HTTP exporters try: from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter as HTTPLogExporter diff --git a/python/packages/core/agent_framework/openai/_assistants_client.py b/python/packages/core/agent_framework/openai/_assistants_client.py index e790a44940..b6f97371b7 100644 --- a/python/packages/core/agent_framework/openai/_assistants_client.py +++ b/python/packages/core/agent_framework/openai/_assistants_client.py @@ -3,7 +3,7 @@ import json import sys from collections.abc import AsyncIterable, Awaitable, Callable, Mapping, MutableMapping, MutableSequence -from typing import Any +from typing import Any, cast from openai import AsyncOpenAI from openai.types.beta.threads import ( @@ -28,9 +28,11 @@ ChatOptions, ChatResponse, ChatResponseUpdate, + CodeInterpreterToolCallContent, Contents, FunctionCallContent, FunctionResultContent, + MCPServerToolCallContent, Role, TextContent, ToolMode, @@ -377,10 +379,37 @@ def _parse_function_calls_from_assistants(self, event_data: Run, response_id: st if event_data.required_action is not None: for tool_call in event_data.required_action.submit_tool_outputs.tool_calls: + tool_call_any = cast(Any, tool_call) call_id = 
json.dumps([response_id, tool_call.id]) - function_name = tool_call.function.name - function_arguments = json.loads(tool_call.function.arguments) - contents.append(FunctionCallContent(call_id=call_id, name=function_name, arguments=function_arguments)) + tool_type = getattr(tool_call, "type", None) + if tool_type == "code_interpreter" and getattr(tool_call_any, "code_interpreter", None): + code_input = getattr(tool_call_any.code_interpreter, "input", None) + inputs = ( + [TextContent(text=code_input, raw_representation=tool_call)] if code_input is not None else None + ) + contents.append( + CodeInterpreterToolCallContent( + call_id=call_id, + inputs=inputs, + raw_representation=tool_call, + ) + ) + elif tool_type == "mcp": + contents.append( + MCPServerToolCallContent( + call_id=call_id, + tool_name=getattr(tool_call, "name", "") or "", + server_name=getattr(tool_call, "server_label", None), + arguments=getattr(tool_call, "args", None), + raw_representation=tool_call, + ) + ) + else: + function_name = tool_call.function.name + function_arguments = json.loads(tool_call.function.arguments) + contents.append( + FunctionCallContent(call_id=call_id, name=function_name, arguments=function_arguments) + ) return contents diff --git a/python/packages/core/agent_framework/openai/_chat_client.py b/python/packages/core/agent_framework/openai/_chat_client.py index b7cac3ba20..a2365b58f2 100644 --- a/python/packages/core/agent_framework/openai/_chat_client.py +++ b/python/packages/core/agent_framework/openai/_chat_client.py @@ -183,7 +183,7 @@ def _prepare_options(self, messages: MutableSequence[ChatMessage], chat_options: translations = { "model_id": "model", "allow_multiple_tool_calls": "parallel_tool_calls", - "max_tokens": "max_output_tokens", + "max_tokens": "max_completion_tokens", } for old_key, new_key in translations.items(): if old_key in run_options and old_key != new_key: @@ -205,8 +205,8 @@ def _prepare_options(self, messages: MutableSequence[ChatMessage], 
chat_options: run_options.pop("tools", None) run_options.pop("parallel_tool_calls", None) run_options.pop("tool_choice", None) - # tool choice when `tool_choice` is a dict with single key `mode`, extract the mode value - if (tool_choice := run_options.get("tool_choice")) and len(tool_choice.keys()) == 1: + # tool_choice: ToolMode serializes to {"type": "tool_mode", "mode": "..."}, extract mode + if (tool_choice := run_options.get("tool_choice")) and isinstance(tool_choice, dict) and "mode" in tool_choice: run_options["tool_choice"] = tool_choice["mode"] # response format diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index 54a0f5544b..6054c91ded 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -1,6 +1,14 @@ # Copyright (c) Microsoft. All rights reserved. -from collections.abc import AsyncIterable, Awaitable, Callable, Mapping, MutableMapping, MutableSequence, Sequence +from collections.abc import ( + AsyncIterable, + Awaitable, + Callable, + Mapping, + MutableMapping, + MutableSequence, + Sequence, +) from datetime import datetime, timezone from itertools import chain from typing import Any, TypeVar, cast @@ -12,7 +20,9 @@ ParsedResponse, ) from openai.types.responses.response import Response as OpenAIResponse -from openai.types.responses.response_stream_event import ResponseStreamEvent as OpenAIResponseStreamEvent +from openai.types.responses.response_stream_event import ( + ResponseStreamEvent as OpenAIResponseStreamEvent, +) from openai.types.responses.response_usage import ResponseUsage from openai.types.responses.tool_param import ( CodeInterpreter, @@ -20,7 +30,9 @@ Mcp, ToolParam, ) -from openai.types.responses.web_search_tool_param import UserLocation as WebSearchUserLocation +from openai.types.responses.web_search_tool_param import ( + UserLocation as 
WebSearchUserLocation, +) from openai.types.responses.web_search_tool_param import WebSearchToolParam from pydantic import BaseModel, ValidationError @@ -31,6 +43,7 @@ AIFunction, HostedCodeInterpreterTool, HostedFileSearchTool, + HostedImageGenerationTool, HostedMCPTool, HostedWebSearchTool, ToolProtocol, @@ -42,6 +55,8 @@ ChatResponse, ChatResponseUpdate, CitationAnnotation, + CodeInterpreterToolCallContent, + CodeInterpreterToolResultContent, Contents, DataContent, FunctionApprovalRequestContent, @@ -50,6 +65,10 @@ FunctionResultContent, HostedFileContent, HostedVectorStoreContent, + ImageGenerationToolCallContent, + ImageGenerationToolResultContent, + MCPServerToolCallContent, + MCPServerToolResultContent, Role, TextContent, TextReasoningContent, @@ -57,6 +76,7 @@ UriContent, UsageContent, UsageDetails, + _parse_content, prepare_function_call_results, ) from ..exceptions import ( @@ -131,13 +151,17 @@ async def _inner_get_streaming_response( if "text_format" not in run_options: async for chunk in await client.responses.create(stream=True, **run_options): yield self._parse_chunk_from_openai( - chunk, chat_options=chat_options, function_call_ids=function_call_ids + chunk, + chat_options=chat_options, + function_call_ids=function_call_ids, ) return async with client.responses.stream(**run_options) as response: async for chunk in response: yield self._parse_chunk_from_openai( - chunk, chat_options=chat_options, function_call_ids=function_call_ids + chunk, + chat_options=chat_options, + function_call_ids=function_call_ids, ) except BadRequestError as ex: if ex.code == "content_filter": @@ -314,39 +338,28 @@ def _prepare_tools_for_openai( else None, ) ) + case HostedImageGenerationTool(): + mapped_tool: dict[str, Any] = {"type": "image_generation"} + if tool.options: + option_mapping = { + "image_size": "size", + "media_type": "output_format", + "model_id": "model", + "streaming_count": "partial_images", + } + # count and response_format are not supported by 
Responses API + for key, value in tool.options.items(): + mapped_key = option_mapping.get(key, key) + mapped_tool[mapped_key] = value + if tool.additional_properties: + mapped_tool.update(tool.additional_properties) + response_tools.append(mapped_tool) case _: logger.debug("Unsupported tool passed (type: %s)", type(tool)) else: # Handle raw dictionary tools tool_dict = tool if isinstance(tool, dict) else dict(tool) - - # Special handling for image_generation tools - if tool_dict.get("type") == "image_generation": - # Create a copy to avoid modifying the original - mapped_tool = tool_dict.copy() - - # Map user-friendly parameter names to OpenAI API parameter names - parameter_mapping = { - "format": "output_format", - "compression": "output_compression", - } - - for user_param, api_param in parameter_mapping.items(): - if user_param in mapped_tool: - # Map the parameter name and remove the old one - mapped_tool[api_param] = mapped_tool.pop(user_param) - - # Validate partial_images parameter for streaming image generation - # OpenAI API requires partial_images to be between 0-3 (inclusive) for image_generation tool - # Reference: https://platform.openai.com/docs/api-reference/responses/create#responses_create-tools-image_generation_tool-partial_images - if "partial_images" in mapped_tool: - partial_images = mapped_tool["partial_images"] - if not isinstance(partial_images, int) or partial_images < 0 or partial_images > 3: - raise ValueError("partial_images must be an integer between 0 and 3 (inclusive).") - - response_tools.append(mapped_tool) - else: - response_tools.append(tool_dict) + response_tools.append(tool_dict) return response_tools @staticmethod @@ -435,20 +448,27 @@ async def _prepare_options( else: run_options.pop("parallel_tool_calls", None) run_options.pop("tool_choice", None) - # tool choice when `tool_choice` is a dict with single key `mode`, extract the mode value - if (tool_choice := run_options.get("tool_choice")) and len(tool_choice.keys()) == 1: + 
# tool_choice: ToolMode serializes to {"type": "tool_mode", "mode": "..."}, extract mode + if (tool_choice := run_options.get("tool_choice")) and isinstance(tool_choice, dict) and "mode" in tool_choice: run_options["tool_choice"] = tool_choice["mode"] - # additional properties + # additional properties (excluding response_format which is handled separately) additional_options = { - key: value for key, value in chat_options.additional_properties.items() if value is not None + key: value + for key, value in chat_options.additional_properties.items() + if value is not None and key != "response_format" } if additional_options: run_options.update(additional_options) # response format and text config (after additional_properties so user can pass text via additional_properties) - response_format = chat_options.response_format - text_config = run_options.pop("text", None) + # Check both chat_options.response_format and additional_properties for response_format + response_format: Any = ( + chat_options.response_format + if chat_options.response_format is not None + else chat_options.additional_properties.get("response_format") + ) + text_config: Any = run_options.pop("text", None) response_format, text_config = self._prepare_response_and_text_format( response_format=response_format, text_config=text_config ) @@ -551,7 +571,10 @@ def _prepare_content_for_openai( if status := props.get("status"): ret["status"] = status if reasoning_text := props.get("reasoning_text"): - ret["content"] = {"type": "reasoning_text", "text": reasoning_text} + ret["content"] = { + "type": "reasoning_text", + "text": reasoning_text, + } if encrypted_content := props.get("encrypted_content"): ret["encrypted_content"] = encrypted_content return ret @@ -597,9 +620,17 @@ def _prepare_content_for_openai( return file_obj return {} case FunctionCallContent(): + if not content.call_id: + logger.warning(f"FunctionCallContent missing call_id for function '{content.name}'") + return {} + # Use fc_id from 
additional_properties if available, otherwise fallback to call_id + fc_id = call_id_to_id.get(content.call_id, content.call_id) + # OpenAI Responses API requires IDs to start with `fc_` + if not fc_id.startswith("fc_"): + fc_id = f"fc_{fc_id}" return { "call_id": content.call_id, - "id": call_id_to_id[content.call_id], + "id": fc_id, "type": "function_call", "name": content.name, "arguments": content.arguments, @@ -735,11 +766,17 @@ def _parse_response_from_openai( ) ) case _: - logger.debug("Unparsed annotation type: %s", annotation.type) + logger.debug( + "Unparsed annotation type: %s", + annotation.type, + ) contents.append(text_content) case "refusal": contents.append( - TextContent(text=message_content.refusal, raw_representation=message_content) + TextContent( + text=message_content.refusal, + raw_representation=message_content, + ) ) case "reasoning": # ResponseOutputReasoning if hasattr(item, "content") and item.content: @@ -760,22 +797,40 @@ def _parse_response_from_openai( TextReasoningContent(text=summary.text, raw_representation=summary) # type: ignore[arg-type] ) case "code_interpreter_call": # ResponseOutputCodeInterpreterCall - if hasattr(item, "outputs") and item.outputs: - for code_output in item.outputs: - if code_output.type == "logs": - contents.append(TextContent(text=code_output.logs, raw_representation=item)) - if code_output.type == "image": - contents.append( + call_id = getattr(item, "call_id", None) or getattr(item, "id", None) + outputs: list["Contents"] = [] + if item_outputs := getattr(item, "outputs", None): + for code_output in item_outputs: + if getattr(code_output, "type", None) == "logs": + outputs.append( + TextContent( + text=code_output.logs, + raw_representation=code_output, + ) + ) + elif getattr(code_output, "type", None) == "image": + outputs.append( UriContent( uri=code_output.url, - raw_representation=item, - # no more specific media type then this can be inferred + raw_representation=code_output, media_type="image", ) ) 
- elif hasattr(item, "code") and item.code: - # fallback if no output was returned is the code: - contents.append(TextContent(text=item.code, raw_representation=item)) + if code := getattr(item, "code", None): + contents.append( + CodeInterpreterToolCallContent( + call_id=call_id, + inputs=[TextContent(text=code, raw_representation=item)], + raw_representation=item, + ) + ) + contents.append( + CodeInterpreterToolResultContent( + call_id=call_id, + outputs=outputs, + raw_representation=item, + ) + ) case "function_call": # ResponseOutputFunctionCall contents.append( FunctionCallContent( @@ -799,31 +854,49 @@ def _parse_response_from_openai( ), ) ) - case "image_generation_call": # ResponseOutputImageGenerationCall - if item.result: - # Handle the result as either a proper data URI or raw base64 string - uri = item.result - media_type = None - if not uri.startswith("data:"): - # Raw base64 string - convert to proper data URI format using helper - uri, media_type = DataContent.create_data_uri_from_base64(uri) - else: - # Parse media type from existing data URI - try: - # Extract media type from data URI (e.g., "data:image/png;base64,...") - if ";" in uri and uri.startswith("data:"): - media_type = uri.split(";")[0].split(":", 1)[1] - except Exception: - # Fallback if parsing fails - media_type = "image" + case "mcp_call": + call_id = item.id + contents.append( + MCPServerToolCallContent( + call_id=call_id, + tool_name=item.name, + server_name=item.server_label, + arguments=item.arguments, + raw_representation=item, + ) + ) + if item.output is not None: contents.append( - DataContent( - uri=uri, - media_type=media_type, + MCPServerToolResultContent( + call_id=call_id, + output=[TextContent(text=item.output)], raw_representation=item, ) ) - # TODO(peterychang): Add support for other content types + case "image_generation_call": # ResponseOutputImageGenerationCall + image_output: DataContent | None = None + if item.result: + base64_data = item.result + image_format = 
DataContent.detect_image_format_from_base64(base64_data) + image_output = DataContent( + data=base64_data, + media_type=f"image/{image_format}" if image_format else "image/png", + raw_representation=item.result, + ) + image_id = item.id + contents.append( + ImageGenerationToolCallContent( + image_id=image_id, + raw_representation=item, + ) + ) + contents.append( + ImageGenerationToolResultContent( + image_id=image_id, + outputs=image_output, + raw_representation=item, + ) + ) case _: logger.debug("Unparsed output of type: %s: %s", item.type, item) response_message = ChatMessage(role="assistant", contents=contents) @@ -973,7 +1046,10 @@ def _parse_chunk_from_openai( # McpApprovalRequest, # ResponseCustomToolCall, case "function_call": - function_call_ids[event.output_index] = (event_item.call_id, event_item.name) + function_call_ids[event.output_index] = ( + event_item.call_id, + event_item.name, + ) case "mcp_approval_request": contents.append( FunctionApprovalRequestContent( @@ -987,23 +1063,78 @@ def _parse_chunk_from_openai( ), ) ) + case "mcp_call": + call_id = getattr(event_item, "id", None) or getattr(event_item, "call_id", None) or "" + contents.append( + MCPServerToolCallContent( + call_id=call_id, + tool_name=getattr(event_item, "name", "") or "", + server_name=getattr(event_item, "server_label", None), + arguments=getattr(event_item, "arguments", None), + raw_representation=event_item, + ) + ) + result_output = ( + getattr(event_item, "result", None) + or getattr(event_item, "output", None) + or getattr(event_item, "outputs", None) + ) + parsed_output: list[Contents] | None = None + if result_output: + normalized = ( + result_output + if isinstance(result_output, Sequence) + and not isinstance(result_output, (str, bytes, MutableMapping)) + else [result_output] + ) + parsed_output = [_parse_content(output_item) for output_item in normalized] + contents.append( + MCPServerToolResultContent( + call_id=call_id, + output=parsed_output, + 
raw_representation=event_item, + ) + ) case "code_interpreter_call": # ResponseOutputCodeInterpreterCall + call_id = getattr(event_item, "call_id", None) or getattr(event_item, "id", None) + outputs: list[Contents] = [] if hasattr(event_item, "outputs") and event_item.outputs: for code_output in event_item.outputs: - if code_output.type == "logs": - contents.append(TextContent(text=code_output.logs, raw_representation=event_item)) - if code_output.type == "image": - contents.append( + if getattr(code_output, "type", None) == "logs": + outputs.append( + TextContent( + text=cast(Any, code_output).logs, + raw_representation=code_output, + ) + ) + elif getattr(code_output, "type", None) == "image": + outputs.append( UriContent( - uri=code_output.url, - raw_representation=event_item, - # no more specific media type then this can be inferred + uri=cast(Any, code_output).url, + raw_representation=code_output, media_type="image", ) ) - elif hasattr(event_item, "code") and event_item.code: - # fallback if no output was returned is the code: - contents.append(TextContent(text=event_item.code, raw_representation=event_item)) + if hasattr(event_item, "code") and event_item.code: + contents.append( + CodeInterpreterToolCallContent( + call_id=call_id, + inputs=[ + TextContent( + text=event_item.code, + raw_representation=event_item, + ) + ], + raw_representation=event_item, + ) + ) + contents.append( + CodeInterpreterToolResultContent( + call_id=call_id, + outputs=outputs, + raw_representation=event_item, + ) + ) case "reasoning": # ResponseOutputReasoning if hasattr(event_item, "content") and event_item.content: for index, reasoning_content in enumerate(event_item.content): @@ -1031,7 +1162,10 @@ def _parse_chunk_from_openai( call_id=call_id, name=name, arguments=event.delta, - additional_properties={"output_index": event.output_index, "fc_id": event.item_id}, + additional_properties={ + "output_index": event.output_index, + "fc_id": event.item_id, + }, 
raw_representation=event, ) ) @@ -1043,14 +1177,27 @@ def _parse_chunk_from_openai( # Use helper function to create data URI from base64 uri, media_type = DataContent.create_data_uri_from_base64(image_base64) + image_output = DataContent( + uri=uri, + media_type=media_type, + additional_properties={ + "partial_image_index": partial_index, + "is_partial_image": True, + }, + raw_representation=event, + ) + + image_id = getattr(event, "item_id", None) contents.append( - DataContent( - uri=uri, - media_type=media_type, - additional_properties={ - "partial_image_index": partial_index, - "is_partial_image": True, - }, + ImageGenerationToolCallContent( + image_id=image_id, + raw_representation=event, + ) + ) + contents.append( + ImageGenerationToolResultContent( + image_id=image_id, + outputs=image_output, raw_representation=event, ) ) diff --git a/python/packages/core/pyproject.toml b/python/packages/core/pyproject.toml index eb7cdcefb7..7096057690 100644 --- a/python/packages/core/pyproject.toml +++ b/python/packages/core/pyproject.toml @@ -4,7 +4,7 @@ description = "Microsoft Agent Framework for building AI Agents with Python. 
Thi authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251223" +version = "1.0.0b260107" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/core/tests/azure/test_azure_responses_client.py b/python/packages/core/tests/azure/test_azure_responses_client.py index 9b59a3d41a..ec19eaf833 100644 --- a/python/packages/core/tests/azure/test_azure_responses_client.py +++ b/python/packages/core/tests/azure/test_azure_responses_client.py @@ -552,26 +552,24 @@ async def test_azure_responses_client_agent_chat_options_agent_level() -> None: async def test_azure_responses_client_agent_hosted_mcp_tool() -> None: """Integration test for HostedMCPTool with Azure Response Agent using Microsoft Learn MCP.""" - mcp_tool = HostedMCPTool( - name="Microsoft Learn MCP", - url="https://learn.microsoft.com/api/mcp", - description="A Microsoft Learn MCP server for documentation questions", - approval_mode="never_require", - ) - async with ChatAgent( chat_client=AzureOpenAIResponsesClient(credential=AzureCliCredential()), instructions="You are a helpful assistant that can help with microsoft documentation questions.", - tools=[mcp_tool], + tools=HostedMCPTool( + name="Microsoft Learn MCP", + url="https://learn.microsoft.com/api/mcp", + description="A Microsoft Learn MCP server for documentation questions", + approval_mode="never_require", + ), ) as agent: response = await agent.run( "How to create an Azure storage account using az cli?", - max_tokens=200, + # this needs to be high enough to handle the full MCP tool response. 
+ max_tokens=5000, ) assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 + assert response.text # Should contain Azure-related content since it's asking about Azure CLI assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) diff --git a/python/packages/core/tests/core/test_agents.py b/python/packages/core/tests/core/test_agents.py index a6df07cbbe..7611df0cb0 100644 --- a/python/packages/core/tests/core/test_agents.py +++ b/python/packages/core/tests/core/test_agents.py @@ -632,3 +632,93 @@ def echo_thread_info(text: str, **kwargs: Any) -> str: # type: ignore[reportUnk assert result.text == "done" assert captured.get("has_thread") is True assert captured.get("has_message_store") is True + + +async def test_chat_agent_tool_choice_run_level_overrides_agent_level( + chat_client_base: Any, ai_function_tool: Any +) -> None: + """Verify that tool_choice passed to run() overrides agent-level tool_choice.""" + from agent_framework import ChatOptions, ToolMode + + captured_options: list[ChatOptions] = [] + + # Store the original inner method + original_inner = chat_client_base._inner_get_response + + async def capturing_inner( + *, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + ) -> ChatResponse: + captured_options.append(chat_options) + return await original_inner(messages=messages, chat_options=chat_options, **kwargs) + + chat_client_base._inner_get_response = capturing_inner + + # Create agent with agent-level tool_choice="auto" and a tool (tools required for tool_choice to be meaningful) + agent = ChatAgent(chat_client=chat_client_base, tool_choice="auto", tools=[ai_function_tool]) + + # Run with run-level tool_choice="required" + await agent.run("Hello", tool_choice="required") + + # Verify the client received tool_choice="required", not "auto" + assert len(captured_options) >= 1 + assert captured_options[0].tool_choice == "required" + 
assert captured_options[0].tool_choice == ToolMode.REQUIRED_ANY + + +async def test_chat_agent_tool_choice_agent_level_used_when_run_level_not_specified( + chat_client_base: Any, ai_function_tool: Any +) -> None: + """Verify that agent-level tool_choice is used when run() doesn't specify one.""" + from agent_framework import ChatOptions, ToolMode + + captured_options: list[ChatOptions] = [] + + original_inner = chat_client_base._inner_get_response + + async def capturing_inner( + *, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + ) -> ChatResponse: + captured_options.append(chat_options) + return await original_inner(messages=messages, chat_options=chat_options, **kwargs) + + chat_client_base._inner_get_response = capturing_inner + + # Create agent with agent-level tool_choice="required" and a tool + agent = ChatAgent(chat_client=chat_client_base, tool_choice="required", tools=[ai_function_tool]) + + # Run without specifying tool_choice + await agent.run("Hello") + + # Verify the client received tool_choice="required" from agent-level + assert len(captured_options) >= 1 + assert captured_options[0].tool_choice == "required" + assert captured_options[0].tool_choice == ToolMode.REQUIRED_ANY + + +async def test_chat_agent_tool_choice_none_at_run_preserves_agent_level( + chat_client_base: Any, ai_function_tool: Any +) -> None: + """Verify that tool_choice=None at run() uses agent-level default.""" + from agent_framework import ChatOptions + + captured_options: list[ChatOptions] = [] + + original_inner = chat_client_base._inner_get_response + + async def capturing_inner( + *, messages: MutableSequence[ChatMessage], chat_options: ChatOptions, **kwargs: Any + ) -> ChatResponse: + captured_options.append(chat_options) + return await original_inner(messages=messages, chat_options=chat_options, **kwargs) + + chat_client_base._inner_get_response = capturing_inner + + # Create agent with agent-level tool_choice="auto" and a tool + agent = 
ChatAgent(chat_client=chat_client_base, tool_choice="auto", tools=[ai_function_tool]) + + # Run with explicitly passing None (same as not specifying) + await agent.run("Hello", tool_choice=None) + + # Verify the client received tool_choice="auto" from agent-level + assert len(captured_options) >= 1 + assert captured_options[0].tool_choice == "auto" diff --git a/python/packages/core/tests/core/test_tools.py b/python/packages/core/tests/core/test_tools.py index 88c34dc3e8..f70e6ddb56 100644 --- a/python/packages/core/tests/core/test_tools.py +++ b/python/packages/core/tests/core/test_tools.py @@ -10,6 +10,7 @@ from agent_framework import ( AIFunction, HostedCodeInterpreterTool, + HostedImageGenerationTool, HostedMCPTool, ToolProtocol, ai_function, @@ -818,6 +819,30 @@ def test_hosted_code_interpreter_tool_with_unknown_input(): HostedCodeInterpreterTool(inputs={"hosted_file": "file-single"}) +def test_hosted_image_generation_tool_defaults(): + """HostedImageGenerationTool should default name and empty description.""" + tool = HostedImageGenerationTool() + + assert tool.name == "image_generation" + assert tool.description == "" + assert tool.options is None + assert str(tool) == "HostedImageGenerationTool(name=image_generation)" + + +def test_hosted_image_generation_tool_with_options(): + """HostedImageGenerationTool should store options.""" + tool = HostedImageGenerationTool( + description="Generate images", + options={"format": "png", "size": "1024x1024"}, + additional_properties={"quality": "high"}, + ) + + assert tool.name == "image_generation" + assert tool.description == "Generate images" + assert tool.options == {"format": "png", "size": "1024x1024"} + assert tool.additional_properties == {"quality": "high"} + + # region HostedMCPTool tests diff --git a/python/packages/core/tests/core/test_types.py b/python/packages/core/tests/core/test_types.py index 81242147d2..6e6e5bfee7 100644 --- a/python/packages/core/tests/core/test_types.py +++ 
b/python/packages/core/tests/core/test_types.py @@ -18,6 +18,8 @@ ChatResponse, ChatResponseUpdate, CitationAnnotation, + CodeInterpreterToolCallContent, + CodeInterpreterToolResultContent, DataContent, ErrorContent, FinishReason, @@ -27,6 +29,10 @@ FunctionResultContent, HostedFileContent, HostedVectorStoreContent, + ImageGenerationToolCallContent, + ImageGenerationToolResultContent, + MCPServerToolCallContent, + MCPServerToolResultContent, Role, TextContent, TextReasoningContent, @@ -269,6 +275,78 @@ def test_hosted_file_content_minimal(): assert isinstance(content, BaseContent) +def test_hosted_file_content_optional_fields(): + """HostedFileContent should capture optional media type and name.""" + content = HostedFileContent(file_id="file-789", media_type="image/png", name="plot.png") + + assert content.media_type == "image/png" + assert content.name == "plot.png" + assert content.has_top_level_media_type("image") + assert content.has_top_level_media_type("application") is False + + +# region: CodeInterpreter content + + +def test_code_interpreter_tool_call_content_parses_inputs(): + call = CodeInterpreterToolCallContent( + call_id="call-1", + inputs=[{"type": "text", "text": "print('hi')"}], + ) + + assert call.type == "code_interpreter_tool_call" + assert call.call_id == "call-1" + assert call.inputs and isinstance(call.inputs[0], TextContent) + assert call.inputs[0].text == "print('hi')" + + +def test_code_interpreter_tool_result_content_outputs(): + result = CodeInterpreterToolResultContent( + call_id="call-2", + outputs=[ + {"type": "text", "text": "log output"}, + {"type": "uri", "uri": "https://example.com/file.png", "media_type": "image/png"}, + ], + ) + + assert result.type == "code_interpreter_tool_result" + assert result.call_id == "call-2" + assert result.outputs is not None + assert isinstance(result.outputs[0], TextContent) + assert isinstance(result.outputs[1], UriContent) + + +# region: Image generation content + + +def 
test_image_generation_tool_contents(): + call = ImageGenerationToolCallContent(image_id="img-1") + outputs = [DataContent(data=b"1234", media_type="image/png")] + result = ImageGenerationToolResultContent(image_id="img-1", outputs=outputs) + + assert call.type == "image_generation_tool_call" + assert call.image_id == "img-1" + assert result.type == "image_generation_tool_result" + assert result.image_id == "img-1" + assert result.outputs and isinstance(result.outputs[0], DataContent) + + +# region: MCP server tool content + + +def test_mcp_server_tool_call_and_result(): + call = MCPServerToolCallContent(call_id="c-1", tool_name="tool", server_name="server", arguments={"x": 1}) + assert call.type == "mcp_server_tool_call" + assert call.arguments == {"x": 1} + + result = MCPServerToolResultContent(call_id="c-1", output=[{"type": "text", "text": "done"}]) + assert result.type == "mcp_server_tool_result" + assert result.output + + with raises(ValueError): + MCPServerToolCallContent(call_id="", tool_name="tool") + + # region: HostedVectorStoreContent @@ -469,6 +547,15 @@ def test_function_approval_serialization_roundtrip(): # The Contents union will need to be handled differently when we fully migrate +def test_function_approval_accepts_mcp_call(): + """Ensure FunctionApprovalRequestContent supports MCP server tool calls.""" + mcp_call = MCPServerToolCallContent(call_id="c-mcp", tool_name="tool", server_name="srv", arguments={"x": 1}) + req = FunctionApprovalRequestContent(id="req-mcp", function_call=mcp_call) + + assert isinstance(req.function_call, MCPServerToolCallContent) + assert req.function_call.call_id == "c-mcp" + + # region BaseContent Serialization @@ -844,6 +931,54 @@ def test_chat_options_and(ai_function_tool, ai_tool) -> None: assert options3.additional_properties.get("p") == 1 +def test_chat_options_and_tool_choice_override() -> None: + """Test that tool_choice from other takes precedence in ChatOptions merge.""" + # Agent-level defaults to "auto" + 
agent_options = ChatOptions(model_id="gpt-4o", tool_choice="auto") + # Run-level specifies "required" + run_options = ChatOptions(tool_choice="required") + + merged = agent_options & run_options + + # Run-level should override agent-level + assert merged.tool_choice == "required" + assert merged.model_id == "gpt-4o" # Other fields preserved + + +def test_chat_options_and_tool_choice_none_in_other_uses_self() -> None: + """Test that when other.tool_choice is None, self.tool_choice is used.""" + agent_options = ChatOptions(tool_choice="auto") + run_options = ChatOptions(model_id="gpt-4.1") # tool_choice is None + + merged = agent_options & run_options + + # Should keep agent-level tool_choice since run-level is None + assert merged.tool_choice == "auto" + assert merged.model_id == "gpt-4.1" + + +def test_chat_options_and_tool_choice_with_tool_mode() -> None: + """Test ChatOptions merge with ToolMode objects.""" + agent_options = ChatOptions(tool_choice=ToolMode.AUTO) + run_options = ChatOptions(tool_choice=ToolMode.REQUIRED_ANY) + + merged = agent_options & run_options + + assert merged.tool_choice == ToolMode.REQUIRED_ANY + assert merged.tool_choice == "required" # ToolMode equality with string + + +def test_chat_options_and_tool_choice_required_specific_function() -> None: + """Test ChatOptions merge with required specific function.""" + agent_options = ChatOptions(tool_choice="auto") + run_options = ChatOptions(tool_choice=ToolMode.REQUIRED(function_name="get_weather")) + + merged = agent_options & run_options + + assert merged.tool_choice == "required" + assert merged.tool_choice.required_function_name == "get_weather" + + # region Agent Response Fixtures @@ -2085,3 +2220,55 @@ def test_prepare_function_call_results_nested_pydantic_model(): assert "Seattle" in json_result assert "rainy" in json_result assert "18.0" in json_result or "18" in json_result + + +# region prepare_function_call_results with MCP TextContent-like objects + + +def 
test_prepare_function_call_results_text_content_single(): + """Test that objects with text attribute (like MCP TextContent) are properly handled.""" + from dataclasses import dataclass + + @dataclass + class MockTextContent: + text: str + + result = [MockTextContent("Hello from MCP tool!")] + json_result = prepare_function_call_results(result) + + # Should extract text and serialize as JSON array of strings + assert isinstance(json_result, str) + assert json_result == '["Hello from MCP tool!"]' + + +def test_prepare_function_call_results_text_content_multiple(): + """Test that multiple TextContent-like objects are serialized correctly.""" + from dataclasses import dataclass + + @dataclass + class MockTextContent: + text: str + + result = [MockTextContent("First result"), MockTextContent("Second result")] + json_result = prepare_function_call_results(result) + + # Should extract text from each and serialize as JSON array + assert isinstance(json_result, str) + assert json_result == '["First result", "Second result"]' + + +def test_prepare_function_call_results_text_content_with_non_string_text(): + """Test that objects with non-string text attribute are not treated as TextContent.""" + + class BadTextContent: + def __init__(self): + self.text = 12345 # Not a string! 
+ + result = [BadTextContent()] + json_result = prepare_function_call_results(result) + + # Should not extract text since it's not a string, will serialize the object + assert isinstance(json_result, str) + + +# endregion diff --git a/python/packages/core/tests/openai/test_openai_chat_client.py b/python/packages/core/tests/openai/test_openai_chat_client.py index d2ddc1fb02..18854799fd 100644 --- a/python/packages/core/tests/openai/test_openai_chat_client.py +++ b/python/packages/core/tests/openai/test_openai_chat_client.py @@ -236,6 +236,36 @@ async def test_openai_chat_completion_response() -> None: assert "scientists" in response.text +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_chat_completion_response_params() -> None: + """Test OpenAI chat completion responses.""" + openai_chat_client = OpenAIChatClient() + + assert isinstance(openai_chat_client, ChatClientProtocol) + + messages: list[ChatMessage] = [] + messages.append( + ChatMessage( + role="user", + text="Emily and David, two passionate scientists, met during a research expedition to Antarctica. 
" + "Bonded by their love for the natural world and shared curiosity, they uncovered a " + "groundbreaking phenomenon in glaciology that could potentially reshape our understanding " + "of climate change.", + ) + ) + messages.append(ChatMessage(role="user", text="who are Emily and David?")) + + # Test that the client can be used to get a response + response = await openai_chat_client.get_response( + messages=messages, chat_options=ChatOptions(max_tokens=150, temperature=0.7, top_p=0.9) + ) + + assert response is not None + assert isinstance(response, ChatResponse) + assert "scientists" in response.text + + @pytest.mark.flaky @skip_if_openai_integration_tests_disabled async def test_openai_chat_completion_response_tools() -> None: diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py index a3c7ff5323..778ce843ee 100644 --- a/python/packages/core/tests/openai/test_openai_responses_client.py +++ b/python/packages/core/tests/openai/test_openai_responses_client.py @@ -26,6 +26,8 @@ ChatMessage, ChatResponse, ChatResponseUpdate, + CodeInterpreterToolCallContent, + CodeInterpreterToolResultContent, DataContent, FunctionApprovalRequestContent, FunctionApprovalResponseContent, @@ -34,9 +36,12 @@ HostedCodeInterpreterTool, HostedFileContent, HostedFileSearchTool, + HostedImageGenerationTool, HostedMCPTool, HostedVectorStoreContent, HostedWebSearchTool, + ImageGenerationToolCallContent, + ImageGenerationToolResultContent, MCPStreamableHTTPTool, Role, TextContent, @@ -612,11 +617,14 @@ def test_response_content_creation_with_code_interpreter() -> None: response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore assert len(response.messages[0].contents) == 2 - assert isinstance(response.messages[0].contents[0], TextContent) - assert response.messages[0].contents[0].text == "Code execution log" - assert 
isinstance(response.messages[0].contents[1], UriContent) - assert response.messages[0].contents[1].uri == "https://example.com/image.png" - assert response.messages[0].contents[1].media_type == "image" + call_content, result_content = response.messages[0].contents + assert isinstance(call_content, CodeInterpreterToolCallContent) + assert call_content.inputs is not None + assert isinstance(call_content.inputs[0], TextContent) + assert isinstance(result_content, CodeInterpreterToolResultContent) + assert result_content.outputs is not None + assert any(isinstance(out, TextContent) for out in result_content.outputs) + assert any(isinstance(out, UriContent) for out in result_content.outputs) def test_response_content_creation_with_function_call() -> None: @@ -761,14 +769,13 @@ def test_prepare_tools_for_openai_with_raw_image_generation() -> None: """Test that raw image_generation tool dict is handled correctly with parameter mapping.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - # Test with raw tool dict using user-friendly parameter names + # Test with raw tool dict using OpenAI parameters directly tool = { "type": "image_generation", "size": "1536x1024", "quality": "high", - "format": "webp", # Will be mapped to output_format - "compression": 75, # Will be mapped to output_compression - "background": "transparent", + "output_format": "webp", + "output_quality": 75, } resp_tools = client._prepare_tools_for_openai([tool]) @@ -780,10 +787,8 @@ def test_prepare_tools_for_openai_with_raw_image_generation() -> None: assert image_tool["type"] == "image_generation" assert image_tool["size"] == "1536x1024" assert image_tool["quality"] == "high" - assert image_tool["background"] == "transparent" - # Check parameter name mapping assert image_tool["output_format"] == "webp" - assert image_tool["output_compression"] == 75 + assert image_tool["output_quality"] == 75 def 
test_prepare_tools_for_openai_with_raw_image_generation_openai_responses_params() -> None: @@ -797,7 +802,7 @@ def test_prepare_tools_for_openai_with_raw_image_generation_openai_responses_par "model": "gpt-image-1", "input_fidelity": "high", "moderation": "strict", - "partial_images": 2, # Should be integer 0-3 + "output_format": "png", } resp_tools = client._prepare_tools_for_openai([tool]) @@ -815,7 +820,7 @@ def test_prepare_tools_for_openai_with_raw_image_generation_openai_responses_par assert tool_dict["model"] == "gpt-image-1" assert tool_dict["input_fidelity"] == "high" assert tool_dict["moderation"] == "strict" - assert tool_dict["partial_images"] == 2 + assert tool_dict["output_format"] == "png" def test_prepare_tools_for_openai_with_raw_image_generation_minimal() -> None: @@ -836,6 +841,24 @@ def test_prepare_tools_for_openai_with_raw_image_generation_minimal() -> None: assert len(image_tool) == 1 +def test_prepare_tools_for_openai_with_hosted_image_generation() -> None: + """Test HostedImageGenerationTool conversion.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + tool = HostedImageGenerationTool( + description="Generate images", + options={"output_format": "png", "size": "512x512"}, + additional_properties={"quality": "high"}, + ) + + resp_tools = client._prepare_tools_for_openai([tool]) + assert len(resp_tools) == 1 + image_tool = resp_tools[0] + assert image_tool["type"] == "image_generation" + assert image_tool["output_format"] == "png" + assert image_tool["size"] == "512x512" + assert image_tool["quality"] == "high" + + def test_parse_chunk_from_openai_with_mcp_approval_request() -> None: """Test that a streaming mcp_approval_request event is parsed into FunctionApprovalRequestContent.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -1278,9 +1301,11 @@ def test_parse_chunk_from_openai_code_interpreter() -> None: result = client._parse_chunk_from_openai(mock_event_image, chat_options, 
function_call_ids) # type: ignore assert len(result.contents) == 1 - assert isinstance(result.contents[0], UriContent) - assert result.contents[0].uri == "https://example.com/plot.png" - assert result.contents[0].media_type == "image" + assert isinstance(result.contents[0], CodeInterpreterToolResultContent) + assert result.contents[0].outputs + assert any( + isinstance(out, UriContent) and out.uri == "https://example.com/plot.png" for out in result.contents[0].outputs + ) def test_parse_chunk_from_openai_reasoning() -> None: @@ -1495,12 +1520,16 @@ def test_parse_response_from_openai_image_generation_raw_base64(): with patch.object(client, "_get_metadata_from_response", return_value={}): response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore - # Verify the response contains DataContent with proper URI and media_type - assert len(response.messages[0].contents) == 1 - content = response.messages[0].contents[0] - assert isinstance(content, DataContent) - assert content.uri.startswith("data:image/png;base64,") - assert content.media_type == "image/png" + # Verify the response contains call + result with DataContent output + assert len(response.messages[0].contents) == 2 + call_content, result_content = response.messages[0].contents + assert isinstance(call_content, ImageGenerationToolCallContent) + assert isinstance(result_content, ImageGenerationToolResultContent) + assert result_content.outputs + data_out = result_content.outputs + assert isinstance(data_out, DataContent) + assert data_out.uri.startswith("data:image/png;base64,") + assert data_out.media_type == "image/png" def test_parse_response_from_openai_image_generation_existing_data_uri(): @@ -1521,19 +1550,23 @@ def test_parse_response_from_openai_image_generation_existing_data_uri(): valid_webp_base64 = base64.b64encode(webp_signature + b"VP8 fake_data").decode() mock_item = MagicMock() mock_item.type = "image_generation_call" - mock_item.result = 
f"data:image/webp;base64,{valid_webp_base64}" + mock_item.result = valid_webp_base64 mock_response.output = [mock_item] with patch.object(client, "_get_metadata_from_response", return_value={}): response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore - # Verify the response contains DataContent with proper media_type parsed from URI - assert len(response.messages[0].contents) == 1 - content = response.messages[0].contents[0] - assert isinstance(content, DataContent) - assert content.uri == f"data:image/webp;base64,{valid_webp_base64}" - assert content.media_type == "image/webp" + # Verify the response contains call + result with DataContent output + assert len(response.messages[0].contents) == 2 + call_content, result_content = response.messages[0].contents + assert isinstance(call_content, ImageGenerationToolCallContent) + assert isinstance(result_content, ImageGenerationToolResultContent) + assert result_content.outputs + data_out = result_content.outputs + assert isinstance(data_out, DataContent) + assert data_out.uri == f"data:image/webp;base64,{valid_webp_base64}" + assert data_out.media_type == "image/webp" def test_parse_response_from_openai_image_generation_format_detection(): @@ -1559,10 +1592,12 @@ def test_parse_response_from_openai_image_generation_format_detection(): with patch.object(client, "_get_metadata_from_response", return_value={}): response_jpeg = client._parse_response_from_openai(mock_response_jpeg, chat_options=ChatOptions()) # type: ignore - content_jpeg = response_jpeg.messages[0].contents[0] - assert isinstance(content_jpeg, DataContent) - assert content_jpeg.media_type == "image/jpeg" - assert "data:image/jpeg;base64," in content_jpeg.uri + result_contents = response_jpeg.messages[0].contents + assert isinstance(result_contents[1], ImageGenerationToolResultContent) + outputs = result_contents[1].outputs + assert outputs and isinstance(outputs, DataContent) + assert outputs.media_type == 
"image/jpeg" + assert "data:image/jpeg;base64," in outputs.uri # Test WEBP detection webp_signature = b"RIFF" + b"\x00\x00\x00\x00" + b"WEBP" @@ -1583,10 +1618,10 @@ def test_parse_response_from_openai_image_generation_format_detection(): with patch.object(client, "_get_metadata_from_response", return_value={}): response_webp = client._parse_response_from_openai(mock_response_webp, chat_options=ChatOptions()) # type: ignore - content_webp = response_webp.messages[0].contents[0] - assert isinstance(content_webp, DataContent) - assert content_webp.media_type == "image/webp" - assert "data:image/webp;base64," in content_webp.uri + outputs_webp = response_webp.messages[0].contents[1].outputs + assert outputs_webp and isinstance(outputs_webp, DataContent) + assert outputs_webp.media_type == "image/webp" + assert "data:image/webp;base64," in outputs_webp.uri def test_parse_response_from_openai_image_generation_fallback(): @@ -1615,9 +1650,11 @@ def test_parse_response_from_openai_image_generation_fallback(): response = client._parse_response_from_openai(mock_response, chat_options=ChatOptions()) # type: ignore # Verify it falls back to PNG format for unrecognized binary data - assert len(response.messages[0].contents) == 1 - content = response.messages[0].contents[0] - assert isinstance(content, DataContent) + assert len(response.messages[0].contents) == 2 + result_content = response.messages[0].contents[1] + assert isinstance(result_content, ImageGenerationToolResultContent) + assert result_content.outputs + content = result_content.outputs assert content.media_type == "image/png" assert f"data:image/png;base64,{unrecognized_base64}" == content.uri @@ -2153,38 +2190,30 @@ async def test_openai_responses_client_agent_hosted_code_interpreter_tool(): @pytest.mark.flaky @skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_raw_image_generation_tool(): +async def test_openai_responses_client_agent_image_generation_tool(): """Test OpenAI 
Responses Client agent with raw image_generation tool through OpenAIResponsesClient.""" async with ChatAgent( chat_client=OpenAIResponsesClient(), instructions="You are a helpful assistant that can generate images.", - tools=[{"type": "image_generation", "size": "1024x1024", "quality": "low", "format": "png"}], + tools=HostedImageGenerationTool(options={"image_size": "1024x1024", "media_type": "png"}), ) as agent: # Test image generation functionality response = await agent.run("Generate an image of a cute red panda sitting on a tree branch in a forest.") assert isinstance(response, AgentRunResponse) + assert response.messages - # For image generation, we expect to get some response content - # This could be DataContent with image data, UriContent - assert response.messages is not None and len(response.messages) > 0 - - # Check that we have some kind of content in the response - total_contents = sum(len(message.contents) for message in response.messages) - assert total_contents > 0, f"Expected some content in response messages, got {total_contents} contents" - - # Verify we got image content - look for DataContent with URI starting with "data:image" + # Verify we got image content - look for ImageGenerationToolResultContent image_content_found = False for message in response.messages: for content in message.contents: - uri = getattr(content, "uri", None) - if uri and uri.startswith("data:image"): + if content.type == "image_generation_tool_result" and content.outputs: image_content_found = True break if image_content_found: break - # The test passes if we got image content (which we did based on the visible base64 output) + # The test passes if we got image content assert image_content_found, "Expected to find image content in response" @@ -2306,26 +2335,24 @@ async def test_openai_responses_client_agent_chat_options_agent_level() -> None: async def test_openai_responses_client_agent_hosted_mcp_tool() -> None: """Integration test for HostedMCPTool with OpenAI 
Response Agent using Microsoft Learn MCP.""" - mcp_tool = HostedMCPTool( - name="Microsoft Learn MCP", - url="https://learn.microsoft.com/api/mcp", - description="A Microsoft Learn MCP server for documentation questions", - approval_mode="never_require", - ) - async with ChatAgent( chat_client=OpenAIResponsesClient(), instructions="You are a helpful assistant that can help with microsoft documentation questions.", - tools=[mcp_tool], + tools=HostedMCPTool( + name="Microsoft Learn MCP", + url="https://learn.microsoft.com/api/mcp", + description="A Microsoft Learn MCP server for documentation questions", + approval_mode="never_require", + ), ) as agent: response = await agent.run( "How to create an Azure storage account using az cli?", - max_tokens=200, + # this needs to be high enough to handle the full MCP tool response. + max_tokens=5000, ) assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 + assert response.text # Should contain Azure-related content since it's asking about Azure CLI assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) @@ -2355,3 +2382,91 @@ async def test_openai_responses_client_agent_local_mcp_tool() -> None: assert len(response.text) > 0 # Should contain Azure-related content since it's asking about Azure CLI assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) + + +class ReleaseBrief(BaseModel): + """Structured output model for release brief testing.""" + + title: str + summary: str + highlights: list[str] + model_config = {"extra": "forbid"} + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_with_response_format_pydantic() -> None: + """Integration test for response_format with Pydantic model using OpenAI Responses Client.""" + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant that 
returns structured JSON responses.", + ) as agent: + response = await agent.run( + "Summarize the following release notes into a ReleaseBrief:\n\n" + "Version 2.0 Release Notes:\n" + "- Added new streaming API for real-time responses\n" + "- Improved error handling with detailed messages\n" + "- Performance boost of 50% in batch processing\n" + "- Fixed memory leak in connection pooling", + response_format=ReleaseBrief, + ) + + # Validate response + assert isinstance(response, AgentRunResponse) + assert response.value is not None + assert isinstance(response.value, ReleaseBrief) + + # Validate structured output fields + brief = response.value + assert len(brief.title) > 0 + assert len(brief.summary) > 0 + assert len(brief.highlights) > 0 + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_with_runtime_json_schema() -> None: + """Integration test for response_format with runtime JSON schema using OpenAI Responses Client.""" + runtime_schema = { + "title": "WeatherDigest", + "type": "object", + "properties": { + "location": {"type": "string"}, + "conditions": {"type": "string"}, + "temperature_c": {"type": "number"}, + "advisory": {"type": "string"}, + }, + "required": ["location", "conditions", "temperature_c", "advisory"], + "additionalProperties": False, + } + + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="Return only JSON that matches the provided schema. 
Do not add commentary.", + ) as agent: + response = await agent.run( + "Give a brief weather digest for Seattle.", + additional_chat_options={ + "response_format": { + "type": "json_schema", + "json_schema": { + "name": runtime_schema["title"], + "strict": True, + "schema": runtime_schema, + }, + }, + }, + ) + + # Validate response + assert isinstance(response, AgentRunResponse) + assert response.text is not None + + # Parse JSON and validate structure + import json + + parsed = json.loads(response.text) + assert "location" in parsed + assert "conditions" in parsed + assert "temperature_c" in parsed + assert "advisory" in parsed diff --git a/python/packages/core/tests/workflow/test_executor.py b/python/packages/core/tests/workflow/test_executor.py index 3c5558ac30..176c3027c8 100644 --- a/python/packages/core/tests/workflow/test_executor.py +++ b/python/packages/core/tests/workflow/test_executor.py @@ -3,12 +3,14 @@ import pytest from agent_framework import ( + ChatMessage, Executor, ExecutorCompletedEvent, ExecutorInvokedEvent, Message, WorkflowBuilder, WorkflowContext, + executor, handler, ) @@ -182,8 +184,8 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None: assert collector_completed.data is None -async def test_executor_completed_event_none_when_no_messages_sent(): - """Test that ExecutorCompletedEvent.data is None when no messages are sent.""" +async def test_executor_completed_event_includes_yielded_outputs(): + """Test that ExecutorCompletedEvent.data includes yielded outputs.""" from typing_extensions import Never from agent_framework import WorkflowOutputEvent @@ -201,9 +203,10 @@ async def handle(self, text: str, ctx: WorkflowContext[Never, str]) -> None: assert len(completed_events) == 1 assert completed_events[0].executor_id == "yielder" - assert completed_events[0].data is None + # Yielded outputs are now included in ExecutorCompletedEvent.data + assert completed_events[0].data == ["TEST"] - # Verify the output was still yielded 
correctly + # Verify the output was also yielded as WorkflowOutputEvent output_events = [e for e in events if isinstance(e, WorkflowOutputEvent)] assert len(output_events) == 1 assert output_events[0].data == "TEST" @@ -261,3 +264,35 @@ async def handle(self, response: Response, ctx: WorkflowContext) -> None: collector_invoked = next(e for e in invoked_events if e.executor_id == "collector") assert isinstance(collector_invoked.data, Response) assert collector_invoked.data.results == ["HELLO", "HELLO", "HELLO"] + + +async def test_executor_invoked_event_data_not_mutated_by_handler(): + """Test that ExecutorInvokedEvent.data captures original input, not mutated input.""" + + @executor(id="Mutator") + async def mutator(messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: + # The handler mutates the input list by appending new messages + original_len = len(messages) + messages.append(ChatMessage(role="assistant", text="Added by executor")) + await ctx.send_message(messages) + # Verify mutation happened + assert len(messages) == original_len + 1 + + workflow = WorkflowBuilder().set_start_executor(mutator).build() + + # Run with a single user message + input_messages = [ChatMessage(role="user", text="hello")] + events = await workflow.run(input_messages) + + # Find the invoked event for the Mutator executor + invoked_events = [e for e in events if isinstance(e, ExecutorInvokedEvent)] + assert len(invoked_events) == 1 + mutator_invoked = invoked_events[0] + + # The event data should contain ONLY the original input (1 user message) + assert mutator_invoked.executor_id == "Mutator" + assert len(mutator_invoked.data) == 1, ( + f"Expected 1 message (original input), got {len(mutator_invoked.data)}: " + f"{[m.text for m in mutator_invoked.data]}" + ) + assert mutator_invoked.data[0].text == "hello" diff --git a/python/packages/core/tests/workflow/test_workflow_agent.py b/python/packages/core/tests/workflow/test_workflow_agent.py index 
3263eb854e..d2ed8d1394 100644 --- a/python/packages/core/tests/workflow/test_workflow_agent.py +++ b/python/packages/core/tests/workflow/test_workflow_agent.py @@ -702,6 +702,84 @@ async def start_executor(messages: list[ChatMessage], ctx: WorkflowContext) -> N assert unique_text_count == 1, f"Response should appear exactly once, but appeared {unique_text_count} times" +class TestWorkflowAgentAuthorName: + """Test cases for author_name enrichment in WorkflowAgent (GitHub issue #1331).""" + + async def test_agent_run_update_event_gets_executor_id_as_author_name(self): + """Test that AgentRunUpdateEvent gets executor_id as author_name when not already set. + + This validates the fix for GitHub issue #1331: agent responses should include + identification of which agent produced them in multi-agent workflows. + """ + # Create workflow with executor that emits AgentRunUpdateEvent without author_name + executor1 = SimpleExecutor(id="my_executor_id", response_text="Response", emit_streaming=False) + workflow = WorkflowBuilder().set_start_executor(executor1).build() + agent = WorkflowAgent(workflow=workflow, name="Test Agent") + + # Collect streaming updates + updates: list[AgentRunResponseUpdate] = [] + async for update in agent.run_stream("Hello"): + updates.append(update) + + # Verify at least one update was received + assert len(updates) >= 1 + + # Verify author_name is set to executor_id + assert updates[0].author_name == "my_executor_id" + + async def test_agent_run_update_event_preserves_existing_author_name(self): + """Test that existing author_name is preserved and not overwritten.""" + + class AuthorNameExecutor(Executor): + """Executor that sets author_name explicitly.""" + + @handler + async def handle_message(self, message: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: + # Emit update with explicit author_name + update = AgentRunResponseUpdate( + contents=[TextContent(text="Response with author")], + role=Role.ASSISTANT, + 
author_name="custom_author_name", # Explicitly set + message_id=str(uuid.uuid4()), + ) + await ctx.add_event(AgentRunUpdateEvent(executor_id=self.id, data=update)) + + executor = AuthorNameExecutor(id="executor_id") + workflow = WorkflowBuilder().set_start_executor(executor).build() + agent = WorkflowAgent(workflow=workflow, name="Test Agent") + + # Collect streaming updates + updates: list[AgentRunResponseUpdate] = [] + async for update in agent.run_stream("Hello"): + updates.append(update) + + # Verify author_name is preserved (not overwritten with executor_id) + assert len(updates) >= 1 + assert updates[0].author_name == "custom_author_name" + + async def test_multiple_executors_have_distinct_author_names(self): + """Test that multiple executors in a workflow have their own author_name.""" + # Create workflow with two executors + executor1 = SimpleExecutor(id="first_executor", response_text="First", emit_streaming=False) + executor2 = SimpleExecutor(id="second_executor", response_text="Second", emit_streaming=False) + + workflow = WorkflowBuilder().set_start_executor(executor1).add_edge(executor1, executor2).build() + agent = WorkflowAgent(workflow=workflow, name="Multi-Executor Agent") + + # Collect streaming updates + updates: list[AgentRunResponseUpdate] = [] + async for update in agent.run_stream("Hello"): + updates.append(update) + + # Should have updates from both executors + assert len(updates) >= 2 + + # Verify each update has the correct author_name matching its executor + author_names = [u.author_name for u in updates] + assert "first_executor" in author_names + assert "second_executor" in author_names + + class TestWorkflowAgentMergeUpdates: """Test cases specifically for the WorkflowAgent.merge_updates static method.""" diff --git a/python/packages/declarative/pyproject.toml b/python/packages/declarative/pyproject.toml index eeefb80ee5..3b2e2586a9 100644 --- a/python/packages/declarative/pyproject.toml +++ b/python/packages/declarative/pyproject.toml 
@@ -4,7 +4,7 @@ description = "Declarative specification support for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251223" +version = "1.0.0b260107" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/devui/README.md b/python/packages/devui/README.md index 05fe813276..520b03e56f 100644 --- a/python/packages/devui/README.md +++ b/python/packages/devui/README.md @@ -102,12 +102,32 @@ agents/ └── .env # Optional: shared environment variables ``` +### Importing from External Modules + +If your agents import tools or utilities from sibling directories (e.g., `from tools.helpers import my_tool`), you must set `PYTHONPATH` to include the parent directory: + +```bash +# Project structure: +# backend/ +# ├── agents/ +# │ └── my_agent/ +# │ └── agent.py # contains: from tools.helpers import my_tool +# └── tools/ +# └── helpers.py + +# Run from project root with PYTHONPATH +cd backend +PYTHONPATH=. devui ./agents --port 8080 +``` + +Without `PYTHONPATH`, Python cannot find modules in sibling directories and DevUI will report an import error. + ## Viewing Telemetry (Otel Traces) in DevUI -Agent Framework emits OpenTelemetry (Otel) traces for various operations. You can view these traces in DevUI by enabling tracing when starting the server. +Agent Framework emits OpenTelemetry (Otel) traces for various operations. You can view these traces in DevUI by enabling instrumentation when starting the server. 
```bash -devui ./agents --tracing framework +devui ./agents --instrumentation ``` ## OpenAI-Compatible API @@ -196,11 +216,12 @@ Options: --port, -p Port (default: 8080) --host Host (default: 127.0.0.1) --headless API only, no UI - --config YAML config file - --tracing none|framework|workflow|all + --no-open Don't automatically open browser + --instrumentation Enable OpenTelemetry instrumentation --reload Enable auto-reload --mode developer|user (default: developer) --auth Enable Bearer token authentication + --auth-token Custom authentication token ``` ### UI Modes diff --git a/python/packages/devui/agent_framework_devui/__init__.py b/python/packages/devui/agent_framework_devui/__init__.py index 9a480d170e..50010cd9cd 100644 --- a/python/packages/devui/agent_framework_devui/__init__.py +++ b/python/packages/devui/agent_framework_devui/__init__.py @@ -94,7 +94,7 @@ def serve( auto_open: bool = False, cors_origins: list[str] | None = None, ui_enabled: bool = True, - tracing_enabled: bool = False, + instrumentation_enabled: bool = False, mode: str = "developer", auth_enabled: bool = False, auth_token: str | None = None, @@ -109,7 +109,7 @@ def serve( auto_open: Whether to automatically open browser cors_origins: List of allowed CORS origins ui_enabled: Whether to enable the UI - tracing_enabled: Whether to enable OpenTelemetry tracing + instrumentation_enabled: Whether to enable OpenTelemetry instrumentation mode: Server mode - 'developer' (full access, verbose errors) or 'user' (restricted APIs, generic errors) auth_enabled: Whether to enable Bearer token authentication auth_token: Custom authentication token (auto-generated if not provided with auth_enabled=True) @@ -172,22 +172,12 @@ def serve( os.environ["AUTH_REQUIRED"] = "true" os.environ["DEVUI_AUTH_TOKEN"] = auth_token - # Configure tracing environment variables if enabled - if tracing_enabled: - import os - - # Only set if not already configured by user - if not os.environ.get("ENABLE_INSTRUMENTATION"): - 
os.environ["ENABLE_INSTRUMENTATION"] = "true" - logger.info("Set ENABLE_INSTRUMENTATION=true for tracing") - - if not os.environ.get("ENABLE_SENSITIVE_DATA"): - os.environ["ENABLE_SENSITIVE_DATA"] = "true" - logger.info("Set ENABLE_SENSITIVE_DATA=true for tracing") + # Enable instrumentation if requested + if instrumentation_enabled: + from agent_framework.observability import enable_instrumentation - if not os.environ.get("OTLP_ENDPOINT"): - os.environ["OTLP_ENDPOINT"] = "http://localhost:4317" - logger.info("Set OTLP_ENDPOINT=http://localhost:4317 for tracing") + enable_instrumentation(enable_sensitive_data=True) + logger.info("Enabled Agent Framework instrumentation with sensitive data") # Create server with direct parameters server = DevServer( diff --git a/python/packages/devui/agent_framework_devui/_cli.py b/python/packages/devui/agent_framework_devui/_cli.py index 5bc06ac3c8..261cfe4331 100644 --- a/python/packages/devui/agent_framework_devui/_cli.py +++ b/python/packages/devui/agent_framework_devui/_cli.py @@ -28,7 +28,7 @@ def create_cli_parser() -> argparse.ArgumentParser: devui ./agents # Scan specific directory devui --port 8000 # Custom port devui --headless # API only, no UI - devui --tracing # Enable OpenTelemetry tracing + devui --instrumentation # Enable OpenTelemetry instrumentation """, ) @@ -53,7 +53,7 @@ def create_cli_parser() -> argparse.ArgumentParser: parser.add_argument("--reload", action="store_true", help="Enable auto-reload for development") - parser.add_argument("--tracing", action="store_true", help="Enable OpenTelemetry tracing for Agent Framework") + parser.add_argument("--instrumentation", action="store_true", help="Enable OpenTelemetry instrumentation") parser.add_argument( "--mode", @@ -182,7 +182,7 @@ def main() -> None: host=args.host, auto_open=not args.no_open, ui_enabled=ui_enabled, - tracing_enabled=args.tracing, + instrumentation_enabled=args.instrumentation, mode=mode, auth_enabled=args.auth, auth_token=args.auth_token, # 
Pass through explicit token only diff --git a/python/packages/devui/agent_framework_devui/_conversations.py b/python/packages/devui/agent_framework_devui/_conversations.py index 512b92f647..86db2172e1 100644 --- a/python/packages/devui/agent_framework_devui/_conversations.py +++ b/python/packages/devui/agent_framework_devui/_conversations.py @@ -176,6 +176,31 @@ async def list_conversations_by_metadata(self, metadata_filter: dict[str, str]) """ pass + @abstractmethod + def add_trace(self, conversation_id: str, trace_event: dict[str, Any]) -> None: + """Add a trace event to the conversation for context inspection. + + Traces capture execution metadata like token usage, timing, and LLM context + that isn't stored in the AgentThread but is useful for debugging. + + Args: + conversation_id: Conversation ID + trace_event: Trace event data (from ResponseTraceEvent.data) + """ + pass + + @abstractmethod + def get_traces(self, conversation_id: str) -> list[dict[str, Any]]: + """Get all trace events for a conversation. + + Args: + conversation_id: Conversation ID + + Returns: + List of trace event dicts, or empty list if not found + """ + pass + class InMemoryConversationStore(ConversationStore): """In-memory conversation storage wrapping AgentThread. @@ -215,6 +240,7 @@ def create_conversation( "metadata": metadata or {}, "created_at": created_at, "items": [], + "traces": [], # Trace events for context inspection (token usage, timing, etc.) 
} # Initialize item index for this conversation @@ -407,10 +433,20 @@ async def list_items( elif content_type == "function_result": # Function result - create separate ConversationItem call_id = getattr(content, "call_id", None) - # Output is stored in additional_properties - output = "" - if hasattr(content, "additional_properties"): - output = content.additional_properties.get("output", "") + # Output is stored in the 'result' field of FunctionResultContent + result_value = getattr(content, "result", None) + # Convert result to string (it could be dict, list, or other types) + if result_value is None: + output = "" + elif isinstance(result_value, str): + output = result_value + else: + import json + + try: + output = json.dumps(result_value) + except (TypeError, ValueError): + output = str(result_value) if call_id: function_results.append( @@ -556,6 +592,34 @@ def get_thread(self, conversation_id: str) -> AgentThread | None: conv_data = self._conversations.get(conversation_id) return conv_data["thread"] if conv_data else None + def add_trace(self, conversation_id: str, trace_event: dict[str, Any]) -> None: + """Add a trace event to the conversation for context inspection. + + Traces capture execution metadata like token usage, timing, and LLM context + that isn't stored in the AgentThread but is useful for debugging. + + Args: + conversation_id: Conversation ID + trace_event: Trace event data (from ResponseTraceEvent.data) + """ + conv_data = self._conversations.get(conversation_id) + if conv_data: + traces = conv_data.get("traces", []) + traces.append(trace_event) + conv_data["traces"] = traces + + def get_traces(self, conversation_id: str) -> list[dict[str, Any]]: + """Get all trace events for a conversation. 
+ + Args: + conversation_id: Conversation ID + + Returns: + List of trace event dicts, or empty list if not found + """ + conv_data = self._conversations.get(conversation_id) + return conv_data.get("traces", []) if conv_data else [] + async def list_conversations_by_metadata(self, metadata_filter: dict[str, str]) -> list[Conversation]: """Filter conversations by metadata (e.g., agent_id).""" results = [] diff --git a/python/packages/devui/agent_framework_devui/_discovery.py b/python/packages/devui/agent_framework_devui/_discovery.py index a80993b788..9f8fcf0542 100644 --- a/python/packages/devui/agent_framework_devui/_discovery.py +++ b/python/packages/devui/agent_framework_devui/_discovery.py @@ -666,7 +666,16 @@ def _load_module_from_pattern(self, pattern: str) -> tuple[Any | None, Exception logger.debug(f"Successfully imported {pattern}") return module, None - except ModuleNotFoundError: + except ModuleNotFoundError as e: + # Distinguish between "module pattern doesn't exist" vs "module has import errors" + # If the missing module is the pattern itself, it's just not found (try next pattern) + # If the missing module is something else (a dependency), capture the error + missing_module = getattr(e, "name", None) + if missing_module and missing_module != pattern and not pattern.endswith(f".{missing_module}"): + # The module exists but has an import error (missing dependency) + logger.warning(f"Error importing {pattern}: {e}") + return None, e + # The module pattern itself doesn't exist - this is expected, try next pattern logger.debug(f"Import pattern {pattern} not found") return None, None except Exception as e: diff --git a/python/packages/devui/agent_framework_devui/_executor.py b/python/packages/devui/agent_framework_devui/_executor.py index 1f28c8772c..e63dd014fe 100644 --- a/python/packages/devui/agent_framework_devui/_executor.py +++ b/python/packages/devui/agent_framework_devui/_executor.py @@ -4,7 +4,6 @@ import json import logging -import os from 
collections.abc import AsyncGenerator from typing import Any @@ -45,8 +44,8 @@ def __init__( """ self.entity_discovery = entity_discovery self.message_mapper = message_mapper - self._setup_tracing_provider() - self._setup_agent_framework_tracing() + self._setup_instrumentation_provider() + self._setup_agent_framework_instrumentation() # Use provided conversation store or default to in-memory self.conversation_store = conversation_store or InMemoryConversationStore() @@ -56,7 +55,7 @@ def __init__( self.checkpoint_manager = CheckpointConversationManager(self.conversation_store) - def _setup_tracing_provider(self) -> None: + def _setup_instrumentation_provider(self) -> None: """Set up our own TracerProvider so we can add processors.""" try: from opentelemetry import trace @@ -71,7 +70,7 @@ def _setup_tracing_provider(self) -> None: }) provider = TracerProvider(resource=resource) trace.set_tracer_provider(provider) - logger.info("Set up TracerProvider for server tracing") + logger.info("Set up TracerProvider for instrumentation") else: logger.debug("TracerProvider already exists") @@ -80,25 +79,86 @@ def _setup_tracing_provider(self) -> None: except Exception as e: logger.warning(f"Failed to setup TracerProvider: {e}") - def _setup_agent_framework_tracing(self) -> None: - """Set up Agent Framework's built-in tracing.""" - # Configure Agent Framework tracing only if ENABLE_INSTRUMENTATION is set - if os.environ.get("ENABLE_INSTRUMENTATION"): - try: - from agent_framework.observability import OBSERVABILITY_SETTINGS, configure_otel_providers + def _setup_agent_framework_instrumentation(self) -> None: + """Set up Agent Framework's built-in instrumentation.""" + try: + from agent_framework.observability import OBSERVABILITY_SETTINGS, configure_otel_providers - # Only configure if not already executed + # Configure if instrumentation is enabled (via enable_instrumentation() or env var) + if OBSERVABILITY_SETTINGS.ENABLED: + # Only configure providers if not already executed 
if not OBSERVABILITY_SETTINGS._executed_setup: - # Run the configure_otel_providers - # This ensures OTLP exporters are created even if env vars were set late - configure_otel_providers(enable_sensitive_data=True) + # Call configure_otel_providers to set up exporters. + # If OTEL_EXPORTER_OTLP_ENDPOINT is set, exporters will be created automatically. + # If not set, no exporters are created (no console spam), but DevUI's + # TracerProvider from _setup_instrumentation_provider() remains active for local capture. + configure_otel_providers(enable_sensitive_data=OBSERVABILITY_SETTINGS.SENSITIVE_DATA_ENABLED) logger.info("Enabled Agent Framework observability") else: logger.debug("Agent Framework observability already configured") + else: + logger.debug("Instrumentation not enabled, skipping observability setup") + except Exception as e: + logger.warning(f"Failed to enable Agent Framework observability: {e}") + + async def _ensure_mcp_connections(self, agent: Any) -> None: + """Ensure MCP tool connections are healthy before agent execution. + + This is a workaround for an Agent Framework bug where MCP tool connections + can become stale (underlying streams closed) but is_connected remains True. + This happens when HTTP streaming responses end and GeneratorExit propagates. + + This method detects stale connections and reconnects them. It's designed to + be a no-op once the Agent Framework fixes this issue upstream. 
+ + Args: + agent: Agent object that may have MCP tools + """ + if not hasattr(agent, "_local_mcp_tools"): + return + + for mcp_tool in agent._local_mcp_tools: + if not getattr(mcp_tool, "is_connected", False): + continue + + tool_name = getattr(mcp_tool, "name", "unknown") + + try: + # Check if underlying write stream is closed + session = getattr(mcp_tool, "session", None) + if session is None: + continue + + write_stream = getattr(session, "_write_stream", None) + if write_stream is None: + continue + + # Detect stale connection: is_connected=True but stream is closed + is_closed = getattr(write_stream, "_closed", False) + if not is_closed: + continue # Connection is healthy + + # Stale connection detected - reconnect + logger.warning(f"MCP tool '{tool_name}' has stale connection (stream closed), reconnecting...") + + # Clean up old connection + try: + if hasattr(mcp_tool, "close"): + await mcp_tool.close() + except Exception as close_err: + logger.debug(f"Error closing stale MCP tool '{tool_name}': {close_err}") + # Force reset state + mcp_tool.is_connected = False + mcp_tool.session = None + + # Reconnect + if hasattr(mcp_tool, "connect"): + await mcp_tool.connect() + logger.info(f"MCP tool '{tool_name}' reconnected successfully") + except Exception as e: - logger.warning(f"Failed to enable Agent Framework observability: {e}") - else: - logger.debug("ENABLE_INSTRUMENTATION not set, skipping observability setup") + # If detection fails, log and continue - let it fail naturally during execution + logger.debug(f"Error checking MCP tool '{tool_name}' connection: {e}") async def discover_entities(self) -> list[EntityInfo]: """Discover all available entities. 
@@ -192,11 +252,11 @@ async def execute_entity(self, entity_id: str, request: AgentFrameworkRequest) - logger.info(f"Executing {entity_info.type}: {entity_id}") - # Extract session_id from request for trace context - session_id = getattr(request.extra_body, "session_id", None) if request.extra_body else None + # Extract response_id from request for trace context (added by _server.py) + response_id = request.extra_body.get("response_id") if request.extra_body else None # Use simplified trace capture - with capture_traces(session_id=session_id, entity_id=entity_id) as trace_collector: + with capture_traces(response_id=response_id, entity_id=entity_id) as trace_collector: if entity_info.type == "agent": async for event in self._execute_agent(entity_obj, request, trace_collector): yield event @@ -260,6 +320,12 @@ async def _execute_agent( logger.debug(f"Executing agent with text input: {user_message[:100]}...") else: logger.debug(f"Executing agent with multimodal ChatMessage: {type(user_message)}") + + # Workaround for MCP tool stale connection bug (GitHub issue pending) + # When HTTP streaming ends, GeneratorExit can close MCP stdio streams + # but is_connected stays True. Detect and reconnect before execution. 
+ await self._ensure_mcp_connections(agent) + # Check if agent supports streaming if hasattr(agent, "run_stream") and callable(agent.run_stream): # Use Agent Framework's native streaming with optional thread diff --git a/python/packages/devui/agent_framework_devui/_mapper.py b/python/packages/devui/agent_framework_devui/_mapper.py index 5adff1cd2f..021a4a4549 100644 --- a/python/packages/devui/agent_framework_devui/_mapper.py +++ b/python/packages/devui/agent_framework_devui/_mapper.py @@ -12,6 +12,7 @@ from typing import Any, Union from uuid import uuid4 +from agent_framework import ChatMessage, TextContent from openai.types.responses import ( Response, ResponseContentPartAddedEvent, @@ -225,27 +226,128 @@ async def aggregate_to_response(self, events: Sequence[Any], request: AgentFrame Final aggregated OpenAI response """ try: - # Extract text content from events - content_parts = [] + # Collect output items in order + output_items: list[Any] = [] + + # Track text content parts per message (keyed by item_id) + text_parts_by_message: dict[str, list[str]] = {} + + # Track function calls (keyed by call_id) to accumulate arguments + function_calls: dict[str, dict[str, Any]] = {} + + # Track function results (keyed by call_id) + function_results: dict[str, dict[str, Any]] = {} for event in events: - # Extract delta text from ResponseTextDeltaEvent - if hasattr(event, "delta") and hasattr(event, "type") and event.type == "response.output_text.delta": - content_parts.append(event.delta) - - # Combine content - full_content = "".join(content_parts) - - # Create proper OpenAI Response - response_output_text = ResponseOutputText(type="output_text", text=full_content, annotations=[]) - - response_output_message = ResponseOutputMessage( - type="message", - role="assistant", - content=[response_output_text], - id=f"msg_{uuid.uuid4().hex[:8]}", - status="completed", - ) + event_type = getattr(event, "type", None) + + # Handle text deltas - accumulate text per message + if 
event_type == "response.output_text.delta": + item_id = getattr(event, "item_id", "default") + if item_id not in text_parts_by_message: + text_parts_by_message[item_id] = [] + text_parts_by_message[item_id].append(event.delta) + + # Handle output_item.added events (function_call, message, etc.) + elif event_type == "response.output_item.added": + item = getattr(event, "item", None) + if item: + # Handle both object and dict formats + item_type = item.get("type") if isinstance(item, dict) else getattr(item, "type", None) + + # Track function calls to accumulate their arguments + if item_type == "function_call": + # Handle both object and dict formats + if isinstance(item, dict): + call_id = item.get("call_id") or item.get("id") + if call_id: + function_calls[call_id] = { + "id": item.get("id", call_id), + "call_id": call_id, + "name": item.get("name", ""), + "arguments": item.get("arguments", ""), + "type": "function_call", + "status": item.get("status", "completed"), + } + else: + call_id = getattr(item, "call_id", None) or getattr(item, "id", None) + if call_id: + function_calls[call_id] = { + "id": getattr(item, "id", call_id), + "call_id": call_id, + "name": getattr(item, "name", ""), + "arguments": getattr(item, "arguments", ""), + "type": "function_call", + "status": getattr(item, "status", "completed"), + } + + # Other output items (message, etc.) 
- track for later + elif item_type == "message": + # Messages will be built from text_parts_by_message + pass + + # Handle function call arguments delta - accumulate arguments + elif event_type == "response.function_call_arguments.delta": + item_id = getattr(event, "item_id", None) + delta = getattr(event, "delta", "") + # item_id for function calls is the call_id + if item_id and item_id in function_calls: + function_calls[item_id]["arguments"] += delta + + # Handle function result complete events + elif event_type == "response.function_result.complete": + call_id = getattr(event, "call_id", None) + if call_id: + function_results[call_id] = { + "type": "function_call_output", + "call_id": call_id, + "output": getattr(event, "output", ""), + "status": getattr(event, "status", "completed"), + } + + # Build output array in order: function_calls, then final message + + # Add function call items + for _call_id, fc_data in function_calls.items(): + output_items.append(ResponseFunctionToolCall(**fc_data)) + + # Note: function_call_output items are NOT added to output array + # In OpenAI's Responses API, function results are user inputs, not assistant outputs + # The function_results dict is kept for potential future use or debugging + # but we don't include them in the Response output + _ = function_results # Acknowledge but don't use + + # Build final text message from accumulated deltas + # Combine all text parts (usually there's just one message) + all_text_parts = [] + for _item_id, parts in text_parts_by_message.items(): + all_text_parts.extend(parts) + + full_content = "".join(all_text_parts) + + # Only add message if there's text content + if full_content: + response_output_text = ResponseOutputText(type="output_text", text=full_content, annotations=[]) + response_output_message = ResponseOutputMessage( + type="message", + role="assistant", + content=[response_output_text], + id=f"msg_{uuid.uuid4().hex[:8]}", + status="completed", + ) + 
output_items.append(response_output_message) + + # If no output items at all, create an empty message + if not output_items: + response_output_text = ResponseOutputText(type="output_text", text="", annotations=[]) + response_output_message = ResponseOutputMessage( + type="message", + role="assistant", + content=[response_output_text], + id=f"msg_{uuid.uuid4().hex[:8]}", + status="completed", + ) + output_items.append(response_output_message) # Get usage from accumulator (OpenAI standard) request_id = str(id(request)) @@ -278,7 +380,7 @@ async def aggregate_to_response(self, events: Sequence[Any], request: AgentFrame object="response", created_at=datetime.now().timestamp(), model=request.model or "devui", - output=[response_output_message], + output=output_items, usage=usage, parallel_tool_calls=False, tool_choice="none", @@ -501,7 +603,7 @@ async def _convert_agent_update(self, update: Any, context: dict[str, Any]) -> S return events # Check if we're streaming text content - has_text_content = any(content.__class__.__name__ == "TextContent" for content in update.contents) + has_text_content = any(isinstance(content, TextContent) for content in update.contents) # Check if we're in an executor context with an existing item executor_id = context.get("current_executor_id") @@ -791,17 +893,35 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> # Extract text from output data based on type text = None - if hasattr(output_data, "__class__") and output_data.__class__.__name__ == "ChatMessage": + if isinstance(output_data, ChatMessage): # Handle ChatMessage (from Magentic and AgentExecutor with output_response=True) text = getattr(output_data, "text", None) if not text: # Fallback to string representation text = str(output_data) + elif isinstance(output_data, list): + # Handle list of ChatMessage objects (from Magentic yield_output([final_answer])) + text_parts = [] + for item in output_data: + if isinstance(item, ChatMessage): + item_text = 
getattr(item, "text", None) + if item_text: + text_parts.append(item_text) + else: + text_parts.append(str(item)) + elif isinstance(item, str): + text_parts.append(item) + else: + try: + text_parts.append(json.dumps(item, indent=2)) + except (TypeError, ValueError): + text_parts.append(str(item)) + text = "\n".join(text_parts) if text_parts else str(output_data) elif isinstance(output_data, str): # String output text = output_data else: - # Object/dict/list → JSON string + # Object/dict → JSON string try: text = json.dumps(output_data, indent=2) except (TypeError, ValueError): @@ -1081,275 +1201,6 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> return [trace_event] - # Handle Magentic-specific events - if event_class == "MagenticAgentDeltaEvent": - agent_id = getattr(event, "agent_id", "unknown_agent") - text = getattr(event, "text", None) - - if text: - # Check if we're inside an executor - route to executor's item - # This prevents duplicate timeline entries (executor + inner agent) - current_executor_id = context.get("current_executor_id") - executor_item_key = f"exec_item_{current_executor_id}" if current_executor_id else None - - if executor_item_key and executor_item_key in context: - # Route delta to the executor's item instead of creating a new message item - item_id = context[executor_item_key] - - # Emit text delta event routed to the executor's item - return [ - ResponseTextDeltaEvent( - type="response.output_text.delta", - output_index=context.get("output_index", 0), - content_index=0, - item_id=item_id, - delta=text, - logprobs=[], - sequence_number=self._next_sequence(context), - ) - ] - - # Fallback: No executor context - create separate message item (original behavior) - # This handles cases where MagenticAgentDeltaEvent is emitted outside an executor - events = [] - - # Track Magentic agent messages separately from regular messages - # Use timestamp to ensure uniqueness for multiple runs of same agent - 
magentic_key = f"magentic_message_{agent_id}" - - # Check if this is the first delta from this agent (need to create message container) - if magentic_key not in context: - # Create a unique message ID for this agent's streaming session - message_id = f"msg_{agent_id}_{uuid4().hex[:8]}" - context[magentic_key] = message_id - context["output_index"] = context.get("output_index", -1) + 1 - - # Import required types for creating message containers - from openai.types.responses import ResponseOutputMessage, ResponseOutputText - from openai.types.responses.response_content_part_added_event import ( - ResponseContentPartAddedEvent, - ) - from openai.types.responses.response_output_item_added_event import ResponseOutputItemAddedEvent - - # Emit message output item (container for the agent's message) - # This matches what _convert_agent_update does for regular agents - events.append( - ResponseOutputItemAddedEvent( - type="response.output_item.added", - output_index=context["output_index"], - sequence_number=self._next_sequence(context), - item=ResponseOutputMessage( - type="message", - id=message_id, - role="assistant", - content=[], - status="in_progress", - # Add metadata to identify this as a Magentic agent message - metadata={"agent_id": agent_id, "source": "magentic"}, # type: ignore[call-arg] - ), - ) - ) - - # Add content part for text (establishes the text container) - events.append( - ResponseContentPartAddedEvent( - type="response.content_part.added", - output_index=context["output_index"], - content_index=0, - item_id=message_id, - sequence_number=self._next_sequence(context), - part=ResponseOutputText(type="output_text", text="", annotations=[]), - ) - ) - - # Get the message ID for this agent - message_id = context[magentic_key] - - # Emit text delta event using the message ID (matches regular agent behavior) - events.append( - ResponseTextDeltaEvent( - type="response.output_text.delta", - output_index=context["output_index"], - content_index=0, # Always 0 for 
single text content - item_id=message_id, - delta=text, - logprobs=[], - sequence_number=self._next_sequence(context), - ) - ) - return events - - # Handle function calls from Magentic agents - if getattr(event, "function_call_id", None) and getattr(event, "function_call_name", None): - # Handle function call initiation - function_call_id = getattr(event, "function_call_id", None) - function_call_name = getattr(event, "function_call_name", None) - function_call_arguments = getattr(event, "function_call_arguments", None) - - # Track function call for accumulating arguments - context["active_function_calls"][function_call_id] = { - "item_id": function_call_id, - "name": function_call_name, - "arguments_chunks": [], - } - - # Emit function call output item - return [ - ResponseOutputItemAddedEvent( - type="response.output_item.added", - item=ResponseFunctionToolCall( - id=function_call_id, - call_id=function_call_id, - name=function_call_name, - arguments=json.dumps(function_call_arguments) if function_call_arguments else "", - type="function_call", - status="in_progress", - ), - output_index=context["output_index"], - sequence_number=self._next_sequence(context), - ) - ] - - # For other non-text deltas, emit as trace for debugging - return [ - ResponseTraceEventComplete( - type="response.trace.completed", - data={ - "trace_type": "magentic_delta", - "agent_id": agent_id, - "function_call_id": getattr(event, "function_call_id", None), - "function_call_name": getattr(event, "function_call_name", None), - "function_result_id": getattr(event, "function_result_id", None), - "timestamp": datetime.now().isoformat(), - }, - span_id=f"magentic_delta_{uuid4().hex[:8]}", - item_id=context["item_id"], - output_index=context.get("output_index", 0), - sequence_number=self._next_sequence(context), - ) - ] - - if event_class == "MagenticAgentMessageEvent": - agent_id = getattr(event, "agent_id", "unknown_agent") - message = getattr(event, "message", None) - - # Check if we're inside 
an executor - if so, deltas were already routed there - # We don't need to emit a separate message completion event - current_executor_id = context.get("current_executor_id") - executor_item_key = f"exec_item_{current_executor_id}" if current_executor_id else None - - if executor_item_key and executor_item_key in context: - # Deltas were routed to executor item - no separate message item to complete - # The executor's output_item.done will mark completion - logger.debug( - f"MagenticAgentMessageEvent from {agent_id} - " - f"deltas routed to executor {current_executor_id}, skipping" - ) - return [] - - # Fallback: Handle case where we created a separate message item (no executor context) - magentic_key = f"magentic_message_{agent_id}" - - # Check if we were streaming for this agent - if magentic_key in context: - # Mark the streaming message as complete - message_id = context[magentic_key] - - # Import required types - from openai.types.responses import ResponseOutputMessage - from openai.types.responses.response_output_item_done_event import ResponseOutputItemDoneEvent - - # Extract text from ChatMessage for the completed message - text = None - if message and hasattr(message, "text"): - text = message.text - - # Emit output_item.done to mark message as complete - events = [ - ResponseOutputItemDoneEvent( - type="response.output_item.done", - output_index=context["output_index"], - sequence_number=self._next_sequence(context), - item=ResponseOutputMessage( - type="message", - id=message_id, - role="assistant", - content=[], # Content already streamed via deltas - status="completed", - metadata={"agent_id": agent_id, "source": "magentic"}, # type: ignore[call-arg] - ), - ) - ] - - # Clean up context for this agent - del context[magentic_key] - - logger.debug(f"MagenticAgentMessageEvent from {agent_id} marked streaming message as complete") - return events - # No streaming occurred, create a complete message (shouldn't happen normally) - # Extract text from 
ChatMessage - text = None - if message and hasattr(message, "text"): - text = message.text - - if text: - # Emit as output item for this agent - from openai.types.responses import ResponseOutputMessage, ResponseOutputText - from openai.types.responses.response_output_item_added_event import ResponseOutputItemAddedEvent - - context["output_index"] = context.get("output_index", -1) + 1 - - text_content = ResponseOutputText(type="output_text", text=text, annotations=[]) - - output_message = ResponseOutputMessage( - type="message", - id=f"msg_{agent_id}_{uuid4().hex[:8]}", - role="assistant", - content=[text_content], - status="completed", - metadata={"agent_id": agent_id, "source": "magentic"}, # type: ignore[call-arg] - ) - - logger.debug( - f"MagenticAgentMessageEvent from {agent_id} converted to output_item.added (non-streaming)" - ) - return [ - ResponseOutputItemAddedEvent( - type="response.output_item.added", - item=output_message, - output_index=context["output_index"], - sequence_number=self._next_sequence(context), - ) - ] - - if event_class == "MagenticOrchestratorMessageEvent": - orchestrator_id = getattr(event, "orchestrator_id", "orchestrator") - message = getattr(event, "message", None) - kind = getattr(event, "kind", "unknown") - - # Extract text from ChatMessage - text = None - if message and hasattr(message, "text"): - text = message.text - - # Emit as trace event for orchestrator messages (typically task ledger, instructions) - return [ - ResponseTraceEventComplete( - type="response.trace.completed", - data={ - "trace_type": "magentic_orchestrator", - "orchestrator_id": orchestrator_id, - "kind": kind, - "text": text or "", - "timestamp": datetime.now().isoformat(), - }, - span_id=f"magentic_orch_{uuid4().hex[:8]}", - item_id=context["item_id"], - output_index=context.get("output_index", 0), - sequence_number=self._next_sequence(context), - ) - ] - # For unknown/legacy events, still emit as workflow event for backward compatibility # Get event data 
and serialize if it's a SerializationMixin raw_event_data = getattr(event, "data", None) diff --git a/python/packages/devui/agent_framework_devui/_server.py b/python/packages/devui/agent_framework_devui/_server.py index b3a7c751b6..146db9b33d 100644 --- a/python/packages/devui/agent_framework_devui/_server.py +++ b/python/packages/devui/agent_framework_devui/_server.py @@ -407,7 +407,7 @@ async def get_meta() -> MetaResponse: framework="agent_framework", runtime="python", # Python DevUI backend capabilities={ - "tracing": os.getenv("ENABLE_INSTRUMENTATION") == "true", + "instrumentation": os.getenv("ENABLE_INSTRUMENTATION") == "true", "openai_proxy": openai_executor.is_configured, "deployment": True, # Deployment feature is available }, @@ -748,6 +748,11 @@ async def create_response(request: AgentFrameworkRequest, raw_request: Request) response_id = f"resp_{uuid.uuid4().hex[:8]}" logger.info(f"[CANCELLATION] Creating response {response_id} for entity {entity_id}") + # Inject response_id into extra_body for trace context + if request.extra_body is None: + request.extra_body = {} + request.extra_body["response_id"] = response_id + return StreamingResponse( self._stream_with_cancellation(executor, request, response_id), media_type="text/event-stream", @@ -1000,10 +1005,16 @@ async def list_conversation_items( logger.warning(f"Unexpected item type: {type(item)}, converting to dict") serialized_items.append(dict(item)) + # Get stored traces for context inspection (DevUI extension) + traces = executor.conversation_store.get_traces(conversation_id) + return { "object": "list", "data": serialized_items, "has_more": has_more, + "metadata": { + "traces": traces, # Trace events for token usage, timing, LLM context + }, } except ValueError as e: raise HTTPException(status_code=404, detail=str(e)) from e @@ -1080,10 +1091,22 @@ async def _stream_execution( # Collect events for final response.completed event events = [] + # Get conversation_id for trace storage + conversation_id 
= request._get_conversation_id() + # Stream all events async for event in executor.execute_streaming(request): events.append(event) + # Store trace events for context inspection (persisted with conversation) + if conversation_id and hasattr(event, "type") and event.type == "response.trace.completed": + try: + trace_data = event.data if hasattr(event, "data") else None + if trace_data: + executor.conversation_store.add_trace(conversation_id, trace_data) + except Exception as e: + logger.debug(f"Failed to store trace event: {e}") + # IMPORTANT: Check model_dump_json FIRST because to_json() can have newlines (pretty-printing) # which breaks SSE format. model_dump_json() returns single-line JSON. if hasattr(event, "model_dump_json"): diff --git a/python/packages/devui/agent_framework_devui/_tracing.py b/python/packages/devui/agent_framework_devui/_tracing.py index 83abd2fcb7..3fc45398bf 100644 --- a/python/packages/devui/agent_framework_devui/_tracing.py +++ b/python/packages/devui/agent_framework_devui/_tracing.py @@ -18,14 +18,14 @@ class SimpleTraceCollector(SpanExporter): """Simple trace collector that captures spans for direct yielding.""" - def __init__(self, session_id: str | None = None, entity_id: str | None = None) -> None: + def __init__(self, response_id: str | None = None, entity_id: str | None = None) -> None: """Initialize trace collector. 
Args: - session_id: Session identifier for context + response_id: Response identifier for grouping traces by turn entity_id: Entity identifier for context """ - self.session_id = session_id + self.response_id = response_id self.entity_id = entity_id self.collected_events: list[ResponseTraceEvent] = [] @@ -93,7 +93,7 @@ def _convert_span_to_trace_event(self, span: Any) -> ResponseTraceEvent | None: "duration_ms": duration_ms, "attributes": dict(span.attributes) if span.attributes else {}, "status": str(span.status.status_code) if hasattr(span, "status") else "OK", - "session_id": self.session_id, + "response_id": self.response_id, "entity_id": self.entity_id, } @@ -121,18 +121,18 @@ def _convert_span_to_trace_event(self, span: Any) -> ResponseTraceEvent | None: @contextmanager def capture_traces( - session_id: str | None = None, entity_id: str | None = None + response_id: str | None = None, entity_id: str | None = None ) -> Generator[SimpleTraceCollector, None, None]: """Context manager to capture traces during execution. 
Args: - session_id: Session identifier for context + response_id: Response identifier for grouping traces by turn entity_id: Entity identifier for context Yields: SimpleTraceCollector instance to get trace events from """ - collector = SimpleTraceCollector(session_id, entity_id) + collector = SimpleTraceCollector(response_id, entity_id) try: from opentelemetry import trace @@ -146,7 +146,7 @@ def capture_traces( # Check if this is a real TracerProvider (not the default NoOpTracerProvider) if isinstance(provider, TracerProvider): provider.add_span_processor(processor) - logger.debug(f"Added trace collector to TracerProvider for session: {session_id}, entity: {entity_id}") + logger.debug(f"Added trace collector to TracerProvider for response: {response_id}, entity: {entity_id}") try: yield collector diff --git a/python/packages/devui/agent_framework_devui/models/_openai_custom.py b/python/packages/devui/agent_framework_devui/models/_openai_custom.py index ac0e74034a..e59d72b892 100644 --- a/python/packages/devui/agent_framework_devui/models/_openai_custom.py +++ b/python/packages/devui/agent_framework_devui/models/_openai_custom.py @@ -390,7 +390,7 @@ class MetaResponse(BaseModel): """Backend runtime/language - 'python' or 'dotnet' for deployment guides and feature availability.""" capabilities: dict[str, bool] = {} - """Server capabilities (e.g., tracing, openai_proxy).""" + """Server capabilities (e.g., instrumentation, openai_proxy).""" auth_required: bool = False """Whether the server requires Bearer token authentication.""" diff --git a/python/packages/devui/agent_framework_devui/ui/assets/index.css b/python/packages/devui/agent_framework_devui/ui/assets/index.css index df27c998b2..504bd04d6c 100644 --- a/python/packages/devui/agent_framework_devui/ui/assets/index.css +++ b/python/packages/devui/agent_framework_devui/ui/assets/index.css @@ -1 +1 @@ -/*! 
tailwindcss v4.1.12 | MIT License | https://tailwindcss.com */@layer properties{@supports (((-webkit-hyphens:none)) and (not (margin-trim:inline))) or ((-moz-orient:inline) and (not (color:rgb(from red r g b)))){*,:before,:after,::backdrop{--tw-translate-x:0;--tw-translate-y:0;--tw-translate-z:0;--tw-scale-x:1;--tw-scale-y:1;--tw-scale-z:1;--tw-rotate-x:initial;--tw-rotate-y:initial;--tw-rotate-z:initial;--tw-skew-x:initial;--tw-skew-y:initial;--tw-space-y-reverse:0;--tw-space-x-reverse:0;--tw-border-style:solid;--tw-leading:initial;--tw-font-weight:initial;--tw-tracking:initial;--tw-shadow:0 0 #0000;--tw-shadow-color:initial;--tw-shadow-alpha:100%;--tw-inset-shadow:0 0 #0000;--tw-inset-shadow-color:initial;--tw-inset-shadow-alpha:100%;--tw-ring-color:initial;--tw-ring-shadow:0 0 #0000;--tw-inset-ring-color:initial;--tw-inset-ring-shadow:0 0 #0000;--tw-ring-inset:initial;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-offset-shadow:0 0 #0000;--tw-outline-style:solid;--tw-blur:initial;--tw-brightness:initial;--tw-contrast:initial;--tw-grayscale:initial;--tw-hue-rotate:initial;--tw-invert:initial;--tw-opacity:initial;--tw-saturate:initial;--tw-sepia:initial;--tw-drop-shadow:initial;--tw-drop-shadow-color:initial;--tw-drop-shadow-alpha:100%;--tw-drop-shadow-size:initial;--tw-backdrop-blur:initial;--tw-backdrop-brightness:initial;--tw-backdrop-contrast:initial;--tw-backdrop-grayscale:initial;--tw-backdrop-hue-rotate:initial;--tw-backdrop-invert:initial;--tw-backdrop-opacity:initial;--tw-backdrop-saturate:initial;--tw-backdrop-sepia:initial;--tw-duration:initial;--tw-ease:initial;--tw-animation-delay:0s;--tw-animation-direction:normal;--tw-animation-duration:initial;--tw-animation-fill-mode:none;--tw-animation-iteration-count:1;--tw-enter-blur:0;--tw-enter-opacity:1;--tw-enter-rotate:0;--tw-enter-scale:1;--tw-enter-translate-x:0;--tw-enter-translate-y:0;--tw-exit-blur:0;--tw-exit-opacity:1;--tw-exit-rotate:0;--tw-exit-scale:1;--tw-exit-translate-x:0;--tw
-exit-translate-y:0}}}@layer theme{:root,:host{--font-sans:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-mono:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--color-red-50:oklch(97.1% .013 17.38);--color-red-100:oklch(93.6% .032 17.717);--color-red-200:oklch(88.5% .062 18.334);--color-red-400:oklch(70.4% .191 22.216);--color-red-500:oklch(63.7% .237 25.331);--color-red-600:oklch(57.7% .245 27.325);--color-red-700:oklch(50.5% .213 27.518);--color-red-800:oklch(44.4% .177 26.899);--color-red-900:oklch(39.6% .141 25.723);--color-red-950:oklch(25.8% .092 26.042);--color-orange-50:oklch(98% .016 73.684);--color-orange-100:oklch(95.4% .038 75.164);--color-orange-200:oklch(90.1% .076 70.697);--color-orange-300:oklch(83.7% .128 66.29);--color-orange-400:oklch(75% .183 55.934);--color-orange-500:oklch(70.5% .213 47.604);--color-orange-600:oklch(64.6% .222 41.116);--color-orange-700:oklch(55.3% .195 38.402);--color-orange-800:oklch(47% .157 37.304);--color-orange-900:oklch(40.8% .123 38.172);--color-orange-950:oklch(26.6% .079 36.259);--color-amber-50:oklch(98.7% .022 95.277);--color-amber-100:oklch(96.2% .059 95.617);--color-amber-200:oklch(92.4% .12 95.746);--color-amber-300:oklch(87.9% .169 91.605);--color-amber-400:oklch(82.8% .189 84.429);--color-amber-500:oklch(76.9% .188 70.08);--color-amber-600:oklch(66.6% .179 58.318);--color-amber-700:oklch(55.5% .163 48.998);--color-amber-800:oklch(47.3% .137 46.201);--color-amber-900:oklch(41.4% .112 45.904);--color-amber-950:oklch(27.9% .077 45.635);--color-yellow-100:oklch(97.3% .071 103.193);--color-yellow-200:oklch(94.5% .129 101.54);--color-yellow-400:oklch(85.2% .199 91.936);--color-yellow-600:oklch(68.1% .162 75.834);--color-yellow-700:oklch(55.4% .135 66.442);--color-green-50:oklch(98.2% .018 155.826);--color-green-100:oklch(96.2% .044 156.743);--color-green-200:oklch(92.5% .084 
155.995);--color-green-300:oklch(87.1% .15 154.449);--color-green-400:oklch(79.2% .209 151.711);--color-green-500:oklch(72.3% .219 149.579);--color-green-600:oklch(62.7% .194 149.214);--color-green-700:oklch(52.7% .154 150.069);--color-green-800:oklch(44.8% .119 151.328);--color-green-900:oklch(39.3% .095 152.535);--color-green-950:oklch(26.6% .065 152.934);--color-emerald-50:oklch(97.9% .021 166.113);--color-emerald-100:oklch(95% .052 163.051);--color-emerald-200:oklch(90.5% .093 164.15);--color-emerald-600:oklch(59.6% .145 163.225);--color-emerald-700:oklch(50.8% .118 165.612);--color-emerald-800:oklch(43.2% .095 166.913);--color-blue-50:oklch(97% .014 254.604);--color-blue-100:oklch(93.2% .032 255.585);--color-blue-200:oklch(88.2% .059 254.128);--color-blue-300:oklch(80.9% .105 251.813);--color-blue-400:oklch(70.7% .165 254.624);--color-blue-500:oklch(62.3% .214 259.815);--color-blue-600:oklch(54.6% .245 262.881);--color-blue-700:oklch(48.8% .243 264.376);--color-blue-800:oklch(42.4% .199 265.638);--color-blue-900:oklch(37.9% .146 265.522);--color-blue-950:oklch(28.2% .091 267.935);--color-purple-50:oklch(97.7% .014 308.299);--color-purple-100:oklch(94.6% .033 307.174);--color-purple-400:oklch(71.4% .203 305.504);--color-purple-500:oklch(62.7% .265 303.9);--color-purple-600:oklch(55.8% .288 302.321);--color-purple-900:oklch(38.1% .176 304.987);--color-gray-50:oklch(98.5% .002 247.839);--color-gray-100:oklch(96.7% .003 264.542);--color-gray-200:oklch(92.8% .006 264.531);--color-gray-300:oklch(87.2% .01 258.338);--color-gray-400:oklch(70.7% .022 261.325);--color-gray-500:oklch(55.1% .027 264.364);--color-gray-600:oklch(44.6% .03 256.802);--color-gray-700:oklch(37.3% .034 259.733);--color-gray-800:oklch(27.8% .033 256.848);--color-gray-900:oklch(21% .034 
264.665);--color-black:#000;--color-white:#fff;--spacing:.25rem;--container-md:28rem;--container-lg:32rem;--container-2xl:42rem;--container-3xl:48rem;--container-4xl:56rem;--container-5xl:64rem;--container-6xl:72rem;--container-7xl:80rem;--text-xs:.75rem;--text-xs--line-height:calc(1/.75);--text-sm:.875rem;--text-sm--line-height:calc(1.25/.875);--text-base:1rem;--text-base--line-height: 1.5 ;--text-lg:1.125rem;--text-lg--line-height:calc(1.75/1.125);--text-xl:1.25rem;--text-xl--line-height:calc(1.75/1.25);--text-2xl:1.5rem;--text-2xl--line-height:calc(2/1.5);--font-weight-medium:500;--font-weight-semibold:600;--font-weight-bold:700;--tracking-tight:-.025em;--tracking-wide:.025em;--tracking-wider:.05em;--tracking-widest:.1em;--leading-tight:1.25;--leading-relaxed:1.625;--drop-shadow-lg:0 4px 4px #00000026;--ease-out:cubic-bezier(0,0,.2,1);--ease-in-out:cubic-bezier(.4,0,.2,1);--animate-spin:spin 1s linear infinite;--animate-pulse:pulse 2s cubic-bezier(.4,0,.6,1)infinite;--animate-bounce:bounce 1s infinite;--blur-sm:8px;--default-transition-duration:.15s;--default-transition-timing-function:cubic-bezier(.4,0,.2,1);--default-font-family:var(--font-sans);--default-mono-font-family:var(--font-mono)}}@layer base{*,:after,:before,::backdrop{box-sizing:border-box;border:0 solid;margin:0;padding:0}::file-selector-button{box-sizing:border-box;border:0 solid;margin:0;padding:0}html,:host{-webkit-text-size-adjust:100%;tab-size:4;line-height:1.5;font-family:var(--default-font-family,ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji");font-feature-settings:var(--default-font-feature-settings,normal);font-variation-settings:var(--default-font-variation-settings,normal);-webkit-tap-highlight-color:transparent}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline 
dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;-webkit-text-decoration:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:var(--default-mono-font-family,ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace);font-feature-settings:var(--default-mono-font-feature-settings,normal);font-variation-settings:var(--default-mono-font-variation-settings,normal);font-size:1em}small{font-size:80%}sub,sup{vertical-align:baseline;font-size:75%;line-height:0;position:relative}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}:-moz-focusring{outline:auto}progress{vertical-align:baseline}summary{display:list-item}ol,ul,menu{list-style:none}img,svg,video,canvas,audio,iframe,embed,object{vertical-align:middle;display:block}img,video{max-width:100%;height:auto}button,input,select,optgroup,textarea{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}::file-selector-button{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}:where(select:is([multiple],[size])) optgroup{font-weight:bolder}:where(select:is([multiple],[size])) optgroup option{padding-inline-start:20px}::file-selector-button{margin-inline-end:4px}::placeholder{opacity:1}@supports (not ((-webkit-appearance:-apple-pay-button))) or (contain-intrinsic-size:1px){::placeholder{color:currentColor}@supports (color:color-mix(in lab,red,red)){::placeholder{color:color-mix(in oklab,currentcolor 
50%,transparent)}}}textarea{resize:vertical}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-date-and-time-value{min-height:1lh;text-align:inherit}::-webkit-datetime-edit{display:inline-flex}::-webkit-datetime-edit-fields-wrapper{padding:0}::-webkit-datetime-edit{padding-block:0}::-webkit-datetime-edit-year-field{padding-block:0}::-webkit-datetime-edit-month-field{padding-block:0}::-webkit-datetime-edit-day-field{padding-block:0}::-webkit-datetime-edit-hour-field{padding-block:0}::-webkit-datetime-edit-minute-field{padding-block:0}::-webkit-datetime-edit-second-field{padding-block:0}::-webkit-datetime-edit-millisecond-field{padding-block:0}::-webkit-datetime-edit-meridiem-field{padding-block:0}::-webkit-calendar-picker-indicator{line-height:1}:-moz-ui-invalid{box-shadow:none}button,input:where([type=button],[type=reset],[type=submit]){appearance:button}::file-selector-button{appearance:button}::-webkit-inner-spin-button{height:auto}::-webkit-outer-spin-button{height:auto}[hidden]:where(:not([hidden=until-found])){display:none!important}*{border-color:var(--border);outline-color:var(--ring)}@supports (color:color-mix(in lab,red,red)){*{outline-color:color-mix(in oklab,var(--ring)50%,transparent)}}body{background-color:var(--background);color:var(--foreground)}}@layer components;@layer 
utilities{.\@container\/card-header{container:card-header/inline-size}.pointer-events-none{pointer-events:none}.collapse{visibility:collapse}.visible{visibility:visible}.sr-only{clip:rect(0,0,0,0);white-space:nowrap;border-width:0;width:1px;height:1px;margin:-1px;padding:0;position:absolute;overflow:hidden}.absolute{position:absolute}.fixed{position:fixed}.relative{position:relative}.static{position:static}.inset-0{inset:calc(var(--spacing)*0)}.inset-2{inset:calc(var(--spacing)*2)}.inset-y-0{inset-block:calc(var(--spacing)*0)}.top-1{top:calc(var(--spacing)*1)}.top-2{top:calc(var(--spacing)*2)}.top-4{top:calc(var(--spacing)*4)}.top-\[30px\]{top:30px}.-right-2{right:calc(var(--spacing)*-2)}.right-0{right:calc(var(--spacing)*0)}.right-1{right:calc(var(--spacing)*1)}.right-2{right:calc(var(--spacing)*2)}.right-4{right:calc(var(--spacing)*4)}.bottom-0{bottom:calc(var(--spacing)*0)}.bottom-24{bottom:calc(var(--spacing)*24)}.-left-2{left:calc(var(--spacing)*-2)}.left-0{left:calc(var(--spacing)*0)}.left-1\/2{left:50%}.left-2{left:calc(var(--spacing)*2)}.left-\[18px\]{left:18px}.z-10{z-index:10}.z-20{z-index:20}.z-50{z-index:50}.col-start-2{grid-column-start:2}.row-span-2{grid-row:span 2/span 2}.row-start-1{grid-row-start:1}.container{width:100%}@media (min-width:40rem){.container{max-width:40rem}}@media (min-width:48rem){.container{max-width:48rem}}@media (min-width:64rem){.container{max-width:64rem}}@media (min-width:80rem){.container{max-width:80rem}}@media (min-width:96rem){.container{max-width:96rem}}.container\!{width:100%!important}@media (min-width:40rem){.container\!{max-width:40rem!important}}@media (min-width:48rem){.container\!{max-width:48rem!important}}@media (min-width:64rem){.container\!{max-width:64rem!important}}@media (min-width:80rem){.container\!{max-width:80rem!important}}@media 
(min-width:96rem){.container\!{max-width:96rem!important}}.m-2{margin:calc(var(--spacing)*2)}.-mx-1{margin-inline:calc(var(--spacing)*-1)}.mx-4{margin-inline:calc(var(--spacing)*4)}.mx-auto{margin-inline:auto}.my-1{margin-block:calc(var(--spacing)*1)}.my-2{margin-block:calc(var(--spacing)*2)}.my-3{margin-block:calc(var(--spacing)*3)}.my-4{margin-block:calc(var(--spacing)*4)}.mt-0{margin-top:calc(var(--spacing)*0)}.mt-0\.5{margin-top:calc(var(--spacing)*.5)}.mt-1{margin-top:calc(var(--spacing)*1)}.mt-2{margin-top:calc(var(--spacing)*2)}.mt-3{margin-top:calc(var(--spacing)*3)}.mt-4{margin-top:calc(var(--spacing)*4)}.mt-12{margin-top:calc(var(--spacing)*12)}.mr-1{margin-right:calc(var(--spacing)*1)}.mr-2{margin-right:calc(var(--spacing)*2)}.mb-1{margin-bottom:calc(var(--spacing)*1)}.mb-2{margin-bottom:calc(var(--spacing)*2)}.mb-3{margin-bottom:calc(var(--spacing)*3)}.mb-4{margin-bottom:calc(var(--spacing)*4)}.mb-6{margin-bottom:calc(var(--spacing)*6)}.mb-8{margin-bottom:calc(var(--spacing)*8)}.ml-0{margin-left:calc(var(--spacing)*0)}.ml-1{margin-left:calc(var(--spacing)*1)}.ml-2{margin-left:calc(var(--spacing)*2)}.ml-3{margin-left:calc(var(--spacing)*3)}.ml-4{margin-left:calc(var(--spacing)*4)}.ml-5{margin-left:calc(var(--spacing)*5)}.ml-6{margin-left:calc(var(--spacing)*6)}.ml-auto{margin-left:auto}.line-clamp-2{-webkit-line-clamp:2;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.line-clamp-3{-webkit-line-clamp:3;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.block{display:block}.contents{display:contents}.flex{display:flex}.grid{display:grid}.hidden{display:none}.inline{display:inline}.inline-block{display:inline-block}.inline-flex{display:inline-flex}.table{display:table}.field-sizing-content{field-sizing:content}.size-2{width:calc(var(--spacing)*2);height:calc(var(--spacing)*2)}.size-3\.5{width:calc(var(--spacing)*3.5);height:calc(var(--spacing)*3.5)}.size-4{width:calc(var(--spacing)*4);height:calc(var(--spacing)*4)}.size-9{width:
calc(var(--spacing)*9);height:calc(var(--spacing)*9)}.\!h-2{height:calc(var(--spacing)*2)!important}.h-0{height:calc(var(--spacing)*0)}.h-0\.5{height:calc(var(--spacing)*.5)}.h-1{height:calc(var(--spacing)*1)}.h-2{height:calc(var(--spacing)*2)}.h-2\.5{height:calc(var(--spacing)*2.5)}.h-3{height:calc(var(--spacing)*3)}.h-3\.5{height:calc(var(--spacing)*3.5)}.h-4{height:calc(var(--spacing)*4)}.h-5{height:calc(var(--spacing)*5)}.h-6{height:calc(var(--spacing)*6)}.h-7{height:calc(var(--spacing)*7)}.h-8{height:calc(var(--spacing)*8)}.h-9{height:calc(var(--spacing)*9)}.h-10{height:calc(var(--spacing)*10)}.h-12{height:calc(var(--spacing)*12)}.h-14{height:calc(var(--spacing)*14)}.h-16{height:calc(var(--spacing)*16)}.h-32{height:calc(var(--spacing)*32)}.h-\[1\.2rem\]{height:1.2rem}.h-\[1px\]{height:1px}.h-\[85vh\]{height:85vh}.h-\[500px\]{height:500px}.h-\[calc\(100\%\+8px\)\]{height:calc(100% + 8px)}.h-\[calc\(100vh-3\.5rem\)\]{height:calc(100vh - 3.5rem)}.h-\[calc\(100vh-3\.7rem\)\]{height:calc(100vh - 
3.7rem)}.h-\[var\(--radix-select-trigger-height\)\]{height:var(--radix-select-trigger-height)}.h-full{height:100%}.h-px{height:1px}.h-screen{height:100vh}.max-h-\(--radix-dropdown-menu-content-available-height\){max-height:var(--radix-dropdown-menu-content-available-height)}.max-h-\(--radix-select-content-available-height\){max-height:var(--radix-select-content-available-height)}.max-h-20{max-height:calc(var(--spacing)*20)}.max-h-32{max-height:calc(var(--spacing)*32)}.max-h-40{max-height:calc(var(--spacing)*40)}.max-h-48{max-height:calc(var(--spacing)*48)}.max-h-60{max-height:calc(var(--spacing)*60)}.max-h-64{max-height:calc(var(--spacing)*64)}.max-h-\[85vh\]{max-height:85vh}.max-h-\[90vh\]{max-height:90vh}.max-h-\[200px\]{max-height:200px}.max-h-\[400px\]{max-height:400px}.max-h-none{max-height:none}.max-h-screen{max-height:100vh}.\!min-h-0{min-height:calc(var(--spacing)*0)!important}.min-h-0{min-height:calc(var(--spacing)*0)}.min-h-16{min-height:calc(var(--spacing)*16)}.min-h-\[36px\]{min-height:36px}.min-h-\[40px\]{min-height:40px}.min-h-\[50vh\]{min-height:50vh}.min-h-\[400px\]{min-height:400px}.min-h-screen{min-height:100vh}.\!w-2{width:calc(var(--spacing)*2)!important}.w-1{width:calc(var(--spacing)*1)}.w-2{width:calc(var(--spacing)*2)}.w-2\.5{width:calc(var(--spacing)*2.5)}.w-3{width:calc(var(--spacing)*3)}.w-3\.5{width:calc(var(--spacing)*3.5)}.w-4{width:calc(var(--spacing)*4)}.w-5{width:calc(var(--spacing)*5)}.w-6{width:calc(var(--spacing)*6)}.w-8{width:calc(var(--spacing)*8)}.w-9{width:calc(var(--spacing)*9)}.w-10{width:calc(var(--spacing)*10)}.w-12{width:calc(var(--spacing)*12)}.w-16{width:calc(var(--spacing)*16)}.w-56{width:calc(var(--spacing)*56)}.w-64{width:calc(var(--spacing)*64)}.w-80{width:calc(var(--spacing)*80)}.w-\[1\.2rem\]{width:1.2rem}.w-\[1px\]{width:1px}.w-\[28rem\]{width:28rem}.w-\[90vw\]{width:90vw}.w-\[600px\]{width:600px}.w-\[800px\]{width:800px}.w-fit{width:fit-content}.w-full{width:100%}.w-px{width:1px}.max-w-2xl{max-width:var(--contain
er-2xl)}.max-w-3xl{max-width:var(--container-3xl)}.max-w-4xl{max-width:var(--container-4xl)}.max-w-6xl{max-width:var(--container-6xl)}.max-w-7xl{max-width:var(--container-7xl)}.max-w-\[80\%\]{max-width:80%}.max-w-\[90vw\]{max-width:90vw}.max-w-full{max-width:100%}.max-w-lg{max-width:var(--container-lg)}.max-w-md{max-width:var(--container-md)}.max-w-none{max-width:none}.\!min-w-0{min-width:calc(var(--spacing)*0)!important}.min-w-0{min-width:calc(var(--spacing)*0)}.min-w-\[8rem\]{min-width:8rem}.min-w-\[300px\]{min-width:300px}.min-w-\[400px\]{min-width:400px}.min-w-\[800px\]{min-width:800px}.min-w-\[var\(--radix-select-trigger-width\)\]{min-width:var(--radix-select-trigger-width)}.min-w-full{min-width:100%}.flex-1{flex:1}.flex-shrink-0,.shrink-0{flex-shrink:0}.origin-\(--radix-dropdown-menu-content-transform-origin\){transform-origin:var(--radix-dropdown-menu-content-transform-origin)}.origin-\(--radix-select-content-transform-origin\){transform-origin:var(--radix-select-content-transform-origin)}.-translate-x-1\/2{--tw-translate-x: -50% ;translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-0{--tw-translate-x:calc(var(--spacing)*0);translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-4{--tw-translate-x:calc(var(--spacing)*4);translate:var(--tw-translate-x)var(--tw-translate-y)}.scale-0{--tw-scale-x:0%;--tw-scale-y:0%;--tw-scale-z:0%;scale:var(--tw-scale-x)var(--tw-scale-y)}.scale-75{--tw-scale-x:75%;--tw-scale-y:75%;--tw-scale-z:75%;scale:var(--tw-scale-x)var(--tw-scale-y)}.scale-100{--tw-scale-x:100%;--tw-scale-y:100%;--tw-scale-z:100%;scale:var(--tw-scale-x)var(--tw-scale-y)}.rotate-0{rotate:none}.rotate-90{rotate:90deg}.transform{transform:var(--tw-rotate-x,)var(--tw-rotate-y,)var(--tw-rotate-z,)var(--tw-skew-x,)var(--tw-skew-y,)}.animate-bounce{animation:var(--animate-bounce)}.animate-in{animation:enter 
var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.animate-pulse{animation:var(--animate-pulse)}.animate-spin{animation:var(--animate-spin)}.cursor-col-resize{cursor:col-resize}.cursor-default{cursor:default}.cursor-pointer{cursor:pointer}.touch-none{touch-action:none}.resize{resize:both}.resize-none{resize:none}.scroll-my-1{scroll-margin-block:calc(var(--spacing)*1)}.list-inside{list-style-position:inside}.list-decimal{list-style-type:decimal}.list-disc{list-style-type:disc}.list-none{list-style-type:none}.auto-rows-min{grid-auto-rows:min-content}.grid-cols-1{grid-template-columns:repeat(1,minmax(0,1fr))}.grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.grid-cols-\[auto_auto_1fr_auto\]{grid-template-columns:auto auto 1fr auto}.grid-rows-\[auto_auto\]{grid-template-rows:auto auto}.flex-col{flex-direction:column}.flex-row-reverse{flex-direction:row-reverse}.flex-wrap{flex-wrap:wrap}.items-center{align-items:center}.items-end{align-items:flex-end}.items-start{align-items:flex-start}.items-stretch{align-items:stretch}.justify-between{justify-content:space-between}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}.gap-0{gap:calc(var(--spacing)*0)}.gap-1{gap:calc(var(--spacing)*1)}.gap-1\.5{gap:calc(var(--spacing)*1.5)}.gap-2{gap:calc(var(--spacing)*2)}.gap-3{gap:calc(var(--spacing)*3)}.gap-4{gap:calc(var(--spacing)*4)}.gap-6{gap:calc(var(--spacing)*6)}:where(.space-y-0\.5>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*.5)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*.5)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-1>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1)*calc(1 - 
var(--tw-space-y-reverse)))}:where(.space-y-1\.5>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1.5)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1.5)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-2>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*2)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-3>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*3)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*3)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-4>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*4)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*4)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-6>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*6)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*6)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-x-1>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*1)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*1)*calc(1 - var(--tw-space-x-reverse)))}:where(.space-x-2>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*2)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*2)*calc(1 - 
var(--tw-space-x-reverse)))}.self-start{align-self:flex-start}.justify-self-end{justify-self:flex-end}.truncate{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.overflow-auto{overflow:auto}.overflow-hidden{overflow:hidden}.overflow-x-auto{overflow-x:auto}.overflow-x-hidden{overflow-x:hidden}.overflow-y-auto{overflow-y:auto}.\!rounded-full{border-radius:3.40282e38px!important}.rounded{border-radius:.25rem}.rounded-\[4px\]{border-radius:4px}.rounded-\[inherit\]{border-radius:inherit}.rounded-full{border-radius:3.40282e38px}.rounded-lg{border-radius:var(--radius)}.rounded-md{border-radius:calc(var(--radius) - 2px)}.rounded-none{border-radius:0}.rounded-sm{border-radius:calc(var(--radius) - 4px)}.rounded-l-none{border-top-left-radius:0;border-bottom-left-radius:0}.rounded-r-none{border-top-right-radius:0;border-bottom-right-radius:0}.\!border{border-style:var(--tw-border-style)!important;border-width:1px!important}.border{border-style:var(--tw-border-style);border-width:1px}.border-2{border-style:var(--tw-border-style);border-width:2px}.border-t{border-top-style:var(--tw-border-style);border-top-width:1px}.border-r{border-right-style:var(--tw-border-style);border-right-width:1px}.border-b{border-bottom-style:var(--tw-border-style);border-bottom-width:1px}.border-l{border-left-style:var(--tw-border-style);border-left-width:1px}.border-l-0{border-left-style:var(--tw-border-style);border-left-width:0}.border-l-2{border-left-style:var(--tw-border-style);border-left-width:2px}.border-l-4{border-left-style:var(--tw-border-style);border-left-width:4px}.border-dashed{--tw-border-style:dashed;border-style:dashed}.\!border-gray-600{border-color:var(--color-gray-600)!important}.border-\[\#643FB2\]{border-color:#643fb2}.border-\[\#643FB2\]\/20{border-color:#643fb233}.border-\[\#643FB2\]\/30{border-color:#643fb24d}.border-amber-200{border-color:var(--color-amber-200)}.border-blue-200{border-color:var(--color-blue-200)}.border-blue-300{border-color:var(--color-blue-300)}.bo
rder-blue-400{border-color:var(--color-blue-400)}.border-blue-500{border-color:var(--color-blue-500)}.border-border,.border-border\/50{border-color:var(--border)}@supports (color:color-mix(in lab,red,red)){.border-border\/50{border-color:color-mix(in oklab,var(--border)50%,transparent)}}.border-current\/30{border-color:currentColor}@supports (color:color-mix(in lab,red,red)){.border-current\/30{border-color:color-mix(in oklab,currentcolor 30%,transparent)}}.border-destructive\/30{border-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.border-destructive\/30{border-color:color-mix(in oklab,var(--destructive)30%,transparent)}}.border-foreground\/5{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/5{border-color:color-mix(in oklab,var(--foreground)5%,transparent)}}.border-foreground\/10{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/10{border-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.border-foreground\/20{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/20{border-color:color-mix(in oklab,var(--foreground)20%,transparent)}}.border-gray-200{border-color:var(--color-gray-200)}.border-gray-300{border-color:var(--color-gray-300)}.border-gray-400{border-color:var(--color-gray-400)}.border-gray-500\/20{border-color:#6a728233}@supports (color:color-mix(in lab,red,red)){.border-gray-500\/20{border-color:color-mix(in oklab,var(--color-gray-500)20%,transparent)}}.border-green-200{border-color:var(--color-green-200)}.border-green-500{border-color:var(--color-green-500)}.border-green-500\/20{border-color:#00c75833}@supports (color:color-mix(in lab,red,red)){.border-green-500\/20{border-color:color-mix(in oklab,var(--color-green-500)20%,transparent)}}.border-green-500\/40{border-color:#00c75866}@supports (color:color-mix(in lab,red,red)){.border-green-500\/40{border-color:color-mix(in 
oklab,var(--color-green-500)40%,transparent)}}.border-green-600\/20{border-color:#00a54433}@supports (color:color-mix(in lab,red,red)){.border-green-600\/20{border-color:color-mix(in oklab,var(--color-green-600)20%,transparent)}}.border-input{border-color:var(--input)}.border-muted{border-color:var(--muted)}.border-orange-200{border-color:var(--color-orange-200)}.border-orange-300{border-color:var(--color-orange-300)}.border-orange-500{border-color:var(--color-orange-500)}.border-orange-500\/20{border-color:#fe6e0033}@supports (color:color-mix(in lab,red,red)){.border-orange-500\/20{border-color:color-mix(in oklab,var(--color-orange-500)20%,transparent)}}.border-orange-500\/40{border-color:#fe6e0066}@supports (color:color-mix(in lab,red,red)){.border-orange-500\/40{border-color:color-mix(in oklab,var(--color-orange-500)40%,transparent)}}.border-orange-600\/20{border-color:#f0510033}@supports (color:color-mix(in lab,red,red)){.border-orange-600\/20{border-color:color-mix(in oklab,var(--color-orange-600)20%,transparent)}}.border-primary,.border-primary\/20{border-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.border-primary\/20{border-color:color-mix(in oklab,var(--primary)20%,transparent)}}.border-red-200{border-color:var(--color-red-200)}.border-red-500{border-color:var(--color-red-500)}.border-red-500\/20{border-color:#fb2c3633}@supports (color:color-mix(in lab,red,red)){.border-red-500\/20{border-color:color-mix(in oklab,var(--color-red-500)20%,transparent)}}.border-transparent{border-color:#0000}.border-yellow-200{border-color:var(--color-yellow-200)}.border-t-transparent{border-top-color:#0000}.border-l-transparent{border-left-color:#0000}.bg-\[\#643FB2\]{background-color:#643fb2}.bg-\[\#643FB2\]\/10{background-color:#643fb21a}.bg-accent\/10{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.bg-accent\/10{background-color:color-mix(in 
oklab,var(--accent)10%,transparent)}}.bg-amber-50{background-color:var(--color-amber-50)}.bg-background,.bg-background\/50{background-color:var(--background)}@supports (color:color-mix(in lab,red,red)){.bg-background\/50{background-color:color-mix(in oklab,var(--background)50%,transparent)}}.bg-black{background-color:var(--color-black)}.bg-black\/50{background-color:#00000080}@supports (color:color-mix(in lab,red,red)){.bg-black\/50{background-color:color-mix(in oklab,var(--color-black)50%,transparent)}}.bg-black\/60{background-color:#0009}@supports (color:color-mix(in lab,red,red)){.bg-black\/60{background-color:color-mix(in oklab,var(--color-black)60%,transparent)}}.bg-blue-50{background-color:var(--color-blue-50)}.bg-blue-50\/80{background-color:#eff6ffcc}@supports (color:color-mix(in lab,red,red)){.bg-blue-50\/80{background-color:color-mix(in oklab,var(--color-blue-50)80%,transparent)}}.bg-blue-50\/95{background-color:#eff6fff2}@supports (color:color-mix(in lab,red,red)){.bg-blue-50\/95{background-color:color-mix(in oklab,var(--color-blue-50)95%,transparent)}}.bg-blue-100{background-color:var(--color-blue-100)}.bg-blue-500{background-color:var(--color-blue-500)}.bg-blue-500\/5{background-color:#3080ff0d}@supports (color:color-mix(in lab,red,red)){.bg-blue-500\/5{background-color:color-mix(in oklab,var(--color-blue-500)5%,transparent)}}.bg-blue-500\/10{background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.bg-blue-500\/10{background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.bg-blue-600{background-color:var(--color-blue-600)}.bg-border{background-color:var(--border)}.bg-card{background-color:var(--card)}.bg-current{background-color:currentColor}.bg-destructive,.bg-destructive\/10{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.bg-destructive\/10{background-color:color-mix(in oklab,var(--destructive)10%,transparent)}}.bg-foreground\/5{background-color:var(--foreground)}@supports 
(color:color-mix(in lab,red,red)){.bg-foreground\/5{background-color:color-mix(in oklab,var(--foreground)5%,transparent)}}.bg-foreground\/10{background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.bg-foreground\/10{background-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.bg-gray-50{background-color:var(--color-gray-50)}.bg-gray-100{background-color:var(--color-gray-100)}.bg-gray-200{background-color:var(--color-gray-200)}.bg-gray-400{background-color:var(--color-gray-400)}.bg-gray-500\/10{background-color:#6a72821a}@supports (color:color-mix(in lab,red,red)){.bg-gray-500\/10{background-color:color-mix(in oklab,var(--color-gray-500)10%,transparent)}}.bg-gray-900\/90{background-color:#101828e6}@supports (color:color-mix(in lab,red,red)){.bg-gray-900\/90{background-color:color-mix(in oklab,var(--color-gray-900)90%,transparent)}}.bg-green-50{background-color:var(--color-green-50)}.bg-green-100{background-color:var(--color-green-100)}.bg-green-500{background-color:var(--color-green-500)}.bg-green-500\/5{background-color:#00c7580d}@supports (color:color-mix(in lab,red,red)){.bg-green-500\/5{background-color:color-mix(in oklab,var(--color-green-500)5%,transparent)}}.bg-green-500\/10{background-color:#00c7581a}@supports (color:color-mix(in lab,red,red)){.bg-green-500\/10{background-color:color-mix(in oklab,var(--color-green-500)10%,transparent)}}.bg-muted{background-color:var(--muted)}.bg-muted-foreground\/30{background-color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.bg-muted-foreground\/30{background-color:color-mix(in oklab,var(--muted-foreground)30%,transparent)}}.bg-muted\/30{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.bg-muted\/30{background-color:color-mix(in oklab,var(--muted)30%,transparent)}}.bg-muted\/50{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.bg-muted\/50{background-color:color-mix(in 
oklab,var(--muted)50%,transparent)}}.bg-orange-50{background-color:var(--color-orange-50)}.bg-orange-50\/50{background-color:#fff7ed80}@supports (color:color-mix(in lab,red,red)){.bg-orange-50\/50{background-color:color-mix(in oklab,var(--color-orange-50)50%,transparent)}}.bg-orange-100{background-color:var(--color-orange-100)}.bg-orange-100\/50{background-color:#ffedd580}@supports (color:color-mix(in lab,red,red)){.bg-orange-100\/50{background-color:color-mix(in oklab,var(--color-orange-100)50%,transparent)}}.bg-orange-500{background-color:var(--color-orange-500)}.bg-orange-500\/5{background-color:#fe6e000d}@supports (color:color-mix(in lab,red,red)){.bg-orange-500\/5{background-color:color-mix(in oklab,var(--color-orange-500)5%,transparent)}}.bg-orange-500\/10{background-color:#fe6e001a}@supports (color:color-mix(in lab,red,red)){.bg-orange-500\/10{background-color:color-mix(in oklab,var(--color-orange-500)10%,transparent)}}.bg-popover{background-color:var(--popover)}.bg-primary,.bg-primary\/10{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/10{background-color:color-mix(in oklab,var(--primary)10%,transparent)}}.bg-primary\/30{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/30{background-color:color-mix(in oklab,var(--primary)30%,transparent)}}.bg-primary\/40{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/40{background-color:color-mix(in oklab,var(--primary)40%,transparent)}}.bg-purple-50{background-color:var(--color-purple-50)}.bg-purple-100{background-color:var(--color-purple-100)}.bg-red-50{background-color:var(--color-red-50)}.bg-red-100{background-color:var(--color-red-100)}.bg-red-500{background-color:var(--color-red-500)}.bg-red-500\/10{background-color:#fb2c361a}@supports (color:color-mix(in lab,red,red)){.bg-red-500\/10{background-color:color-mix(in 
oklab,var(--color-red-500)10%,transparent)}}.bg-secondary{background-color:var(--secondary)}.bg-transparent{background-color:#0000}.bg-white{background-color:var(--color-white)}.bg-white\/60{background-color:#fff9}@supports (color:color-mix(in lab,red,red)){.bg-white\/60{background-color:color-mix(in oklab,var(--color-white)60%,transparent)}}.bg-white\/90{background-color:#ffffffe6}@supports (color:color-mix(in lab,red,red)){.bg-white\/90{background-color:color-mix(in oklab,var(--color-white)90%,transparent)}}.bg-yellow-100{background-color:var(--color-yellow-100)}.fill-current{fill:currentColor}.object-cover{object-fit:cover}.p-0{padding:calc(var(--spacing)*0)}.p-1{padding:calc(var(--spacing)*1)}.p-1\.5{padding:calc(var(--spacing)*1.5)}.p-2{padding:calc(var(--spacing)*2)}.p-3{padding:calc(var(--spacing)*3)}.p-4{padding:calc(var(--spacing)*4)}.p-6{padding:calc(var(--spacing)*6)}.p-8{padding:calc(var(--spacing)*8)}.p-\[1px\]{padding:1px}.px-1{padding-inline:calc(var(--spacing)*1)}.px-1\.5{padding-inline:calc(var(--spacing)*1.5)}.px-2{padding-inline:calc(var(--spacing)*2)}.px-2\.5{padding-inline:calc(var(--spacing)*2.5)}.px-3{padding-inline:calc(var(--spacing)*3)}.px-4{padding-inline:calc(var(--spacing)*4)}.px-6{padding-inline:calc(var(--spacing)*6)}.px-8{padding-inline:calc(var(--spacing)*8)}.py-0{padding-block:calc(var(--spacing)*0)}.py-0\.5{padding-block:calc(var(--spacing)*.5)}.py-1{padding-block:calc(var(--spacing)*1)}.py-1\.5{padding-block:calc(var(--spacing)*1.5)}.py-2{padding-block:calc(var(--spacing)*2)}.py-2\.5{padding-block:calc(var(--spacing)*2.5)}.py-3{padding-block:calc(var(--spacing)*3)}.py-4{padding-block:calc(var(--spacing)*4)}.py-6{padding-block:calc(var(--spacing)*6)}.py-8{padding-block:calc(var(--spacing)*8)}.pt-0{padding-top:calc(var(--spacing)*0)}.pt-1{padding-top:calc(var(--spacing)*1)}.pt-2{padding-top:calc(var(--spacing)*2)}.pt-3{padding-top:calc(var(--spacing)*3)}.pt-4{padding-top:calc(var(--spacing)*4)}.pt-6{padding-top:calc(var(--spacing)*6
)}.pt-8{padding-top:calc(var(--spacing)*8)}.pr-2{padding-right:calc(var(--spacing)*2)}.pr-4{padding-right:calc(var(--spacing)*4)}.pr-8{padding-right:calc(var(--spacing)*8)}.pb-2{padding-bottom:calc(var(--spacing)*2)}.pb-3{padding-bottom:calc(var(--spacing)*3)}.pb-4{padding-bottom:calc(var(--spacing)*4)}.pb-6{padding-bottom:calc(var(--spacing)*6)}.pl-2{padding-left:calc(var(--spacing)*2)}.pl-3{padding-left:calc(var(--spacing)*3)}.pl-4{padding-left:calc(var(--spacing)*4)}.pl-5{padding-left:calc(var(--spacing)*5)}.pl-8{padding-left:calc(var(--spacing)*8)}.text-center{text-align:center}.text-left{text-align:left}.font-mono{font-family:var(--font-mono)}.text-2xl{font-size:var(--text-2xl);line-height:var(--tw-leading,var(--text-2xl--line-height))}.text-base{font-size:var(--text-base);line-height:var(--tw-leading,var(--text-base--line-height))}.text-lg{font-size:var(--text-lg);line-height:var(--tw-leading,var(--text-lg--line-height))}.text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.text-xl{font-size:var(--text-xl);line-height:var(--tw-leading,var(--text-xl--line-height))}.text-xs{font-size:var(--text-xs);line-height:var(--tw-leading,var(--text-xs--line-height))}.text-\[10px\]{font-size:10px}.leading-none{--tw-leading:1;line-height:1}.leading-relaxed{--tw-leading:var(--leading-relaxed);line-height:var(--leading-relaxed)}.leading-tight{--tw-leading:var(--leading-tight);line-height:var(--leading-tight)}.font-bold{--tw-font-weight:var(--font-weight-bold);font-weight:var(--font-weight-bold)}.font-medium{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.font-semibold{--tw-font-weight:var(--font-weight-semibold);font-weight:var(--font-weight-semibold)}.tracking-tight{--tw-tracking:var(--tracking-tight);letter-spacing:var(--tracking-tight)}.tracking-wide{--tw-tracking:var(--tracking-wide);letter-spacing:var(--tracking-wide)}.tracking-wider{--tw-tracking:var(--tracking-wider);letter-spacing:var(--tracking-w
ider)}.tracking-widest{--tw-tracking:var(--tracking-widest);letter-spacing:var(--tracking-widest)}.break-words{overflow-wrap:break-word}.break-all{word-break:break-all}.whitespace-nowrap{white-space:nowrap}.whitespace-pre-wrap{white-space:pre-wrap}.text-\[\#643FB2\]{color:#643fb2}.text-amber-500{color:var(--color-amber-500)}.text-amber-600{color:var(--color-amber-600)}.text-amber-700{color:var(--color-amber-700)}.text-amber-800{color:var(--color-amber-800)}.text-amber-900{color:var(--color-amber-900)}.text-blue-500{color:var(--color-blue-500)}.text-blue-500\/80{color:#3080ffcc}@supports (color:color-mix(in lab,red,red)){.text-blue-500\/80{color:color-mix(in oklab,var(--color-blue-500)80%,transparent)}}.text-blue-600{color:var(--color-blue-600)}.text-blue-700{color:var(--color-blue-700)}.text-blue-800{color:var(--color-blue-800)}.text-blue-900{color:var(--color-blue-900)}.text-card-foreground{color:var(--card-foreground)}.text-current{color:currentColor}.text-destructive,.text-destructive\/70{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.text-destructive\/70{color:color-mix(in oklab,var(--destructive)70%,transparent)}}.text-destructive\/90{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.text-destructive\/90{color:color-mix(in oklab,var(--destructive)90%,transparent)}}.text-foreground{color:var(--foreground)}.text-gray-300{color:var(--color-gray-300)}.text-gray-400{color:var(--color-gray-400)}.text-gray-500{color:var(--color-gray-500)}.text-gray-600{color:var(--color-gray-600)}.text-gray-700{color:var(--color-gray-700)}.text-gray-900{color:var(--color-gray-900)}.text-green-500{color:var(--color-green-500)}.text-green-600{color:var(--color-green-600)}.text-green-700{color:var(--color-green-700)}.text-green-800{color:var(--color-green-800)}.text-green-900{color:var(--color-green-900)}.text-muted-foreground,.text-muted-foreground\/60{color:var(--muted-foreground)}@supports (color:color-mix(in 
lab,red,red)){.text-muted-foreground\/60{color:color-mix(in oklab,var(--muted-foreground)60%,transparent)}}.text-muted-foreground\/70{color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.text-muted-foreground\/70{color:color-mix(in oklab,var(--muted-foreground)70%,transparent)}}.text-muted-foreground\/80{color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.text-muted-foreground\/80{color:color-mix(in oklab,var(--muted-foreground)80%,transparent)}}.text-orange-500{color:var(--color-orange-500)}.text-orange-600{color:var(--color-orange-600)}.text-orange-700{color:var(--color-orange-700)}.text-orange-800{color:var(--color-orange-800)}.text-orange-900{color:var(--color-orange-900)}.text-popover-foreground{color:var(--popover-foreground)}.text-primary{color:var(--primary)}.text-primary-foreground{color:var(--primary-foreground)}.text-purple-500{color:var(--color-purple-500)}.text-purple-600{color:var(--color-purple-600)}.text-red-400{color:var(--color-red-400)}.text-red-500{color:var(--color-red-500)}.text-red-600{color:var(--color-red-600)}.text-red-700{color:var(--color-red-700)}.text-red-800{color:var(--color-red-800)}.text-secondary-foreground{color:var(--secondary-foreground)}.text-white{color:var(--color-white)}.text-yellow-600{color:var(--color-yellow-600)}.text-yellow-700{color:var(--color-yellow-700)}.capitalize{text-transform:capitalize}.lowercase{text-transform:lowercase}.uppercase{text-transform:uppercase}.italic{font-style:italic}.underline-offset-4{text-underline-offset:4px}.opacity-0{opacity:0}.opacity-50{opacity:.5}.opacity-60{opacity:.6}.opacity-70{opacity:.7}.opacity-80{opacity:.8}.opacity-100{opacity:1}.shadow{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-lg{--tw-shadow:0 10px 15px -3px 
var(--tw-shadow-color,#0000001a),0 4px 6px -4px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-md{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-sm{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-xs{--tw-shadow:0 1px 2px 0 var(--tw-shadow-color,#0000000d);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring-0{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(0px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring-2{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-\[\#643FB2\]\/20{--tw-shadow-color:#643fb233}@supports (color:color-mix(in lab,red,red)){.shadow-\[\#643FB2\]\/20{--tw-shadow-color:color-mix(in oklab,oklab(47.4316% .069152 -.159147/.2) var(--tw-shadow-alpha),transparent)}}.shadow-green-500\/20{--tw-shadow-color:#00c75833}@supports (color:color-mix(in lab,red,red)){.shadow-green-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-green-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-orange-500\/20{--tw-shadow-color:#fe6e0033}@supports (color:color-mix(in 
lab,red,red)){.shadow-orange-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-orange-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-primary\/25{--tw-shadow-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.shadow-primary\/25{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--primary)25%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-red-500\/20{--tw-shadow-color:#fb2c3633}@supports (color:color-mix(in lab,red,red)){.shadow-red-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-red-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.ring-blue-500{--tw-ring-color:var(--color-blue-500)}.ring-blue-500\/20{--tw-ring-color:#3080ff33}@supports (color:color-mix(in lab,red,red)){.ring-blue-500\/20{--tw-ring-color:color-mix(in oklab,var(--color-blue-500)20%,transparent)}}.ring-offset-2{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.ring-offset-background{--tw-ring-offset-color:var(--background)}.outline-hidden{--tw-outline-style:none;outline-style:none}@media (forced-colors:active){.outline-hidden{outline-offset:2px;outline:2px solid #0000}}.outline{outline-style:var(--tw-outline-style);outline-width:1px}.drop-shadow-lg{--tw-drop-shadow-size:drop-shadow(0 4px 4px 
var(--tw-drop-shadow-color,#00000026));--tw-drop-shadow:drop-shadow(var(--drop-shadow-lg));filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.filter{filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.backdrop-blur-sm{--tw-backdrop-blur:blur(var(--blur-sm));-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.transition{transition-property:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to,opacity,box-shadow,transform,translate,scale,rotate,filter,-webkit-backdrop-filter,backdrop-filter,display,visibility,content-visibility,overlay,pointer-events;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-\[color\,box-shadow\]{transition-property:color,box-shadow;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-all{transition-property:all;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-colors{transition-proper
ty:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-opacity{transition-property:opacity;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-shadow{transition-property:box-shadow;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-transform{transition-property:transform,translate,scale,rotate;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-none{transition-property:none}.duration-200{--tw-duration:.2s;transition-duration:.2s}.duration-300{--tw-duration:.3s;transition-duration:.3s}.ease-in-out{--tw-ease:var(--ease-in-out);transition-timing-function:var(--ease-in-out)}.ease-out{--tw-ease:var(--ease-out);transition-timing-function:var(--ease-out)}.outline-none{--tw-outline-style:none;outline-style:none}.select-none{-webkit-user-select:none;user-select:none}.\[animation-delay\:-0\.3s\]{animation-delay:-.3s}.\[animation-delay\:-0\.15s\]{animation-delay:-.15s}.fade-in{--tw-enter-opacity:0}.running{animation-play-state:running}.slide-in-from-bottom-2{--tw-enter-translate-y:calc(2*var(--spacing))}.group-open\:rotate-90:is(:where(.group):is([open],:popover-open,:open) *){rotate:90deg}.group-open\:rotate-180:is(:where(.group):is([open],:popover-open,:open) *){rotate:180deg}@media (hover:hover){.group-hover\:bg-primary:is(:where(.group):hover *){background-color:var(--primary)}.group-hover\:opacity-100:is(:where(.group):hover 
*){opacity:1}.group-hover\:shadow-md:is(:where(.group):hover *){--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.group-hover\:shadow-primary\/20:is(:where(.group):hover *){--tw-shadow-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.group-hover\:shadow-primary\/20:is(:where(.group):hover *){--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--primary)20%,transparent)var(--tw-shadow-alpha),transparent)}}}.group-data-\[disabled\=true\]\:pointer-events-none:is(:where(.group)[data-disabled=true] *){pointer-events:none}.group-data-\[disabled\=true\]\:opacity-50:is(:where(.group)[data-disabled=true] *){opacity:.5}.peer-disabled\:cursor-not-allowed:is(:where(.peer):disabled~*){cursor:not-allowed}.peer-disabled\:opacity-50:is(:where(.peer):disabled~*){opacity:.5}.selection\:bg-primary ::selection{background-color:var(--primary)}.selection\:bg-primary::selection{background-color:var(--primary)}.selection\:text-primary-foreground 
::selection{color:var(--primary-foreground)}.selection\:text-primary-foreground::selection{color:var(--primary-foreground)}.file\:inline-flex::file-selector-button{display:inline-flex}.file\:h-7::file-selector-button{height:calc(var(--spacing)*7)}.file\:border-0::file-selector-button{border-style:var(--tw-border-style);border-width:0}.file\:bg-transparent::file-selector-button{background-color:#0000}.file\:text-sm::file-selector-button{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.file\:font-medium::file-selector-button{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.file\:text-foreground::file-selector-button{color:var(--foreground)}.placeholder\:text-muted-foreground::placeholder{color:var(--muted-foreground)}.first\:mt-0:first-child{margin-top:calc(var(--spacing)*0)}.last\:border-r-0:last-child{border-right-style:var(--tw-border-style);border-right-width:0}.last\:border-b-0:last-child{border-bottom-style:var(--tw-border-style);border-bottom-width:0}@media (hover:hover){.hover\:border-gray-300:hover{border-color:var(--color-gray-300)}.hover\:border-muted-foreground\/30:hover{border-color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.hover\:border-muted-foreground\/30:hover{border-color:color-mix(in oklab,var(--muted-foreground)30%,transparent)}}.hover\:bg-accent:hover,.hover\:bg-accent\/50:hover{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-accent\/50:hover{background-color:color-mix(in oklab,var(--accent)50%,transparent)}}.hover\:bg-amber-100:hover{background-color:var(--color-amber-100)}.hover\:bg-blue-700:hover{background-color:var(--color-blue-700)}.hover\:bg-destructive\/80:hover{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-destructive\/80:hover{background-color:color-mix(in 
oklab,var(--destructive)80%,transparent)}}.hover\:bg-destructive\/90:hover{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-destructive\/90:hover{background-color:color-mix(in oklab,var(--destructive)90%,transparent)}}.hover\:bg-muted:hover,.hover\:bg-muted\/50:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/50:hover{background-color:color-mix(in oklab,var(--muted)50%,transparent)}}.hover\:bg-orange-100:hover{background-color:var(--color-orange-100)}.hover\:bg-primary\/20:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/20:hover{background-color:color-mix(in oklab,var(--primary)20%,transparent)}}.hover\:bg-primary\/80:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/80:hover{background-color:color-mix(in oklab,var(--primary)80%,transparent)}}.hover\:bg-primary\/90:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/90:hover{background-color:color-mix(in oklab,var(--primary)90%,transparent)}}.hover\:bg-red-50:hover{background-color:var(--color-red-50)}.hover\:bg-secondary\/80:hover{background-color:var(--secondary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-secondary\/80:hover{background-color:color-mix(in oklab,var(--secondary)80%,transparent)}}.hover\:bg-white:hover{background-color:var(--color-white)}.hover\:text-accent-foreground:hover{color:var(--accent-foreground)}.hover\:text-destructive\/80:hover{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:text-destructive\/80:hover{color:color-mix(in 
oklab,var(--destructive)80%,transparent)}}.hover\:text-foreground:hover{color:var(--foreground)}.hover\:text-orange-900:hover{color:var(--color-orange-900)}.hover\:text-primary:hover{color:var(--primary)}.hover\:text-red-600:hover{color:var(--color-red-600)}.hover\:underline:hover{text-decoration-line:underline}.hover\:opacity-70:hover{opacity:.7}.hover\:opacity-100:hover{opacity:1}.hover\:shadow-md:hover{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}}.focus\:bg-accent:focus{background-color:var(--accent)}.focus\:text-accent-foreground:focus{color:var(--accent-foreground)}.focus\:ring-2:focus{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus\:ring-ring:focus{--tw-ring-color:var(--ring)}.focus\:ring-offset-2:focus{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus\:outline-none:focus{--tw-outline-style:none;outline-style:none}.focus-visible\:border-ring:focus-visible{border-color:var(--ring)}.focus-visible\:ring-1:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(1px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-2:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + 
var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-\[3px\]:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(3px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-destructive\/20:focus-visible{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-destructive\/20:focus-visible{--tw-ring-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.focus-visible\:ring-ring:focus-visible,.focus-visible\:ring-ring\/50:focus-visible{--tw-ring-color:var(--ring)}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-ring\/50:focus-visible{--tw-ring-color:color-mix(in oklab,var(--ring)50%,transparent)}}.focus-visible\:ring-offset-2:focus-visible{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus-visible\:ring-offset-background:focus-visible{--tw-ring-offset-color:var(--background)}.focus-visible\:outline-none:focus-visible{--tw-outline-style:none;outline-style:none}.disabled\:pointer-events-none:disabled{pointer-events:none}.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}.disabled\:opacity-50:disabled{opacity:.5}.has-data-\[slot\=card-action\]\:grid-cols-\[1fr_auto\]:has([data-slot=card-action]){grid-template-columns:1fr 
auto}.has-\[\>svg\]\:px-2\.5:has(>svg){padding-inline:calc(var(--spacing)*2.5)}.has-\[\>svg\]\:px-3:has(>svg){padding-inline:calc(var(--spacing)*3)}.has-\[\>svg\]\:px-4:has(>svg){padding-inline:calc(var(--spacing)*4)}.aria-invalid\:border-destructive[aria-invalid=true]{border-color:var(--destructive)}.aria-invalid\:ring-destructive\/20[aria-invalid=true]{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.aria-invalid\:ring-destructive\/20[aria-invalid=true]{--tw-ring-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.data-\[disabled\]\:pointer-events-none[data-disabled]{pointer-events:none}.data-\[disabled\]\:opacity-50[data-disabled]{opacity:.5}.data-\[inset\]\:pl-8[data-inset]{padding-left:calc(var(--spacing)*8)}.data-\[placeholder\]\:text-muted-foreground[data-placeholder]{color:var(--muted-foreground)}.data-\[side\=bottom\]\:translate-y-1[data-side=bottom]{--tw-translate-y:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=bottom\]\:slide-in-from-top-2[data-side=bottom]{--tw-enter-translate-y:calc(2*var(--spacing)*-1)}.data-\[side\=left\]\:-translate-x-1[data-side=left]{--tw-translate-x:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=left\]\:slide-in-from-right-2[data-side=left]{--tw-enter-translate-x:calc(2*var(--spacing))}.data-\[side\=right\]\:translate-x-1[data-side=right]{--tw-translate-x:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=right\]\:slide-in-from-left-2[data-side=right]{--tw-enter-translate-x:calc(2*var(--spacing)*-1)}.data-\[side\=top\]\:-translate-y-1[data-side=top]{--tw-translate-y:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=top\]\:slide-in-from-bottom-2[data-side=top]{--tw-enter-translate-y:calc(2*var(--spacing))}.data-\[size\=default\]\:h-9[data-size=default]{height:calc(var(--spacing)*9)}.data-\[size\=sm\]\:h-8[data-size=sm]{height:
calc(var(--spacing)*8)}:is(.\*\:data-\[slot\=select-value\]\:line-clamp-1>*)[data-slot=select-value]{-webkit-line-clamp:1;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}:is(.\*\:data-\[slot\=select-value\]\:flex>*)[data-slot=select-value]{display:flex}:is(.\*\:data-\[slot\=select-value\]\:items-center>*)[data-slot=select-value]{align-items:center}:is(.\*\:data-\[slot\=select-value\]\:gap-2>*)[data-slot=select-value]{gap:calc(var(--spacing)*2)}.data-\[state\=active\]\:bg-background[data-state=active]{background-color:var(--background)}.data-\[state\=active\]\:text-foreground[data-state=active]{color:var(--foreground)}.data-\[state\=active\]\:shadow[data-state=active]{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.data-\[state\=checked\]\:translate-x-4[data-state=checked]{--tw-translate-x:calc(var(--spacing)*4);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[state\=checked\]\:border-primary[data-state=checked]{border-color:var(--primary)}.data-\[state\=checked\]\:bg-primary[data-state=checked]{background-color:var(--primary)}.data-\[state\=checked\]\:text-primary-foreground[data-state=checked]{color:var(--primary-foreground)}.data-\[state\=closed\]\:animate-out[data-state=closed]{animation:exit var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.data-\[state\=closed\]\:fade-out-0[data-state=closed]{--tw-exit-opacity:0}.data-\[state\=closed\]\:zoom-out-95[data-state=closed]{--tw-exit-scale:.95}.data-\[state\=open\]\:animate-in[data-state=open]{animation:enter 
var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.data-\[state\=open\]\:bg-accent[data-state=open]{background-color:var(--accent)}.data-\[state\=open\]\:text-accent-foreground[data-state=open]{color:var(--accent-foreground)}.data-\[state\=open\]\:fade-in-0[data-state=open]{--tw-enter-opacity:0}.data-\[state\=open\]\:zoom-in-95[data-state=open]{--tw-enter-scale:.95}.data-\[state\=unchecked\]\:translate-x-0[data-state=unchecked]{--tw-translate-x:calc(var(--spacing)*0);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[state\=unchecked\]\:bg-input[data-state=unchecked]{background-color:var(--input)}.data-\[variant\=destructive\]\:text-destructive[data-variant=destructive]{color:var(--destructive)}.data-\[variant\=destructive\]\:focus\:bg-destructive\/10[data-variant=destructive]:focus{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.data-\[variant\=destructive\]\:focus\:bg-destructive\/10[data-variant=destructive]:focus{background-color:color-mix(in oklab,var(--destructive)10%,transparent)}}.data-\[variant\=destructive\]\:focus\:text-destructive[data-variant=destructive]:focus{color:var(--destructive)}@media (min-width:40rem){.sm\:col-span-2{grid-column:span 2/span 2}.sm\:w-64{width:calc(var(--spacing)*64)}.sm\:max-w-lg{max-width:var(--container-lg)}.sm\:flex-none{flex:none}.sm\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.sm\:flex-row{flex-direction:row}.sm\:items-center{align-items:center}}@media (min-width:48rem){.md\:col-span-2{grid-column:span 2/span 
2}.md\:col-start-2{grid-column-start:2}.md\:inline{display:inline}.md\:max-w-2xl{max-width:var(--container-2xl)}.md\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.md\:gap-8{gap:calc(var(--spacing)*8)}.md\:text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}}@media (min-width:64rem){.lg\:col-span-3{grid-column:span 3/span 3}.lg\:max-w-4xl{max-width:var(--container-4xl)}.lg\:grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.lg\:flex-row{flex-direction:row}.lg\:items-center{align-items:center}.lg\:justify-between{justify-content:space-between}}@media (min-width:80rem){.xl\:col-span-2{grid-column:span 2/span 2}.xl\:col-span-4{grid-column:span 4/span 4}.xl\:max-w-5xl{max-width:var(--container-5xl)}.xl\:grid-cols-4{grid-template-columns:repeat(4,minmax(0,1fr))}}.dark\:scale-0:is(.dark *){--tw-scale-x:0%;--tw-scale-y:0%;--tw-scale-z:0%;scale:var(--tw-scale-x)var(--tw-scale-y)}.dark\:scale-100:is(.dark *){--tw-scale-x:100%;--tw-scale-y:100%;--tw-scale-z:100%;scale:var(--tw-scale-x)var(--tw-scale-y)}.dark\:-rotate-90:is(.dark *){rotate:-90deg}.dark\:rotate-0:is(.dark *){rotate:none}.dark\:\!border-gray-500:is(.dark *){border-color:var(--color-gray-500)!important}.dark\:\!border-gray-600:is(.dark *){border-color:var(--color-gray-600)!important}.dark\:border-\[\#8B5CF6\]:is(.dark *){border-color:#8b5cf6}.dark\:border-\[\#8B5CF6\]\/20:is(.dark *){border-color:#8b5cf633}.dark\:border-\[\#8B5CF6\]\/30:is(.dark *){border-color:#8b5cf64d}.dark\:border-amber-800:is(.dark *){border-color:var(--color-amber-800)}.dark\:border-amber-900:is(.dark *){border-color:var(--color-amber-900)}.dark\:border-blue-400:is(.dark *){border-color:var(--color-blue-400)}.dark\:border-blue-500:is(.dark *){border-color:var(--color-blue-500)}.dark\:border-blue-700:is(.dark *){border-color:var(--color-blue-700)}.dark\:border-blue-800:is(.dark *){border-color:var(--color-blue-800)}.dark\:border-gray-500:is(.dark 
*){border-color:var(--color-gray-500)}.dark\:border-gray-600:is(.dark *){border-color:var(--color-gray-600)}.dark\:border-gray-700:is(.dark *){border-color:var(--color-gray-700)}.dark\:border-green-400:is(.dark *){border-color:var(--color-green-400)}.dark\:border-green-800:is(.dark *){border-color:var(--color-green-800)}.dark\:border-input:is(.dark *){border-color:var(--input)}.dark\:border-orange-400:is(.dark *){border-color:var(--color-orange-400)}.dark\:border-orange-700:is(.dark *){border-color:var(--color-orange-700)}.dark\:border-orange-800:is(.dark *){border-color:var(--color-orange-800)}.dark\:border-red-400:is(.dark *){border-color:var(--color-red-400)}.dark\:border-red-800:is(.dark *){border-color:var(--color-red-800)}.dark\:\!bg-gray-800\/90:is(.dark *){background-color:#1e2939e6!important}@supports (color:color-mix(in lab,red,red)){.dark\:\!bg-gray-800\/90:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-800)90%,transparent)!important}}.dark\:bg-\[\#8B5CF6\]:is(.dark *){background-color:#8b5cf6}.dark\:bg-\[\#8B5CF6\]\/10:is(.dark *){background-color:#8b5cf61a}.dark\:bg-amber-950\/20:is(.dark *){background-color:#46190133}@supports (color:color-mix(in lab,red,red)){.dark\:bg-amber-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-amber-950)20%,transparent)}}.dark\:bg-amber-950\/50:is(.dark *){background-color:#46190180}@supports (color:color-mix(in lab,red,red)){.dark\:bg-amber-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-amber-950)50%,transparent)}}.dark\:bg-blue-500\/10:is(.dark *){background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.dark\:bg-blue-900:is(.dark *){background-color:var(--color-blue-900)}.dark\:bg-blue-900\/20:is(.dark *){background-color:#1c398e33}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-900\/20:is(.dark *){background-color:color-mix(in 
oklab,var(--color-blue-900)20%,transparent)}}.dark\:bg-blue-950\/20:is(.dark *){background-color:#16245633}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)20%,transparent)}}.dark\:bg-blue-950\/40:is(.dark *){background-color:#16245666}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/40:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)40%,transparent)}}.dark\:bg-blue-950\/50:is(.dark *){background-color:#16245680}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)50%,transparent)}}.dark\:bg-blue-950\/95:is(.dark *){background-color:#162456f2}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/95:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)95%,transparent)}}.dark\:bg-card:is(.dark *){background-color:var(--card)}.dark\:bg-destructive\/60:is(.dark *){background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-destructive\/60:is(.dark *){background-color:color-mix(in oklab,var(--destructive)60%,transparent)}}.dark\:bg-foreground\/10:is(.dark *){background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-foreground\/10:is(.dark *){background-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.dark\:bg-gray-500:is(.dark *){background-color:var(--color-gray-500)}.dark\:bg-gray-800:is(.dark *){background-color:var(--color-gray-800)}.dark\:bg-gray-800\/90:is(.dark *){background-color:#1e2939e6}@supports (color:color-mix(in lab,red,red)){.dark\:bg-gray-800\/90:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-800)90%,transparent)}}.dark\:bg-gray-900:is(.dark *){background-color:var(--color-gray-900)}.dark\:bg-gray-900\/30:is(.dark *){background-color:#1018284d}@supports (color:color-mix(in lab,red,red)){.dark\:bg-gray-900\/30:is(.dark 
*){background-color:color-mix(in oklab,var(--color-gray-900)30%,transparent)}}.dark\:bg-green-400:is(.dark *){background-color:var(--color-green-400)}.dark\:bg-green-500\/10:is(.dark *){background-color:#00c7581a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-green-500)10%,transparent)}}.dark\:bg-green-900:is(.dark *){background-color:var(--color-green-900)}.dark\:bg-green-950:is(.dark *){background-color:var(--color-green-950)}.dark\:bg-green-950\/20:is(.dark *){background-color:#032e1533}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-green-950)20%,transparent)}}.dark\:bg-green-950\/50:is(.dark *){background-color:#032e1580}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-green-950)50%,transparent)}}.dark\:bg-input\/30:is(.dark *){background-color:var(--input)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-input\/30:is(.dark *){background-color:color-mix(in oklab,var(--input)30%,transparent)}}.dark\:bg-orange-400:is(.dark *){background-color:var(--color-orange-400)}.dark\:bg-orange-500\/10:is(.dark *){background-color:#fe6e001a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-500)10%,transparent)}}.dark\:bg-orange-900:is(.dark *){background-color:var(--color-orange-900)}.dark\:bg-orange-950:is(.dark *){background-color:var(--color-orange-950)}.dark\:bg-orange-950\/20:is(.dark *){background-color:#44130633}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-950)20%,transparent)}}.dark\:bg-orange-950\/30:is(.dark *){background-color:#4413064d}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/30:is(.dark *){background-color:color-mix(in 
oklab,var(--color-orange-950)30%,transparent)}}.dark\:bg-orange-950\/50:is(.dark *){background-color:#44130680}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-950)50%,transparent)}}.dark\:bg-purple-900:is(.dark *){background-color:var(--color-purple-900)}.dark\:bg-red-400:is(.dark *){background-color:var(--color-red-400)}.dark\:bg-red-900:is(.dark *){background-color:var(--color-red-900)}.dark\:bg-red-950:is(.dark *){background-color:var(--color-red-950)}.dark\:bg-red-950\/20:is(.dark *){background-color:#46080933}@supports (color:color-mix(in lab,red,red)){.dark\:bg-red-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-red-950)20%,transparent)}}.dark\:text-\[\#8B5CF6\]:is(.dark *){color:#8b5cf6}.dark\:text-amber-100:is(.dark *){color:var(--color-amber-100)}.dark\:text-amber-200:is(.dark *){color:var(--color-amber-200)}.dark\:text-amber-300:is(.dark *){color:var(--color-amber-300)}.dark\:text-amber-400:is(.dark *){color:var(--color-amber-400)}.dark\:text-amber-500:is(.dark *){color:var(--color-amber-500)}.dark\:text-blue-100:is(.dark *){color:var(--color-blue-100)}.dark\:text-blue-200:is(.dark *){color:var(--color-blue-200)}.dark\:text-blue-300:is(.dark *){color:var(--color-blue-300)}.dark\:text-blue-400:is(.dark *){color:var(--color-blue-400)}.dark\:text-blue-400\/70:is(.dark *){color:#54a2ffb3}@supports (color:color-mix(in lab,red,red)){.dark\:text-blue-400\/70:is(.dark *){color:color-mix(in oklab,var(--color-blue-400)70%,transparent)}}.dark\:text-blue-500:is(.dark *){color:var(--color-blue-500)}.dark\:text-gray-100:is(.dark *){color:var(--color-gray-100)}.dark\:text-gray-300:is(.dark *){color:var(--color-gray-300)}.dark\:text-gray-400:is(.dark *){color:var(--color-gray-400)}.dark\:text-green-100:is(.dark *){color:var(--color-green-100)}.dark\:text-green-200:is(.dark *){color:var(--color-green-200)}.dark\:text-green-300:is(.dark 
*){color:var(--color-green-300)}.dark\:text-green-400:is(.dark *){color:var(--color-green-400)}.dark\:text-orange-100:is(.dark *){color:var(--color-orange-100)}.dark\:text-orange-200:is(.dark *){color:var(--color-orange-200)}.dark\:text-orange-300:is(.dark *){color:var(--color-orange-300)}.dark\:text-orange-400:is(.dark *){color:var(--color-orange-400)}.dark\:text-purple-400:is(.dark *){color:var(--color-purple-400)}.dark\:text-red-200:is(.dark *){color:var(--color-red-200)}.dark\:text-red-400:is(.dark *){color:var(--color-red-400)}.dark\:text-yellow-400:is(.dark *){color:var(--color-yellow-400)}.dark\:opacity-30:is(.dark *){opacity:.3}@media (hover:hover){.dark\:hover\:border-gray-600:is(.dark *):hover{border-color:var(--color-gray-600)}.dark\:hover\:bg-accent\/50:is(.dark *):hover{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-accent\/50:is(.dark *):hover{background-color:color-mix(in oklab,var(--accent)50%,transparent)}}.dark\:hover\:bg-amber-950\/30:is(.dark *):hover{background-color:#4619014d}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-amber-950\/30:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-amber-950)30%,transparent)}}.dark\:hover\:bg-gray-800:is(.dark *):hover{background-color:var(--color-gray-800)}.dark\:hover\:bg-input\/50:is(.dark *):hover{background-color:var(--input)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-input\/50:is(.dark *):hover{background-color:color-mix(in oklab,var(--input)50%,transparent)}}.dark\:hover\:bg-orange-950\/40:is(.dark *):hover{background-color:#44130666}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-orange-950\/40:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-orange-950)40%,transparent)}}.dark\:hover\:bg-red-900\/20:is(.dark *):hover{background-color:#82181a33}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-red-900\/20:is(.dark *):hover{background-color:color-mix(in 
oklab,var(--color-red-900)20%,transparent)}}.dark\:hover\:text-orange-200:is(.dark *):hover{color:var(--color-orange-200)}}.dark\:focus-visible\:ring-destructive\/40:is(.dark *):focus-visible{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:focus-visible\:ring-destructive\/40:is(.dark *):focus-visible{--tw-ring-color:color-mix(in oklab,var(--destructive)40%,transparent)}}.dark\:aria-invalid\:ring-destructive\/40:is(.dark *)[aria-invalid=true]{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:aria-invalid\:ring-destructive\/40:is(.dark *)[aria-invalid=true]{--tw-ring-color:color-mix(in oklab,var(--destructive)40%,transparent)}}.dark\:data-\[state\=checked\]\:bg-primary:is(.dark *)[data-state=checked]{background-color:var(--primary)}.dark\:data-\[variant\=destructive\]\:focus\:bg-destructive\/20:is(.dark *)[data-variant=destructive]:focus{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:data-\[variant\=destructive\]\:focus\:bg-destructive\/20:is(.dark *)[data-variant=destructive]:focus{background-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.\[\&_p\]\:leading-relaxed p{--tw-leading:var(--leading-relaxed);line-height:var(--leading-relaxed)}.\[\&_svg\]\:pointer-events-none svg{pointer-events:none}.\[\&_svg\]\:shrink-0 svg{flex-shrink:0}.\[\&_svg\:not\(\[class\*\=\'size-\'\]\)\]\:size-4 svg:not([class*=size-]){width:calc(var(--spacing)*4);height:calc(var(--spacing)*4)}.\[\&_svg\:not\(\[class\*\=\'text-\'\]\)\]\:text-muted-foreground 
svg:not([class*=text-]){color:var(--muted-foreground)}.\[\.border-b\]\:pb-6.border-b{padding-bottom:calc(var(--spacing)*6)}.\[\.border-t\]\:pt-6.border-t{padding-top:calc(var(--spacing)*6)}:is(.\*\:\[span\]\:last\:flex>*):is(span):last-child{display:flex}:is(.\*\:\[span\]\:last\:items-center>*):is(span):last-child{align-items:center}:is(.\*\:\[span\]\:last\:gap-2>*):is(span):last-child{gap:calc(var(--spacing)*2)}:is(.data-\[variant\=destructive\]\:\*\:\[svg\]\:\!text-destructive[data-variant=destructive]>*):is(svg){color:var(--destructive)!important}.\[\&\>svg\]\:absolute>svg{position:absolute}.\[\&\>svg\]\:top-4>svg{top:calc(var(--spacing)*4)}.\[\&\>svg\]\:left-4>svg{left:calc(var(--spacing)*4)}.\[\&\>svg\]\:text-foreground>svg{color:var(--foreground)}.\[\&\>svg\+div\]\:translate-y-\[-3px\]>svg+div{--tw-translate-y:-3px;translate:var(--tw-translate-x)var(--tw-translate-y)}.\[\&\>svg\~\*\]\:pl-7>svg~*{padding-left:calc(var(--spacing)*7)}}@property --tw-animation-delay{syntax:"*";inherits:false;initial-value:0s}@property --tw-animation-direction{syntax:"*";inherits:false;initial-value:normal}@property --tw-animation-duration{syntax:"*";inherits:false}@property --tw-animation-fill-mode{syntax:"*";inherits:false;initial-value:none}@property --tw-animation-iteration-count{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-blur{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-opacity{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-rotate{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-scale{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-blur{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-opacity{syntax:"*";inherits:false;initial-value:1}@property --tw-exit-rotate{syntax:"*";inherits:false;initial-value:0}@property 
--tw-exit-scale{syntax:"*";inherits:false;initial-value:1}@property --tw-exit-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-translate-y{syntax:"*";inherits:false;initial-value:0}:root{--radius:.625rem;--background:oklch(100% 0 0);--foreground:oklch(14.5% 0 0);--card:oklch(100% 0 0);--card-foreground:oklch(14.5% 0 0);--popover:oklch(100% 0 0);--popover-foreground:oklch(14.5% 0 0);--primary:oklch(48% .18 290);--primary-foreground:oklch(98.5% 0 0);--secondary:oklch(97% 0 0);--secondary-foreground:oklch(20.5% 0 0);--muted:oklch(97% 0 0);--muted-foreground:oklch(55.6% 0 0);--accent:oklch(97% 0 0);--accent-foreground:oklch(20.5% 0 0);--destructive:oklch(57.7% .245 27.325);--border:oklch(92.2% 0 0);--input:oklch(92.2% 0 0);--ring:oklch(70.8% 0 0);--chart-1:oklch(64.6% .222 41.116);--chart-2:oklch(60% .118 184.704);--chart-3:oklch(39.8% .07 227.392);--chart-4:oklch(82.8% .189 84.429);--chart-5:oklch(76.9% .188 70.08);--sidebar:oklch(98.5% 0 0);--sidebar-foreground:oklch(14.5% 0 0);--sidebar-primary:oklch(20.5% 0 0);--sidebar-primary-foreground:oklch(98.5% 0 0);--sidebar-accent:oklch(97% 0 0);--sidebar-accent-foreground:oklch(20.5% 0 0);--sidebar-border:oklch(92.2% 0 0);--sidebar-ring:oklch(70.8% 0 0)}.dark{--background:oklch(14.5% 0 0);--foreground:oklch(98.5% 0 0);--card:oklch(20.5% 0 0);--card-foreground:oklch(98.5% 0 0);--popover:oklch(20.5% 0 0);--popover-foreground:oklch(98.5% 0 0);--primary:oklch(62% .2 290);--primary-foreground:oklch(98.5% 0 0);--secondary:oklch(26.9% 0 0);--secondary-foreground:oklch(98.5% 0 0);--muted:oklch(26.9% 0 0);--muted-foreground:oklch(70.8% 0 0);--accent:oklch(26.9% 0 0);--accent-foreground:oklch(98.5% 0 0);--destructive:oklch(70.4% .191 22.216);--border:oklch(100% 0 0/.1);--input:oklch(100% 0 0/.15);--ring:oklch(55.6% 0 0);--chart-1:oklch(48.8% .243 264.376);--chart-2:oklch(69.6% .17 162.48);--chart-3:oklch(76.9% .188 70.08);--chart-4:oklch(62.7% .265 303.9);--chart-5:oklch(64.5% .246 
16.439);--sidebar:oklch(20.5% 0 0);--sidebar-foreground:oklch(98.5% 0 0);--sidebar-primary:oklch(48.8% .243 264.376);--sidebar-primary-foreground:oklch(98.5% 0 0);--sidebar-accent:oklch(26.9% 0 0);--sidebar-accent-foreground:oklch(98.5% 0 0);--sidebar-border:oklch(100% 0 0/.1);--sidebar-ring:oklch(55.6% 0 0)}.workflow-chat-view .border-green-200{border-color:var(--color-emerald-200)}.workflow-chat-view .bg-green-50{background-color:var(--color-emerald-50)}.workflow-chat-view .bg-green-100{background-color:var(--color-emerald-100)}.workflow-chat-view .text-green-600{color:var(--color-emerald-600)}.workflow-chat-view .text-green-700{color:var(--color-emerald-700)}.workflow-chat-view .text-green-800{color:var(--color-emerald-800)}.highlight-attention{animation:1s ease-out highlight-flash}@keyframes highlight-flash{0%{background-color:#fb923c4d;transform:scale(1.02)}to{background-color:#0000;transform:scale(1)}}.hil-waiting-glow{animation:2s infinite pulse-glow;box-shadow:0 0 #fb923c66,inset 0 0 0 1px #fb923c33}@keyframes pulse-glow{0%,to{box-shadow:0 0 #fb923c66,inset 0 0 0 1px #fb923c33}50%{box-shadow:0 0 20px 5px #fb923c33,inset 0 0 0 2px #fb923c4d}}@property --tw-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-z{syntax:"*";inherits:false;initial-value:0}@property --tw-scale-x{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-y{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-z{syntax:"*";inherits:false;initial-value:1}@property --tw-rotate-x{syntax:"*";inherits:false}@property --tw-rotate-y{syntax:"*";inherits:false}@property --tw-rotate-z{syntax:"*";inherits:false}@property --tw-skew-x{syntax:"*";inherits:false}@property --tw-skew-y{syntax:"*";inherits:false}@property --tw-space-y-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-space-x-reverse{syntax:"*";inherits:false;initial-value:0}@property 
--tw-border-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-leading{syntax:"*";inherits:false}@property --tw-font-weight{syntax:"*";inherits:false}@property --tw-tracking{syntax:"*";inherits:false}@property --tw-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-shadow-color{syntax:"*";inherits:false}@property --tw-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-inset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-shadow-color{syntax:"*";inherits:false}@property --tw-inset-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-ring-color{syntax:"*";inherits:false}@property --tw-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-ring-color{syntax:"*";inherits:false}@property --tw-inset-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-ring-inset{syntax:"*";inherits:false}@property --tw-ring-offset-width{syntax:"";inherits:false;initial-value:0}@property --tw-ring-offset-color{syntax:"*";inherits:false;initial-value:#fff}@property --tw-ring-offset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-outline-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-blur{syntax:"*";inherits:false}@property --tw-brightness{syntax:"*";inherits:false}@property --tw-contrast{syntax:"*";inherits:false}@property --tw-grayscale{syntax:"*";inherits:false}@property --tw-hue-rotate{syntax:"*";inherits:false}@property --tw-invert{syntax:"*";inherits:false}@property --tw-opacity{syntax:"*";inherits:false}@property --tw-saturate{syntax:"*";inherits:false}@property --tw-sepia{syntax:"*";inherits:false}@property --tw-drop-shadow{syntax:"*";inherits:false}@property --tw-drop-shadow-color{syntax:"*";inherits:false}@property --tw-drop-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-drop-shadow-size{syntax:"*";inherits:false}@property 
--tw-backdrop-blur{syntax:"*";inherits:false}@property --tw-backdrop-brightness{syntax:"*";inherits:false}@property --tw-backdrop-contrast{syntax:"*";inherits:false}@property --tw-backdrop-grayscale{syntax:"*";inherits:false}@property --tw-backdrop-hue-rotate{syntax:"*";inherits:false}@property --tw-backdrop-invert{syntax:"*";inherits:false}@property --tw-backdrop-opacity{syntax:"*";inherits:false}@property --tw-backdrop-saturate{syntax:"*";inherits:false}@property --tw-backdrop-sepia{syntax:"*";inherits:false}@property --tw-duration{syntax:"*";inherits:false}@property --tw-ease{syntax:"*";inherits:false}@keyframes spin{to{transform:rotate(360deg)}}@keyframes pulse{50%{opacity:.5}}@keyframes bounce{0%,to{animation-timing-function:cubic-bezier(.8,0,1,1);transform:translateY(-25%)}50%{animation-timing-function:cubic-bezier(0,0,.2,1);transform:none}}@keyframes enter{0%{opacity:var(--tw-enter-opacity,1);transform:translate3d(var(--tw-enter-translate-x,0),var(--tw-enter-translate-y,0),0)scale3d(var(--tw-enter-scale,1),var(--tw-enter-scale,1),var(--tw-enter-scale,1))rotate(var(--tw-enter-rotate,0));filter:blur(var(--tw-enter-blur,0))}}@keyframes exit{to{opacity:var(--tw-exit-opacity,1);transform:translate3d(var(--tw-exit-translate-x,0),var(--tw-exit-translate-y,0),0)scale3d(var(--tw-exit-scale,1),var(--tw-exit-scale,1),var(--tw-exit-scale,1))rotate(var(--tw-exit-rotate,0));filter:blur(var(--tw-exit-blur,0))}}.react-flow{direction:ltr;--xy-edge-stroke-default: #b1b1b7;--xy-edge-stroke-width-default: 1;--xy-edge-stroke-selected-default: #555;--xy-connectionline-stroke-default: #b1b1b7;--xy-connectionline-stroke-width-default: 1;--xy-attribution-background-color-default: rgba(255, 255, 255, .5);--xy-minimap-background-color-default: #fff;--xy-minimap-mask-background-color-default: rgba(240, 240, 240, .6);--xy-minimap-mask-stroke-color-default: transparent;--xy-minimap-mask-stroke-width-default: 1;--xy-minimap-node-background-color-default: 
#e2e2e2;--xy-minimap-node-stroke-color-default: transparent;--xy-minimap-node-stroke-width-default: 2;--xy-background-color-default: transparent;--xy-background-pattern-dots-color-default: #91919a;--xy-background-pattern-lines-color-default: #eee;--xy-background-pattern-cross-color-default: #e2e2e2;background-color:var(--xy-background-color, var(--xy-background-color-default));--xy-node-color-default: inherit;--xy-node-border-default: 1px solid #1a192b;--xy-node-background-color-default: #fff;--xy-node-group-background-color-default: rgba(240, 240, 240, .25);--xy-node-boxshadow-hover-default: 0 1px 4px 1px rgba(0, 0, 0, .08);--xy-node-boxshadow-selected-default: 0 0 0 .5px #1a192b;--xy-node-border-radius-default: 3px;--xy-handle-background-color-default: #1a192b;--xy-handle-border-color-default: #fff;--xy-selection-background-color-default: rgba(0, 89, 220, .08);--xy-selection-border-default: 1px dotted rgba(0, 89, 220, .8);--xy-controls-button-background-color-default: #fefefe;--xy-controls-button-background-color-hover-default: #f4f4f4;--xy-controls-button-color-default: inherit;--xy-controls-button-color-hover-default: inherit;--xy-controls-button-border-color-default: #eee;--xy-controls-box-shadow-default: 0 0 2px 1px rgba(0, 0, 0, .08);--xy-edge-label-background-color-default: #ffffff;--xy-edge-label-color-default: inherit;--xy-resize-background-color-default: #3367d9}.react-flow.dark{--xy-edge-stroke-default: #3e3e3e;--xy-edge-stroke-width-default: 1;--xy-edge-stroke-selected-default: #727272;--xy-connectionline-stroke-default: #b1b1b7;--xy-connectionline-stroke-width-default: 1;--xy-attribution-background-color-default: rgba(150, 150, 150, .25);--xy-minimap-background-color-default: #141414;--xy-minimap-mask-background-color-default: rgba(60, 60, 60, .6);--xy-minimap-mask-stroke-color-default: transparent;--xy-minimap-mask-stroke-width-default: 1;--xy-minimap-node-background-color-default: #2b2b2b;--xy-minimap-node-stroke-color-default: 
transparent;--xy-minimap-node-stroke-width-default: 2;--xy-background-color-default: #141414;--xy-background-pattern-dots-color-default: #777;--xy-background-pattern-lines-color-default: #777;--xy-background-pattern-cross-color-default: #777;--xy-node-color-default: #f8f8f8;--xy-node-border-default: 1px solid #3c3c3c;--xy-node-background-color-default: #1e1e1e;--xy-node-group-background-color-default: rgba(240, 240, 240, .25);--xy-node-boxshadow-hover-default: 0 1px 4px 1px rgba(255, 255, 255, .08);--xy-node-boxshadow-selected-default: 0 0 0 .5px #999;--xy-handle-background-color-default: #bebebe;--xy-handle-border-color-default: #1e1e1e;--xy-selection-background-color-default: rgba(200, 200, 220, .08);--xy-selection-border-default: 1px dotted rgba(200, 200, 220, .8);--xy-controls-button-background-color-default: #2b2b2b;--xy-controls-button-background-color-hover-default: #3e3e3e;--xy-controls-button-color-default: #f8f8f8;--xy-controls-button-color-hover-default: #fff;--xy-controls-button-border-color-default: #5b5b5b;--xy-controls-box-shadow-default: 0 0 2px 1px rgba(0, 0, 0, .08);--xy-edge-label-background-color-default: #141414;--xy-edge-label-color-default: #f8f8f8}.react-flow__background{background-color:var(--xy-background-color-props, var(--xy-background-color, var(--xy-background-color-default)));pointer-events:none;z-index:-1}.react-flow__container{position:absolute;width:100%;height:100%;top:0;left:0}.react-flow__pane{z-index:1}.react-flow__pane.draggable{cursor:grab}.react-flow__pane.dragging{cursor:grabbing}.react-flow__pane.selection{cursor:pointer}.react-flow__viewport{transform-origin:0 0;z-index:2;pointer-events:none}.react-flow__renderer{z-index:4}.react-flow__selection{z-index:6}.react-flow__nodesselection-rect:focus,.react-flow__nodesselection-rect:focus-visible{outline:none}.react-flow__edge-path{stroke:var(--xy-edge-stroke, var(--xy-edge-stroke-default));stroke-width:var(--xy-edge-stroke-width, 
var(--xy-edge-stroke-width-default));fill:none}.react-flow__connection-path{stroke:var(--xy-connectionline-stroke, var(--xy-connectionline-stroke-default));stroke-width:var(--xy-connectionline-stroke-width, var(--xy-connectionline-stroke-width-default));fill:none}.react-flow .react-flow__edges{position:absolute}.react-flow .react-flow__edges svg{overflow:visible;position:absolute;pointer-events:none}.react-flow__edge{pointer-events:visibleStroke}.react-flow__edge.selectable{cursor:pointer}.react-flow__edge.animated path{stroke-dasharray:5;animation:dashdraw .5s linear infinite}.react-flow__edge.animated path.react-flow__edge-interaction{stroke-dasharray:none;animation:none}.react-flow__edge.inactive{pointer-events:none}.react-flow__edge.selected,.react-flow__edge:focus,.react-flow__edge:focus-visible{outline:none}.react-flow__edge.selected .react-flow__edge-path,.react-flow__edge.selectable:focus .react-flow__edge-path,.react-flow__edge.selectable:focus-visible .react-flow__edge-path{stroke:var(--xy-edge-stroke-selected, var(--xy-edge-stroke-selected-default))}.react-flow__edge-textwrapper{pointer-events:all}.react-flow__edge .react-flow__edge-text{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__arrowhead polyline{stroke:var(--xy-edge-stroke, var(--xy-edge-stroke-default))}.react-flow__arrowhead polyline.arrowclosed{fill:var(--xy-edge-stroke, var(--xy-edge-stroke-default))}.react-flow__connection{pointer-events:none}.react-flow__connection .animated{stroke-dasharray:5;animation:dashdraw .5s linear infinite}svg.react-flow__connectionline{z-index:1001;overflow:visible;position:absolute}.react-flow__nodes{pointer-events:none;transform-origin:0 0}.react-flow__node{position:absolute;-webkit-user-select:none;-moz-user-select:none;user-select:none;pointer-events:all;transform-origin:0 
0;box-sizing:border-box;cursor:default}.react-flow__node.selectable{cursor:pointer}.react-flow__node.draggable{cursor:grab;pointer-events:all}.react-flow__node.draggable.dragging{cursor:grabbing}.react-flow__nodesselection{z-index:3;transform-origin:left top;pointer-events:none}.react-flow__nodesselection-rect{position:absolute;pointer-events:all;cursor:grab}.react-flow__handle{position:absolute;pointer-events:none;min-width:5px;min-height:5px;width:6px;height:6px;background-color:var(--xy-handle-background-color, var(--xy-handle-background-color-default));border:1px solid var(--xy-handle-border-color, var(--xy-handle-border-color-default));border-radius:100%}.react-flow__handle.connectingfrom{pointer-events:all}.react-flow__handle.connectionindicator{pointer-events:all;cursor:crosshair}.react-flow__handle-bottom{top:auto;left:50%;bottom:0;transform:translate(-50%,50%)}.react-flow__handle-top{top:0;left:50%;transform:translate(-50%,-50%)}.react-flow__handle-left{top:50%;left:0;transform:translate(-50%,-50%)}.react-flow__handle-right{top:50%;right:0;transform:translate(50%,-50%)}.react-flow__edgeupdater{cursor:move;pointer-events:all}.react-flow__pane.selection .react-flow__panel{pointer-events:none}.react-flow__panel{position:absolute;z-index:5;margin:15px}.react-flow__panel.top{top:0}.react-flow__panel.bottom{bottom:0}.react-flow__panel.top.center,.react-flow__panel.bottom.center{left:50%;transform:translate(-15px) translate(-50%)}.react-flow__panel.left{left:0}.react-flow__panel.right{right:0}.react-flow__panel.left.center,.react-flow__panel.right.center{top:50%;transform:translateY(-15px) translateY(-50%)}.react-flow__attribution{font-size:10px;background:var(--xy-attribution-background-color, var(--xy-attribution-background-color-default));padding:2px 3px;margin:0}.react-flow__attribution a{text-decoration:none;color:#999}@keyframes 
dashdraw{0%{stroke-dashoffset:10}}.react-flow__edgelabel-renderer{position:absolute;width:100%;height:100%;pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none;left:0;top:0}.react-flow__viewport-portal{position:absolute;width:100%;height:100%;left:0;top:0;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__minimap{background:var( --xy-minimap-background-color-props, var(--xy-minimap-background-color, var(--xy-minimap-background-color-default)) )}.react-flow__minimap-svg{display:block}.react-flow__minimap-mask{fill:var( --xy-minimap-mask-background-color-props, var(--xy-minimap-mask-background-color, var(--xy-minimap-mask-background-color-default)) );stroke:var( --xy-minimap-mask-stroke-color-props, var(--xy-minimap-mask-stroke-color, var(--xy-minimap-mask-stroke-color-default)) );stroke-width:var( --xy-minimap-mask-stroke-width-props, var(--xy-minimap-mask-stroke-width, var(--xy-minimap-mask-stroke-width-default)) )}.react-flow__minimap-node{fill:var( --xy-minimap-node-background-color-props, var(--xy-minimap-node-background-color, var(--xy-minimap-node-background-color-default)) );stroke:var( --xy-minimap-node-stroke-color-props, var(--xy-minimap-node-stroke-color, var(--xy-minimap-node-stroke-color-default)) );stroke-width:var( --xy-minimap-node-stroke-width-props, var(--xy-minimap-node-stroke-width, var(--xy-minimap-node-stroke-width-default)) )}.react-flow__background-pattern.dots{fill:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-dots-color-default)) )}.react-flow__background-pattern.lines{stroke:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-lines-color-default)) )}.react-flow__background-pattern.cross{stroke:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-cross-color-default)) 
)}.react-flow__controls{display:flex;flex-direction:column;box-shadow:var(--xy-controls-box-shadow, var(--xy-controls-box-shadow-default))}.react-flow__controls.horizontal{flex-direction:row}.react-flow__controls-button{display:flex;justify-content:center;align-items:center;height:26px;width:26px;padding:4px;border:none;background:var(--xy-controls-button-background-color, var(--xy-controls-button-background-color-default));border-bottom:1px solid var( --xy-controls-button-border-color-props, var(--xy-controls-button-border-color, var(--xy-controls-button-border-color-default)) );color:var( --xy-controls-button-color-props, var(--xy-controls-button-color, var(--xy-controls-button-color-default)) );cursor:pointer;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__controls-button svg{width:100%;max-width:12px;max-height:12px;fill:currentColor}.react-flow__edge.updating .react-flow__edge-path{stroke:#777}.react-flow__edge-text{font-size:10px}.react-flow__node.selectable:focus,.react-flow__node.selectable:focus-visible{outline:none}.react-flow__node-input,.react-flow__node-default,.react-flow__node-output,.react-flow__node-group{padding:10px;border-radius:var(--xy-node-border-radius, var(--xy-node-border-radius-default));width:150px;font-size:12px;color:var(--xy-node-color, var(--xy-node-color-default));text-align:center;border:var(--xy-node-border, var(--xy-node-border-default));background-color:var(--xy-node-background-color, var(--xy-node-background-color-default))}.react-flow__node-input.selectable:hover,.react-flow__node-default.selectable:hover,.react-flow__node-output.selectable:hover,.react-flow__node-group.selectable:hover{box-shadow:var(--xy-node-boxshadow-hover, 
var(--xy-node-boxshadow-hover-default))}.react-flow__node-input.selectable.selected,.react-flow__node-input.selectable:focus,.react-flow__node-input.selectable:focus-visible,.react-flow__node-default.selectable.selected,.react-flow__node-default.selectable:focus,.react-flow__node-default.selectable:focus-visible,.react-flow__node-output.selectable.selected,.react-flow__node-output.selectable:focus,.react-flow__node-output.selectable:focus-visible,.react-flow__node-group.selectable.selected,.react-flow__node-group.selectable:focus,.react-flow__node-group.selectable:focus-visible{box-shadow:var(--xy-node-boxshadow-selected, var(--xy-node-boxshadow-selected-default))}.react-flow__node-group{background-color:var(--xy-node-group-background-color, var(--xy-node-group-background-color-default))}.react-flow__nodesselection-rect,.react-flow__selection{background:var(--xy-selection-background-color, var(--xy-selection-background-color-default));border:var(--xy-selection-border, var(--xy-selection-border-default))}.react-flow__nodesselection-rect:focus,.react-flow__nodesselection-rect:focus-visible,.react-flow__selection:focus,.react-flow__selection:focus-visible{outline:none}.react-flow__controls-button:hover{background:var( --xy-controls-button-background-color-hover-props, var(--xy-controls-button-background-color-hover, var(--xy-controls-button-background-color-hover-default)) );color:var( --xy-controls-button-color-hover-props, var(--xy-controls-button-color-hover, var(--xy-controls-button-color-hover-default)) )}.react-flow__controls-button:disabled{pointer-events:none}.react-flow__controls-button:disabled svg{fill-opacity:.4}.react-flow__controls-button:last-child{border-bottom:none}.react-flow__controls.horizontal .react-flow__controls-button{border-bottom:none;border-right:1px solid var( --xy-controls-button-border-color-props, var(--xy-controls-button-border-color, var(--xy-controls-button-border-color-default)) )}.react-flow__controls.horizontal 
.react-flow__controls-button:last-child{border-right:none}.react-flow__resize-control{position:absolute}.react-flow__resize-control.left,.react-flow__resize-control.right{cursor:ew-resize}.react-flow__resize-control.top,.react-flow__resize-control.bottom{cursor:ns-resize}.react-flow__resize-control.top.left,.react-flow__resize-control.bottom.right{cursor:nwse-resize}.react-flow__resize-control.bottom.left,.react-flow__resize-control.top.right{cursor:nesw-resize}.react-flow__resize-control.handle{width:5px;height:5px;border:1px solid #fff;border-radius:1px;background-color:var(--xy-resize-background-color, var(--xy-resize-background-color-default));translate:-50% -50%}.react-flow__resize-control.handle.left{left:0;top:50%}.react-flow__resize-control.handle.right{left:100%;top:50%}.react-flow__resize-control.handle.top{left:50%;top:0}.react-flow__resize-control.handle.bottom{left:50%;top:100%}.react-flow__resize-control.handle.top.left,.react-flow__resize-control.handle.bottom.left{left:0}.react-flow__resize-control.handle.top.right,.react-flow__resize-control.handle.bottom.right{left:100%}.react-flow__resize-control.line{border-color:var(--xy-resize-background-color, var(--xy-resize-background-color-default));border-width:0;border-style:solid}.react-flow__resize-control.line.left,.react-flow__resize-control.line.right{width:1px;transform:translate(-50%);top:0;height:100%}.react-flow__resize-control.line.left{left:0;border-left-width:1px}.react-flow__resize-control.line.right{left:100%;border-right-width:1px}.react-flow__resize-control.line.top,.react-flow__resize-control.line.bottom{height:1px;transform:translateY(-50%);left:0;width:100%}.react-flow__resize-control.line.top{top:0;border-top-width:1px}.react-flow__resize-control.line.bottom{border-bottom-width:1px;top:100%}.react-flow__edge-textbg{fill:var(--xy-edge-label-background-color, var(--xy-edge-label-background-color-default))}.react-flow__edge-text{fill:var(--xy-edge-label-color, 
var(--xy-edge-label-color-default))} +/*! tailwindcss v4.1.12 | MIT License | https://tailwindcss.com */@layer properties{@supports (((-webkit-hyphens:none)) and (not (margin-trim:inline))) or ((-moz-orient:inline) and (not (color:rgb(from red r g b)))){*,:before,:after,::backdrop{--tw-translate-x:0;--tw-translate-y:0;--tw-translate-z:0;--tw-scale-x:1;--tw-scale-y:1;--tw-scale-z:1;--tw-rotate-x:initial;--tw-rotate-y:initial;--tw-rotate-z:initial;--tw-skew-x:initial;--tw-skew-y:initial;--tw-space-y-reverse:0;--tw-space-x-reverse:0;--tw-border-style:solid;--tw-leading:initial;--tw-font-weight:initial;--tw-tracking:initial;--tw-shadow:0 0 #0000;--tw-shadow-color:initial;--tw-shadow-alpha:100%;--tw-inset-shadow:0 0 #0000;--tw-inset-shadow-color:initial;--tw-inset-shadow-alpha:100%;--tw-ring-color:initial;--tw-ring-shadow:0 0 #0000;--tw-inset-ring-color:initial;--tw-inset-ring-shadow:0 0 #0000;--tw-ring-inset:initial;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-offset-shadow:0 0 
#0000;--tw-outline-style:solid;--tw-blur:initial;--tw-brightness:initial;--tw-contrast:initial;--tw-grayscale:initial;--tw-hue-rotate:initial;--tw-invert:initial;--tw-opacity:initial;--tw-saturate:initial;--tw-sepia:initial;--tw-drop-shadow:initial;--tw-drop-shadow-color:initial;--tw-drop-shadow-alpha:100%;--tw-drop-shadow-size:initial;--tw-backdrop-blur:initial;--tw-backdrop-brightness:initial;--tw-backdrop-contrast:initial;--tw-backdrop-grayscale:initial;--tw-backdrop-hue-rotate:initial;--tw-backdrop-invert:initial;--tw-backdrop-opacity:initial;--tw-backdrop-saturate:initial;--tw-backdrop-sepia:initial;--tw-duration:initial;--tw-ease:initial;--tw-animation-delay:0s;--tw-animation-direction:normal;--tw-animation-duration:initial;--tw-animation-fill-mode:none;--tw-animation-iteration-count:1;--tw-enter-blur:0;--tw-enter-opacity:1;--tw-enter-rotate:0;--tw-enter-scale:1;--tw-enter-translate-x:0;--tw-enter-translate-y:0;--tw-exit-blur:0;--tw-exit-opacity:1;--tw-exit-rotate:0;--tw-exit-scale:1;--tw-exit-translate-x:0;--tw-exit-translate-y:0}}}@layer theme{:root,:host{--font-sans:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-mono:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--color-red-50:oklch(97.1% .013 17.38);--color-red-100:oklch(93.6% .032 17.717);--color-red-200:oklch(88.5% .062 18.334);--color-red-400:oklch(70.4% .191 22.216);--color-red-500:oklch(63.7% .237 25.331);--color-red-600:oklch(57.7% .245 27.325);--color-red-700:oklch(50.5% .213 27.518);--color-red-800:oklch(44.4% .177 26.899);--color-red-900:oklch(39.6% .141 25.723);--color-red-950:oklch(25.8% .092 26.042);--color-orange-50:oklch(98% .016 73.684);--color-orange-100:oklch(95.4% .038 75.164);--color-orange-200:oklch(90.1% .076 70.697);--color-orange-300:oklch(83.7% .128 66.29);--color-orange-400:oklch(75% .183 55.934);--color-orange-500:oklch(70.5% .213 
47.604);--color-orange-600:oklch(64.6% .222 41.116);--color-orange-700:oklch(55.3% .195 38.402);--color-orange-800:oklch(47% .157 37.304);--color-orange-900:oklch(40.8% .123 38.172);--color-orange-950:oklch(26.6% .079 36.259);--color-amber-50:oklch(98.7% .022 95.277);--color-amber-100:oklch(96.2% .059 95.617);--color-amber-200:oklch(92.4% .12 95.746);--color-amber-300:oklch(87.9% .169 91.605);--color-amber-400:oklch(82.8% .189 84.429);--color-amber-500:oklch(76.9% .188 70.08);--color-amber-600:oklch(66.6% .179 58.318);--color-amber-700:oklch(55.5% .163 48.998);--color-amber-800:oklch(47.3% .137 46.201);--color-amber-900:oklch(41.4% .112 45.904);--color-amber-950:oklch(27.9% .077 45.635);--color-yellow-100:oklch(97.3% .071 103.193);--color-yellow-200:oklch(94.5% .129 101.54);--color-yellow-400:oklch(85.2% .199 91.936);--color-yellow-600:oklch(68.1% .162 75.834);--color-yellow-700:oklch(55.4% .135 66.442);--color-green-50:oklch(98.2% .018 155.826);--color-green-100:oklch(96.2% .044 156.743);--color-green-200:oklch(92.5% .084 155.995);--color-green-300:oklch(87.1% .15 154.449);--color-green-400:oklch(79.2% .209 151.711);--color-green-500:oklch(72.3% .219 149.579);--color-green-600:oklch(62.7% .194 149.214);--color-green-700:oklch(52.7% .154 150.069);--color-green-800:oklch(44.8% .119 151.328);--color-green-900:oklch(39.3% .095 152.535);--color-green-950:oklch(26.6% .065 152.934);--color-emerald-50:oklch(97.9% .021 166.113);--color-emerald-100:oklch(95% .052 163.051);--color-emerald-200:oklch(90.5% .093 164.15);--color-emerald-400:oklch(76.5% .177 163.223);--color-emerald-500:oklch(69.6% .17 162.48);--color-emerald-600:oklch(59.6% .145 163.225);--color-emerald-700:oklch(50.8% .118 165.612);--color-emerald-800:oklch(43.2% .095 166.913);--color-blue-50:oklch(97% .014 254.604);--color-blue-100:oklch(93.2% .032 255.585);--color-blue-200:oklch(88.2% .059 254.128);--color-blue-300:oklch(80.9% .105 251.813);--color-blue-400:oklch(70.7% .165 
254.624);--color-blue-500:oklch(62.3% .214 259.815);--color-blue-600:oklch(54.6% .245 262.881);--color-blue-700:oklch(48.8% .243 264.376);--color-blue-800:oklch(42.4% .199 265.638);--color-blue-900:oklch(37.9% .146 265.522);--color-blue-950:oklch(28.2% .091 267.935);--color-purple-50:oklch(97.7% .014 308.299);--color-purple-100:oklch(94.6% .033 307.174);--color-purple-200:oklch(90.2% .063 306.703);--color-purple-400:oklch(71.4% .203 305.504);--color-purple-500:oklch(62.7% .265 303.9);--color-purple-600:oklch(55.8% .288 302.321);--color-purple-800:oklch(43.8% .218 303.724);--color-purple-900:oklch(38.1% .176 304.987);--color-gray-50:oklch(98.5% .002 247.839);--color-gray-100:oklch(96.7% .003 264.542);--color-gray-200:oklch(92.8% .006 264.531);--color-gray-300:oklch(87.2% .01 258.338);--color-gray-400:oklch(70.7% .022 261.325);--color-gray-500:oklch(55.1% .027 264.364);--color-gray-600:oklch(44.6% .03 256.802);--color-gray-700:oklch(37.3% .034 259.733);--color-gray-800:oklch(27.8% .033 256.848);--color-gray-900:oklch(21% .034 264.665);--color-black:#000;--color-white:#fff;--spacing:.25rem;--container-md:28rem;--container-lg:32rem;--container-2xl:42rem;--container-3xl:48rem;--container-4xl:56rem;--container-5xl:64rem;--container-6xl:72rem;--container-7xl:80rem;--text-xs:.75rem;--text-xs--line-height:calc(1/.75);--text-sm:.875rem;--text-sm--line-height:calc(1.25/.875);--text-base:1rem;--text-base--line-height: 1.5 ;--text-lg:1.125rem;--text-lg--line-height:calc(1.75/1.125);--text-xl:1.25rem;--text-xl--line-height:calc(1.75/1.25);--text-2xl:1.5rem;--text-2xl--line-height:calc(2/1.5);--font-weight-medium:500;--font-weight-semibold:600;--font-weight-bold:700;--tracking-tight:-.025em;--tracking-wide:.025em;--tracking-wider:.05em;--tracking-widest:.1em;--leading-tight:1.25;--leading-relaxed:1.625;--drop-shadow-lg:0 4px 4px #00000026;--ease-out:cubic-bezier(0,0,.2,1);--ease-in-out:cubic-bezier(.4,0,.2,1);--animate-spin:spin 1s linear infinite;--animate-pulse:pulse 2s 
cubic-bezier(.4,0,.6,1)infinite;--animate-bounce:bounce 1s infinite;--blur-sm:8px;--default-transition-duration:.15s;--default-transition-timing-function:cubic-bezier(.4,0,.2,1);--default-font-family:var(--font-sans);--default-mono-font-family:var(--font-mono)}}@layer base{*,:after,:before,::backdrop{box-sizing:border-box;border:0 solid;margin:0;padding:0}::file-selector-button{box-sizing:border-box;border:0 solid;margin:0;padding:0}html,:host{-webkit-text-size-adjust:100%;tab-size:4;line-height:1.5;font-family:var(--default-font-family,ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji");font-feature-settings:var(--default-font-feature-settings,normal);font-variation-settings:var(--default-font-variation-settings,normal);-webkit-tap-highlight-color:transparent}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;-webkit-text-decoration:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:var(--default-mono-font-family,ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier 
New",monospace);font-feature-settings:var(--default-mono-font-feature-settings,normal);font-variation-settings:var(--default-mono-font-variation-settings,normal);font-size:1em}small{font-size:80%}sub,sup{vertical-align:baseline;font-size:75%;line-height:0;position:relative}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}:-moz-focusring{outline:auto}progress{vertical-align:baseline}summary{display:list-item}ol,ul,menu{list-style:none}img,svg,video,canvas,audio,iframe,embed,object{vertical-align:middle;display:block}img,video{max-width:100%;height:auto}button,input,select,optgroup,textarea{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}::file-selector-button{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}:where(select:is([multiple],[size])) optgroup{font-weight:bolder}:where(select:is([multiple],[size])) optgroup option{padding-inline-start:20px}::file-selector-button{margin-inline-end:4px}::placeholder{opacity:1}@supports (not ((-webkit-appearance:-apple-pay-button))) or (contain-intrinsic-size:1px){::placeholder{color:currentColor}@supports (color:color-mix(in lab,red,red)){::placeholder{color:color-mix(in oklab,currentcolor 
50%,transparent)}}}textarea{resize:vertical}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-date-and-time-value{min-height:1lh;text-align:inherit}::-webkit-datetime-edit{display:inline-flex}::-webkit-datetime-edit-fields-wrapper{padding:0}::-webkit-datetime-edit{padding-block:0}::-webkit-datetime-edit-year-field{padding-block:0}::-webkit-datetime-edit-month-field{padding-block:0}::-webkit-datetime-edit-day-field{padding-block:0}::-webkit-datetime-edit-hour-field{padding-block:0}::-webkit-datetime-edit-minute-field{padding-block:0}::-webkit-datetime-edit-second-field{padding-block:0}::-webkit-datetime-edit-millisecond-field{padding-block:0}::-webkit-datetime-edit-meridiem-field{padding-block:0}::-webkit-calendar-picker-indicator{line-height:1}:-moz-ui-invalid{box-shadow:none}button,input:where([type=button],[type=reset],[type=submit]){appearance:button}::file-selector-button{appearance:button}::-webkit-inner-spin-button{height:auto}::-webkit-outer-spin-button{height:auto}[hidden]:where(:not([hidden=until-found])){display:none!important}*{border-color:var(--border);outline-color:var(--ring)}@supports (color:color-mix(in lab,red,red)){*{outline-color:color-mix(in oklab,var(--ring)50%,transparent)}}body{background-color:var(--background);color:var(--foreground)}}@layer components;@layer 
utilities{.\@container\/card-header{container:card-header/inline-size}.pointer-events-none{pointer-events:none}.collapse{visibility:collapse}.visible{visibility:visible}.sr-only{clip:rect(0,0,0,0);white-space:nowrap;border-width:0;width:1px;height:1px;margin:-1px;padding:0;position:absolute;overflow:hidden}.absolute{position:absolute}.fixed{position:fixed}.relative{position:relative}.static{position:static}.inset-0{inset:calc(var(--spacing)*0)}.inset-2{inset:calc(var(--spacing)*2)}.inset-y-0{inset-block:calc(var(--spacing)*0)}.top-0{top:calc(var(--spacing)*0)}.top-1{top:calc(var(--spacing)*1)}.top-2{top:calc(var(--spacing)*2)}.top-4{top:calc(var(--spacing)*4)}.top-\[30px\]{top:30px}.-right-2{right:calc(var(--spacing)*-2)}.right-0{right:calc(var(--spacing)*0)}.right-1{right:calc(var(--spacing)*1)}.right-2{right:calc(var(--spacing)*2)}.right-4{right:calc(var(--spacing)*4)}.bottom-0{bottom:calc(var(--spacing)*0)}.bottom-24{bottom:calc(var(--spacing)*24)}.-left-2{left:calc(var(--spacing)*-2)}.left-0{left:calc(var(--spacing)*0)}.left-1\/2{left:50%}.left-2{left:calc(var(--spacing)*2)}.left-\[18px\]{left:18px}.z-10{z-index:10}.z-20{z-index:20}.z-50{z-index:50}.col-start-2{grid-column-start:2}.row-span-2{grid-row:span 2/span 2}.row-start-1{grid-row-start:1}.container{width:100%}@media (min-width:40rem){.container{max-width:40rem}}@media (min-width:48rem){.container{max-width:48rem}}@media (min-width:64rem){.container{max-width:64rem}}@media (min-width:80rem){.container{max-width:80rem}}@media (min-width:96rem){.container{max-width:96rem}}.container\!{width:100%!important}@media (min-width:40rem){.container\!{max-width:40rem!important}}@media (min-width:48rem){.container\!{max-width:48rem!important}}@media (min-width:64rem){.container\!{max-width:64rem!important}}@media (min-width:80rem){.container\!{max-width:80rem!important}}@media 
(min-width:96rem){.container\!{max-width:96rem!important}}.m-2{margin:calc(var(--spacing)*2)}.-mx-1{margin-inline:calc(var(--spacing)*-1)}.mx-0\.5{margin-inline:calc(var(--spacing)*.5)}.mx-4{margin-inline:calc(var(--spacing)*4)}.mx-auto{margin-inline:auto}.my-1{margin-block:calc(var(--spacing)*1)}.my-2{margin-block:calc(var(--spacing)*2)}.my-3{margin-block:calc(var(--spacing)*3)}.my-4{margin-block:calc(var(--spacing)*4)}.mt-0{margin-top:calc(var(--spacing)*0)}.mt-0\.5{margin-top:calc(var(--spacing)*.5)}.mt-1{margin-top:calc(var(--spacing)*1)}.mt-2{margin-top:calc(var(--spacing)*2)}.mt-3{margin-top:calc(var(--spacing)*3)}.mt-4{margin-top:calc(var(--spacing)*4)}.mt-12{margin-top:calc(var(--spacing)*12)}.mr-1{margin-right:calc(var(--spacing)*1)}.mr-2{margin-right:calc(var(--spacing)*2)}.mb-1{margin-bottom:calc(var(--spacing)*1)}.mb-2{margin-bottom:calc(var(--spacing)*2)}.mb-3{margin-bottom:calc(var(--spacing)*3)}.mb-4{margin-bottom:calc(var(--spacing)*4)}.mb-6{margin-bottom:calc(var(--spacing)*6)}.mb-8{margin-bottom:calc(var(--spacing)*8)}.ml-0{margin-left:calc(var(--spacing)*0)}.ml-1{margin-left:calc(var(--spacing)*1)}.ml-2{margin-left:calc(var(--spacing)*2)}.ml-3{margin-left:calc(var(--spacing)*3)}.ml-4{margin-left:calc(var(--spacing)*4)}.ml-5{margin-left:calc(var(--spacing)*5)}.ml-6{margin-left:calc(var(--spacing)*6)}.ml-auto{margin-left:auto}.line-clamp-2{-webkit-line-clamp:2;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.line-clamp-3{-webkit-line-clamp:3;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.block{display:block}.contents{display:contents}.flex{display:flex}.grid{display:grid}.hidden{display:none}.inline{display:inline}.inline-block{display:inline-block}.inline-flex{display:inline-flex}.table{display:table}.field-sizing-content{field-sizing:content}.size-2{width:calc(var(--spacing)*2);height:calc(var(--spacing)*2)}.size-3\.5{width:calc(var(--spacing)*3.5);height:calc(var(--spacing)*3.5)}.size-4{width:calc(var(--spacing)*
4);height:calc(var(--spacing)*4)}.size-9{width:calc(var(--spacing)*9);height:calc(var(--spacing)*9)}.\!h-2{height:calc(var(--spacing)*2)!important}.h-0{height:calc(var(--spacing)*0)}.h-0\.5{height:calc(var(--spacing)*.5)}.h-1{height:calc(var(--spacing)*1)}.h-2{height:calc(var(--spacing)*2)}.h-2\.5{height:calc(var(--spacing)*2.5)}.h-3{height:calc(var(--spacing)*3)}.h-3\.5{height:calc(var(--spacing)*3.5)}.h-4{height:calc(var(--spacing)*4)}.h-5{height:calc(var(--spacing)*5)}.h-6{height:calc(var(--spacing)*6)}.h-7{height:calc(var(--spacing)*7)}.h-8{height:calc(var(--spacing)*8)}.h-9{height:calc(var(--spacing)*9)}.h-10{height:calc(var(--spacing)*10)}.h-12{height:calc(var(--spacing)*12)}.h-14{height:calc(var(--spacing)*14)}.h-16{height:calc(var(--spacing)*16)}.h-32{height:calc(var(--spacing)*32)}.h-\[1\.2rem\]{height:1.2rem}.h-\[1px\]{height:1px}.h-\[85vh\]{height:85vh}.h-\[500px\]{height:500px}.h-\[calc\(100\%\+8px\)\]{height:calc(100% + 8px)}.h-\[calc\(100vh-3\.5rem\)\]{height:calc(100vh - 3.5rem)}.h-\[calc\(100vh-3\.7rem\)\]{height:calc(100vh - 
3.7rem)}.h-\[var\(--radix-select-trigger-height\)\]{height:var(--radix-select-trigger-height)}.h-full{height:100%}.h-px{height:1px}.h-screen{height:100vh}.max-h-\(--radix-dropdown-menu-content-available-height\){max-height:var(--radix-dropdown-menu-content-available-height)}.max-h-\(--radix-select-content-available-height\){max-height:var(--radix-select-content-available-height)}.max-h-32{max-height:calc(var(--spacing)*32)}.max-h-40{max-height:calc(var(--spacing)*40)}.max-h-48{max-height:calc(var(--spacing)*48)}.max-h-60{max-height:calc(var(--spacing)*60)}.max-h-64{max-height:calc(var(--spacing)*64)}.max-h-\[85vh\]{max-height:85vh}.max-h-\[90vh\]{max-height:90vh}.max-h-\[200px\]{max-height:200px}.max-h-\[400px\]{max-height:400px}.max-h-none{max-height:none}.max-h-screen{max-height:100vh}.\!min-h-0{min-height:calc(var(--spacing)*0)!important}.min-h-0{min-height:calc(var(--spacing)*0)}.min-h-16{min-height:calc(var(--spacing)*16)}.min-h-\[36px\]{min-height:36px}.min-h-\[40px\]{min-height:40px}.min-h-\[50vh\]{min-height:50vh}.min-h-\[400px\]{min-height:400px}.min-h-screen{min-height:100vh}.\!w-2{width:calc(var(--spacing)*2)!important}.w-1{width:calc(var(--spacing)*1)}.w-2{width:calc(var(--spacing)*2)}.w-2\.5{width:calc(var(--spacing)*2.5)}.w-3{width:calc(var(--spacing)*3)}.w-3\.5{width:calc(var(--spacing)*3.5)}.w-4{width:calc(var(--spacing)*4)}.w-5{width:calc(var(--spacing)*5)}.w-6{width:calc(var(--spacing)*6)}.w-8{width:calc(var(--spacing)*8)}.w-9{width:calc(var(--spacing)*9)}.w-10{width:calc(var(--spacing)*10)}.w-12{width:calc(var(--spacing)*12)}.w-16{width:calc(var(--spacing)*16)}.w-20{width:calc(var(--spacing)*20)}.w-56{width:calc(var(--spacing)*56)}.w-64{width:calc(var(--spacing)*64)}.w-80{width:calc(var(--spacing)*80)}.w-\[1\.2rem\]{width:1.2rem}.w-\[1px\]{width:1px}.w-\[28rem\]{width:28rem}.w-\[90vw\]{width:90vw}.w-\[600px\]{width:600px}.w-\[800px\]{width:800px}.w-fit{width:fit-content}.w-full{width:100%}.w-px{width:1px}.max-w-2xl{max-width:var(--container-2xl)}.
max-w-3xl{max-width:var(--container-3xl)}.max-w-4xl{max-width:var(--container-4xl)}.max-w-6xl{max-width:var(--container-6xl)}.max-w-7xl{max-width:var(--container-7xl)}.max-w-\[80\%\]{max-width:80%}.max-w-\[90vw\]{max-width:90vw}.max-w-\[200px\]{max-width:200px}.max-w-full{max-width:100%}.max-w-lg{max-width:var(--container-lg)}.max-w-md{max-width:var(--container-md)}.max-w-none{max-width:none}.\!min-w-0{min-width:calc(var(--spacing)*0)!important}.min-w-0{min-width:calc(var(--spacing)*0)}.min-w-\[1\.25rem\]{min-width:1.25rem}.min-w-\[8rem\]{min-width:8rem}.min-w-\[50px\]{min-width:50px}.min-w-\[80px\]{min-width:80px}.min-w-\[300px\]{min-width:300px}.min-w-\[400px\]{min-width:400px}.min-w-\[800px\]{min-width:800px}.min-w-\[var\(--radix-select-trigger-width\)\]{min-width:var(--radix-select-trigger-width)}.min-w-full{min-width:100%}.flex-1{flex:1}.flex-shrink-0,.shrink-0{flex-shrink:0}.origin-\(--radix-dropdown-menu-content-transform-origin\){transform-origin:var(--radix-dropdown-menu-content-transform-origin)}.origin-\(--radix-select-content-transform-origin\){transform-origin:var(--radix-select-content-transform-origin)}.origin-bottom{transform-origin:bottom}.-translate-x-1\/2{--tw-translate-x: -50% 
;translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-0{--tw-translate-x:calc(var(--spacing)*0);translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-4{--tw-translate-x:calc(var(--spacing)*4);translate:var(--tw-translate-x)var(--tw-translate-y)}.scale-0{--tw-scale-x:0%;--tw-scale-y:0%;--tw-scale-z:0%;scale:var(--tw-scale-x)var(--tw-scale-y)}.scale-75{--tw-scale-x:75%;--tw-scale-y:75%;--tw-scale-z:75%;scale:var(--tw-scale-x)var(--tw-scale-y)}.scale-100{--tw-scale-x:100%;--tw-scale-y:100%;--tw-scale-z:100%;scale:var(--tw-scale-x)var(--tw-scale-y)}.rotate-0{rotate:none}.rotate-90{rotate:90deg}.transform{transform:var(--tw-rotate-x,)var(--tw-rotate-y,)var(--tw-rotate-z,)var(--tw-skew-x,)var(--tw-skew-y,)}.animate-bounce{animation:var(--animate-bounce)}.animate-in{animation:enter var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.animate-pulse{animation:var(--animate-pulse)}.animate-spin{animation:var(--animate-spin)}.cursor-col-resize{cursor:col-resize}.cursor-default{cursor:default}.cursor-pointer{cursor:pointer}.touch-none{touch-action:none}.resize{resize:both}.resize-none{resize:none}.scroll-my-1{scroll-margin-block:calc(var(--spacing)*1)}.list-inside{list-style-position:inside}.list-decimal{list-style-type:decimal}.list-disc{list-style-type:disc}.list-none{list-style-type:none}.auto-rows-min{grid-auto-rows:min-content}.grid-cols-1{grid-template-columns:repeat(1,minmax(0,1fr))}.grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.grid-cols-\[auto_auto_1fr_auto\]{grid-template-columns:auto auto 1fr auto}.grid-rows-\[auto_auto\]{grid-template-rows:auto 
auto}.flex-col{flex-direction:column}.flex-row-reverse{flex-direction:row-reverse}.flex-wrap{flex-wrap:wrap}.items-center{align-items:center}.items-end{align-items:flex-end}.items-start{align-items:flex-start}.items-stretch{align-items:stretch}.justify-between{justify-content:space-between}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}.gap-0{gap:calc(var(--spacing)*0)}.gap-1{gap:calc(var(--spacing)*1)}.gap-1\.5{gap:calc(var(--spacing)*1.5)}.gap-2{gap:calc(var(--spacing)*2)}.gap-3{gap:calc(var(--spacing)*3)}.gap-4{gap:calc(var(--spacing)*4)}.gap-6{gap:calc(var(--spacing)*6)}:where(.space-y-0\.5>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*.5)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*.5)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-1>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-1\.5>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1.5)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1.5)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-2>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*2)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-3>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*3)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*3)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-4>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*4)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*4)*calc(1 - 
var(--tw-space-y-reverse)))}:where(.space-y-6>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*6)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*6)*calc(1 - var(--tw-space-y-reverse)))}.gap-x-4{column-gap:calc(var(--spacing)*4)}:where(.space-x-1>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*1)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*1)*calc(1 - var(--tw-space-x-reverse)))}:where(.space-x-2>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*2)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-x-reverse)))}.gap-y-1{row-gap:calc(var(--spacing)*1)}.self-start{align-self:flex-start}.justify-self-end{justify-self:flex-end}.truncate{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.overflow-auto{overflow:auto}.overflow-hidden{overflow:hidden}.overflow-x-auto{overflow-x:auto}.overflow-x-hidden{overflow-x:hidden}.overflow-y-auto{overflow-y:auto}.\!rounded-full{border-radius:3.40282e38px!important}.rounded{border-radius:.25rem}.rounded-\[4px\]{border-radius:4px}.rounded-\[inherit\]{border-radius:inherit}.rounded-full{border-radius:3.40282e38px}.rounded-lg{border-radius:var(--radius)}.rounded-md{border-radius:calc(var(--radius) - 2px)}.rounded-none{border-radius:0}.rounded-sm{border-radius:calc(var(--radius) - 
4px)}.rounded-l-none{border-top-left-radius:0;border-bottom-left-radius:0}.rounded-r-none{border-top-right-radius:0;border-bottom-right-radius:0}.\!border{border-style:var(--tw-border-style)!important;border-width:1px!important}.border{border-style:var(--tw-border-style);border-width:1px}.border-2{border-style:var(--tw-border-style);border-width:2px}.border-t{border-top-style:var(--tw-border-style);border-top-width:1px}.border-r{border-right-style:var(--tw-border-style);border-right-width:1px}.border-b{border-bottom-style:var(--tw-border-style);border-bottom-width:1px}.border-l{border-left-style:var(--tw-border-style);border-left-width:1px}.border-l-0{border-left-style:var(--tw-border-style);border-left-width:0}.border-l-2{border-left-style:var(--tw-border-style);border-left-width:2px}.border-l-4{border-left-style:var(--tw-border-style);border-left-width:4px}.border-dashed{--tw-border-style:dashed;border-style:dashed}.\!border-gray-600{border-color:var(--color-gray-600)!important}.border-\[\#643FB2\]{border-color:#643fb2}.border-\[\#643FB2\]\/20{border-color:#643fb233}.border-\[\#643FB2\]\/30{border-color:#643fb24d}.border-amber-200{border-color:var(--color-amber-200)}.border-blue-200{border-color:var(--color-blue-200)}.border-blue-300{border-color:var(--color-blue-300)}.border-blue-400{border-color:var(--color-blue-400)}.border-blue-500{border-color:var(--color-blue-500)}.border-border,.border-border\/50{border-color:var(--border)}@supports (color:color-mix(in lab,red,red)){.border-border\/50{border-color:color-mix(in oklab,var(--border)50%,transparent)}}.border-current\/30{border-color:currentColor}@supports (color:color-mix(in lab,red,red)){.border-current\/30{border-color:color-mix(in oklab,currentcolor 30%,transparent)}}.border-destructive\/30{border-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.border-destructive\/30{border-color:color-mix(in 
oklab,var(--destructive)30%,transparent)}}.border-foreground\/5{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/5{border-color:color-mix(in oklab,var(--foreground)5%,transparent)}}.border-foreground\/10{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/10{border-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.border-foreground\/20{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/20{border-color:color-mix(in oklab,var(--foreground)20%,transparent)}}.border-gray-200{border-color:var(--color-gray-200)}.border-gray-300{border-color:var(--color-gray-300)}.border-gray-400{border-color:var(--color-gray-400)}.border-gray-500\/20{border-color:#6a728233}@supports (color:color-mix(in lab,red,red)){.border-gray-500\/20{border-color:color-mix(in oklab,var(--color-gray-500)20%,transparent)}}.border-green-200{border-color:var(--color-green-200)}.border-green-500{border-color:var(--color-green-500)}.border-green-500\/20{border-color:#00c75833}@supports (color:color-mix(in lab,red,red)){.border-green-500\/20{border-color:color-mix(in oklab,var(--color-green-500)20%,transparent)}}.border-green-500\/40{border-color:#00c75866}@supports (color:color-mix(in lab,red,red)){.border-green-500\/40{border-color:color-mix(in oklab,var(--color-green-500)40%,transparent)}}.border-green-600\/20{border-color:#00a54433}@supports (color:color-mix(in lab,red,red)){.border-green-600\/20{border-color:color-mix(in oklab,var(--color-green-600)20%,transparent)}}.border-input{border-color:var(--input)}.border-muted,.border-muted\/50{border-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.border-muted\/50{border-color:color-mix(in 
oklab,var(--muted)50%,transparent)}}.border-orange-200{border-color:var(--color-orange-200)}.border-orange-300{border-color:var(--color-orange-300)}.border-orange-500{border-color:var(--color-orange-500)}.border-orange-500\/20{border-color:#fe6e0033}@supports (color:color-mix(in lab,red,red)){.border-orange-500\/20{border-color:color-mix(in oklab,var(--color-orange-500)20%,transparent)}}.border-orange-500\/40{border-color:#fe6e0066}@supports (color:color-mix(in lab,red,red)){.border-orange-500\/40{border-color:color-mix(in oklab,var(--color-orange-500)40%,transparent)}}.border-orange-600\/20{border-color:#f0510033}@supports (color:color-mix(in lab,red,red)){.border-orange-600\/20{border-color:color-mix(in oklab,var(--color-orange-600)20%,transparent)}}.border-primary,.border-primary\/20{border-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.border-primary\/20{border-color:color-mix(in oklab,var(--primary)20%,transparent)}}.border-red-200{border-color:var(--color-red-200)}.border-red-500{border-color:var(--color-red-500)}.border-red-500\/20{border-color:#fb2c3633}@supports (color:color-mix(in lab,red,red)){.border-red-500\/20{border-color:color-mix(in oklab,var(--color-red-500)20%,transparent)}}.border-transparent{border-color:#0000}.border-yellow-200{border-color:var(--color-yellow-200)}.border-t-transparent{border-top-color:#0000}.border-l-transparent{border-left-color:#0000}.bg-\[\#643FB2\]{background-color:#643fb2}.bg-\[\#643FB2\]\/10{background-color:#643fb21a}.bg-accent\/10{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.bg-accent\/10{background-color:color-mix(in oklab,var(--accent)10%,transparent)}}.bg-amber-50{background-color:var(--color-amber-50)}.bg-amber-500{background-color:var(--color-amber-500)}.bg-amber-500\/10{background-color:#f99c001a}@supports (color:color-mix(in lab,red,red)){.bg-amber-500\/10{background-color:color-mix(in 
oklab,var(--color-amber-500)10%,transparent)}}.bg-background,.bg-background\/50{background-color:var(--background)}@supports (color:color-mix(in lab,red,red)){.bg-background\/50{background-color:color-mix(in oklab,var(--background)50%,transparent)}}.bg-black{background-color:var(--color-black)}.bg-black\/50{background-color:#00000080}@supports (color:color-mix(in lab,red,red)){.bg-black\/50{background-color:color-mix(in oklab,var(--color-black)50%,transparent)}}.bg-black\/60{background-color:#0009}@supports (color:color-mix(in lab,red,red)){.bg-black\/60{background-color:color-mix(in oklab,var(--color-black)60%,transparent)}}.bg-blue-50{background-color:var(--color-blue-50)}.bg-blue-50\/80{background-color:#eff6ffcc}@supports (color:color-mix(in lab,red,red)){.bg-blue-50\/80{background-color:color-mix(in oklab,var(--color-blue-50)80%,transparent)}}.bg-blue-50\/95{background-color:#eff6fff2}@supports (color:color-mix(in lab,red,red)){.bg-blue-50\/95{background-color:color-mix(in oklab,var(--color-blue-50)95%,transparent)}}.bg-blue-100{background-color:var(--color-blue-100)}.bg-blue-500{background-color:var(--color-blue-500)}.bg-blue-500\/5{background-color:#3080ff0d}@supports (color:color-mix(in lab,red,red)){.bg-blue-500\/5{background-color:color-mix(in oklab,var(--color-blue-500)5%,transparent)}}.bg-blue-500\/10{background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.bg-blue-500\/10{background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.bg-blue-600{background-color:var(--color-blue-600)}.bg-border{background-color:var(--border)}.bg-card{background-color:var(--card)}.bg-current{background-color:currentColor}.bg-destructive,.bg-destructive\/10{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.bg-destructive\/10{background-color:color-mix(in 
oklab,var(--destructive)10%,transparent)}}.bg-emerald-500{background-color:var(--color-emerald-500)}.bg-foreground\/5{background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.bg-foreground\/5{background-color:color-mix(in oklab,var(--foreground)5%,transparent)}}.bg-foreground\/10{background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.bg-foreground\/10{background-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.bg-gray-50{background-color:var(--color-gray-50)}.bg-gray-100{background-color:var(--color-gray-100)}.bg-gray-200{background-color:var(--color-gray-200)}.bg-gray-400{background-color:var(--color-gray-400)}.bg-gray-500\/10{background-color:#6a72821a}@supports (color:color-mix(in lab,red,red)){.bg-gray-500\/10{background-color:color-mix(in oklab,var(--color-gray-500)10%,transparent)}}.bg-gray-900\/90{background-color:#101828e6}@supports (color:color-mix(in lab,red,red)){.bg-gray-900\/90{background-color:color-mix(in oklab,var(--color-gray-900)90%,transparent)}}.bg-green-50{background-color:var(--color-green-50)}.bg-green-100{background-color:var(--color-green-100)}.bg-green-500{background-color:var(--color-green-500)}.bg-green-500\/5{background-color:#00c7580d}@supports (color:color-mix(in lab,red,red)){.bg-green-500\/5{background-color:color-mix(in oklab,var(--color-green-500)5%,transparent)}}.bg-green-500\/10{background-color:#00c7581a}@supports (color:color-mix(in lab,red,red)){.bg-green-500\/10{background-color:color-mix(in oklab,var(--color-green-500)10%,transparent)}}.bg-muted{background-color:var(--muted)}.bg-muted-foreground\/20{background-color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.bg-muted-foreground\/20{background-color:color-mix(in oklab,var(--muted-foreground)20%,transparent)}}.bg-muted-foreground\/30{background-color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.bg-muted-foreground\/30{background-color:color-mix(in 
oklab,var(--muted-foreground)30%,transparent)}}.bg-muted\/30{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.bg-muted\/30{background-color:color-mix(in oklab,var(--muted)30%,transparent)}}.bg-muted\/50{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.bg-muted\/50{background-color:color-mix(in oklab,var(--muted)50%,transparent)}}.bg-orange-50{background-color:var(--color-orange-50)}.bg-orange-50\/50{background-color:#fff7ed80}@supports (color:color-mix(in lab,red,red)){.bg-orange-50\/50{background-color:color-mix(in oklab,var(--color-orange-50)50%,transparent)}}.bg-orange-100{background-color:var(--color-orange-100)}.bg-orange-100\/50{background-color:#ffedd580}@supports (color:color-mix(in lab,red,red)){.bg-orange-100\/50{background-color:color-mix(in oklab,var(--color-orange-100)50%,transparent)}}.bg-orange-500{background-color:var(--color-orange-500)}.bg-orange-500\/5{background-color:#fe6e000d}@supports (color:color-mix(in lab,red,red)){.bg-orange-500\/5{background-color:color-mix(in oklab,var(--color-orange-500)5%,transparent)}}.bg-orange-500\/10{background-color:#fe6e001a}@supports (color:color-mix(in lab,red,red)){.bg-orange-500\/10{background-color:color-mix(in oklab,var(--color-orange-500)10%,transparent)}}.bg-popover{background-color:var(--popover)}.bg-primary,.bg-primary\/10{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/10{background-color:color-mix(in oklab,var(--primary)10%,transparent)}}.bg-primary\/30{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/30{background-color:color-mix(in oklab,var(--primary)30%,transparent)}}.bg-primary\/40{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/40{background-color:color-mix(in 
oklab,var(--primary)40%,transparent)}}.bg-purple-50{background-color:var(--color-purple-50)}.bg-purple-100{background-color:var(--color-purple-100)}.bg-purple-500{background-color:var(--color-purple-500)}.bg-red-50{background-color:var(--color-red-50)}.bg-red-100{background-color:var(--color-red-100)}.bg-red-500{background-color:var(--color-red-500)}.bg-red-500\/10{background-color:#fb2c361a}@supports (color:color-mix(in lab,red,red)){.bg-red-500\/10{background-color:color-mix(in oklab,var(--color-red-500)10%,transparent)}}.bg-secondary{background-color:var(--secondary)}.bg-transparent{background-color:#0000}.bg-white{background-color:var(--color-white)}.bg-white\/60{background-color:#fff9}@supports (color:color-mix(in lab,red,red)){.bg-white\/60{background-color:color-mix(in oklab,var(--color-white)60%,transparent)}}.bg-white\/90{background-color:#ffffffe6}@supports (color:color-mix(in lab,red,red)){.bg-white\/90{background-color:color-mix(in oklab,var(--color-white)90%,transparent)}}.bg-yellow-100{background-color:var(--color-yellow-100)}.fill-current{fill:currentColor}.object-cover{object-fit:cover}.p-0{padding:calc(var(--spacing)*0)}.p-1{padding:calc(var(--spacing)*1)}.p-1\.5{padding:calc(var(--spacing)*1.5)}.p-2{padding:calc(var(--spacing)*2)}.p-3{padding:calc(var(--spacing)*3)}.p-4{padding:calc(var(--spacing)*4)}.p-6{padding:calc(var(--spacing)*6)}.p-8{padding:calc(var(--spacing)*8)}.p-\[1px\]{padding:1px}.px-1{padding-inline:calc(var(--spacing)*1)}.px-1\.5{padding-inline:calc(var(--spacing)*1.5)}.px-2{padding-inline:calc(var(--spacing)*2)}.px-2\.5{padding-inline:calc(var(--spacing)*2.5)}.px-3{padding-inline:calc(var(--spacing)*3)}.px-4{padding-inline:calc(var(--spacing)*4)}.px-6{padding-inline:calc(var(--spacing)*6)}.px-8{padding-inline:calc(var(--spacing)*8)}.py-0{padding-block:calc(var(--spacing)*0)}.py-0\.5{padding-block:calc(var(--spacing)*.5)}.py-1{padding-block:calc(var(--spacing)*1)}.py-1\.5{padding-block:calc(var(--spacing)*1.5)}.py-2{padding-block:ca
lc(var(--spacing)*2)}.py-2\.5{padding-block:calc(var(--spacing)*2.5)}.py-3{padding-block:calc(var(--spacing)*3)}.py-4{padding-block:calc(var(--spacing)*4)}.py-6{padding-block:calc(var(--spacing)*6)}.py-8{padding-block:calc(var(--spacing)*8)}.pt-0{padding-top:calc(var(--spacing)*0)}.pt-1{padding-top:calc(var(--spacing)*1)}.pt-2{padding-top:calc(var(--spacing)*2)}.pt-3{padding-top:calc(var(--spacing)*3)}.pt-4{padding-top:calc(var(--spacing)*4)}.pt-6{padding-top:calc(var(--spacing)*6)}.pt-8{padding-top:calc(var(--spacing)*8)}.pr-2{padding-right:calc(var(--spacing)*2)}.pr-4{padding-right:calc(var(--spacing)*4)}.pr-8{padding-right:calc(var(--spacing)*8)}.pb-2{padding-bottom:calc(var(--spacing)*2)}.pb-3{padding-bottom:calc(var(--spacing)*3)}.pb-4{padding-bottom:calc(var(--spacing)*4)}.pb-6{padding-bottom:calc(var(--spacing)*6)}.pl-2{padding-left:calc(var(--spacing)*2)}.pl-3{padding-left:calc(var(--spacing)*3)}.pl-4{padding-left:calc(var(--spacing)*4)}.pl-5{padding-left:calc(var(--spacing)*5)}.pl-8{padding-left:calc(var(--spacing)*8)}.text-center{text-align:center}.text-left{text-align:left}.text-right{text-align:right}.font-mono{font-family:var(--font-mono)}.text-2xl{font-size:var(--text-2xl);line-height:var(--tw-leading,var(--text-2xl--line-height))}.text-base{font-size:var(--text-base);line-height:var(--tw-leading,var(--text-base--line-height))}.text-lg{font-size:var(--text-lg);line-height:var(--tw-leading,var(--text-lg--line-height))}.text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.text-xl{font-size:var(--text-xl);line-height:var(--tw-leading,var(--text-xl--line-height))}.text-xs{font-size:var(--text-xs);line-height:var(--tw-leading,var(--text-xs--line-height))}.text-\[10px\]{font-size:10px}.leading-none{--tw-leading:1;line-height:1}.leading-relaxed{--tw-leading:var(--leading-relaxed);line-height:var(--leading-relaxed)}.leading-tight{--tw-leading:var(--leading-tight);line-height:var(--leading-tight)}.font-bold{--tw-font-weigh
t:var(--font-weight-bold);font-weight:var(--font-weight-bold)}.font-medium{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.font-semibold{--tw-font-weight:var(--font-weight-semibold);font-weight:var(--font-weight-semibold)}.tracking-tight{--tw-tracking:var(--tracking-tight);letter-spacing:var(--tracking-tight)}.tracking-wide{--tw-tracking:var(--tracking-wide);letter-spacing:var(--tracking-wide)}.tracking-wider{--tw-tracking:var(--tracking-wider);letter-spacing:var(--tracking-wider)}.tracking-widest{--tw-tracking:var(--tracking-widest);letter-spacing:var(--tracking-widest)}.break-words{overflow-wrap:break-word}.break-all{word-break:break-all}.whitespace-nowrap{white-space:nowrap}.whitespace-pre-wrap{white-space:pre-wrap}.text-\[\#643FB2\]{color:#643fb2}.text-amber-500{color:var(--color-amber-500)}.text-amber-600{color:var(--color-amber-600)}.text-amber-600\/80{color:#dd7400cc}@supports (color:color-mix(in lab,red,red)){.text-amber-600\/80{color:color-mix(in oklab,var(--color-amber-600)80%,transparent)}}.text-amber-700{color:var(--color-amber-700)}.text-amber-800{color:var(--color-amber-800)}.text-amber-900{color:var(--color-amber-900)}.text-blue-500{color:var(--color-blue-500)}.text-blue-500\/80{color:#3080ffcc}@supports (color:color-mix(in lab,red,red)){.text-blue-500\/80{color:color-mix(in oklab,var(--color-blue-500)80%,transparent)}}.text-blue-600{color:var(--color-blue-600)}.text-blue-700{color:var(--color-blue-700)}.text-blue-800{color:var(--color-blue-800)}.text-blue-900{color:var(--color-blue-900)}.text-card-foreground{color:var(--card-foreground)}.text-current{color:currentColor}.text-destructive,.text-destructive\/70{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.text-destructive\/70{color:color-mix(in oklab,var(--destructive)70%,transparent)}}.text-destructive\/90{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.text-destructive\/90{color:color-mix(in 
oklab,var(--destructive)90%,transparent)}}.text-emerald-600{color:var(--color-emerald-600)}.text-foreground{color:var(--foreground)}.text-gray-300{color:var(--color-gray-300)}.text-gray-400{color:var(--color-gray-400)}.text-gray-500{color:var(--color-gray-500)}.text-gray-600{color:var(--color-gray-600)}.text-gray-700{color:var(--color-gray-700)}.text-gray-900{color:var(--color-gray-900)}.text-green-500{color:var(--color-green-500)}.text-green-600{color:var(--color-green-600)}.text-green-700{color:var(--color-green-700)}.text-green-800{color:var(--color-green-800)}.text-green-900{color:var(--color-green-900)}.text-muted-foreground,.text-muted-foreground\/60{color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.text-muted-foreground\/60{color:color-mix(in oklab,var(--muted-foreground)60%,transparent)}}.text-muted-foreground\/70{color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.text-muted-foreground\/70{color:color-mix(in oklab,var(--muted-foreground)70%,transparent)}}.text-muted-foreground\/80{color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.text-muted-foreground\/80{color:color-mix(in 
oklab,var(--muted-foreground)80%,transparent)}}.text-orange-500{color:var(--color-orange-500)}.text-orange-600{color:var(--color-orange-600)}.text-orange-700{color:var(--color-orange-700)}.text-orange-800{color:var(--color-orange-800)}.text-orange-900{color:var(--color-orange-900)}.text-popover-foreground{color:var(--popover-foreground)}.text-primary{color:var(--primary)}.text-primary-foreground{color:var(--primary-foreground)}.text-purple-500{color:var(--color-purple-500)}.text-purple-600{color:var(--color-purple-600)}.text-purple-800{color:var(--color-purple-800)}.text-red-400{color:var(--color-red-400)}.text-red-500{color:var(--color-red-500)}.text-red-600{color:var(--color-red-600)}.text-red-700{color:var(--color-red-700)}.text-red-800{color:var(--color-red-800)}.text-secondary-foreground{color:var(--secondary-foreground)}.text-white{color:var(--color-white)}.text-yellow-600{color:var(--color-yellow-600)}.text-yellow-700{color:var(--color-yellow-700)}.capitalize{text-transform:capitalize}.lowercase{text-transform:lowercase}.uppercase{text-transform:uppercase}.italic{font-style:italic}.underline-offset-4{text-underline-offset:4px}.opacity-0{opacity:0}.opacity-50{opacity:.5}.opacity-60{opacity:.6}.opacity-70{opacity:.7}.opacity-80{opacity:.8}.opacity-100{opacity:1}.shadow{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-lg{--tw-shadow:0 10px 15px -3px var(--tw-shadow-color,#0000001a),0 4px 6px -4px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-md{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px 
var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-sm{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-xs{--tw-shadow:0 1px 2px 0 var(--tw-shadow-color,#0000000d);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring-0{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(0px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring-2{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-\[\#643FB2\]\/20{--tw-shadow-color:#643fb233}@supports (color:color-mix(in lab,red,red)){.shadow-\[\#643FB2\]\/20{--tw-shadow-color:color-mix(in oklab,oklab(47.4316% .069152 -.159147/.2) var(--tw-shadow-alpha),transparent)}}.shadow-green-500\/20{--tw-shadow-color:#00c75833}@supports (color:color-mix(in lab,red,red)){.shadow-green-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-green-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-orange-500\/20{--tw-shadow-color:#fe6e0033}@supports (color:color-mix(in lab,red,red)){.shadow-orange-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-orange-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-primary\/25{--tw-shadow-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.shadow-primary\/25{--tw-shadow-color:color-mix(in 
oklab,color-mix(in oklab,var(--primary)25%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-red-500\/20{--tw-shadow-color:#fb2c3633}@supports (color:color-mix(in lab,red,red)){.shadow-red-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-red-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.ring-blue-500{--tw-ring-color:var(--color-blue-500)}.ring-blue-500\/20{--tw-ring-color:#3080ff33}@supports (color:color-mix(in lab,red,red)){.ring-blue-500\/20{--tw-ring-color:color-mix(in oklab,var(--color-blue-500)20%,transparent)}}.ring-offset-2{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.ring-offset-background{--tw-ring-offset-color:var(--background)}.outline-hidden{--tw-outline-style:none;outline-style:none}@media (forced-colors:active){.outline-hidden{outline-offset:2px;outline:2px solid #0000}}.outline{outline-style:var(--tw-outline-style);outline-width:1px}.drop-shadow-lg{--tw-drop-shadow-size:drop-shadow(0 4px 4px 
var(--tw-drop-shadow-color,#00000026));--tw-drop-shadow:drop-shadow(var(--drop-shadow-lg));filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.filter{filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.backdrop-blur-sm{--tw-backdrop-blur:blur(var(--blur-sm));-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.transition{transition-property:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to,opacity,box-shadow,transform,translate,scale,rotate,filter,-webkit-backdrop-filter,backdrop-filter,display,visibility,content-visibility,overlay,pointer-events;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-\[color\,box-shadow\]{transition-property:color,box-shadow;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-all{transition-property:all;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-colors{transition-proper
ty:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-opacity{transition-property:opacity;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-shadow{transition-property:box-shadow;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-transform{transition-property:transform,translate,scale,rotate;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-none{transition-property:none}.duration-150{--tw-duration:.15s;transition-duration:.15s}.duration-200{--tw-duration:.2s;transition-duration:.2s}.duration-300{--tw-duration:.3s;transition-duration:.3s}.ease-in-out{--tw-ease:var(--ease-in-out);transition-timing-function:var(--ease-in-out)}.ease-out{--tw-ease:var(--ease-out);transition-timing-function:var(--ease-out)}.fade-in-0{--tw-enter-opacity:0}.outline-none{--tw-outline-style:none;outline-style:none}.select-none{-webkit-user-select:none;user-select:none}.zoom-in-95{--tw-enter-scale:.95}.\[animation-delay\:-0\.3s\]{animation-delay:-.3s}.\[animation-delay\:-0\.15s\]{animation-delay:-.15s}.fade-in{--tw-enter-opacity:0}.running{animation-play-state:running}.slide-in-from-bottom-2{--tw-enter-translate-y:calc(2*var(--spacing))}.group-open\:rotate-90:is(:where(.group):is([open],:popover-open,:open) *){rotate:90deg}.group-open\:rotate-180:is(:where(.group):is([open],:popover-open,:open) *){rotate:180deg}@media (hover:hover){.group-hover\:bg-primary:is(:where(.group):hover 
*){background-color:var(--primary)}.group-hover\:opacity-100:is(:where(.group):hover *){opacity:1}.group-hover\:shadow-md:is(:where(.group):hover *){--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.group-hover\:shadow-primary\/20:is(:where(.group):hover *){--tw-shadow-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.group-hover\:shadow-primary\/20:is(:where(.group):hover *){--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--primary)20%,transparent)var(--tw-shadow-alpha),transparent)}}}.group-data-\[disabled\=true\]\:pointer-events-none:is(:where(.group)[data-disabled=true] *){pointer-events:none}.group-data-\[disabled\=true\]\:opacity-50:is(:where(.group)[data-disabled=true] *){opacity:.5}.peer-disabled\:cursor-not-allowed:is(:where(.peer):disabled~*){cursor:not-allowed}.peer-disabled\:opacity-50:is(:where(.peer):disabled~*){opacity:.5}.selection\:bg-primary ::selection{background-color:var(--primary)}.selection\:bg-primary::selection{background-color:var(--primary)}.selection\:text-primary-foreground 
::selection{color:var(--primary-foreground)}.selection\:text-primary-foreground::selection{color:var(--primary-foreground)}.file\:inline-flex::file-selector-button{display:inline-flex}.file\:h-7::file-selector-button{height:calc(var(--spacing)*7)}.file\:border-0::file-selector-button{border-style:var(--tw-border-style);border-width:0}.file\:bg-transparent::file-selector-button{background-color:#0000}.file\:text-sm::file-selector-button{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.file\:font-medium::file-selector-button{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.file\:text-foreground::file-selector-button{color:var(--foreground)}.placeholder\:text-muted-foreground::placeholder{color:var(--muted-foreground)}.first\:mt-0:first-child{margin-top:calc(var(--spacing)*0)}.last\:border-0:last-child{border-style:var(--tw-border-style);border-width:0}.last\:border-r-0:last-child{border-right-style:var(--tw-border-style);border-right-width:0}.last\:border-b-0:last-child{border-bottom-style:var(--tw-border-style);border-bottom-width:0}@media (hover:hover){.hover\:scale-y-\[1\.15\]:hover{--tw-scale-y:1.15;scale:var(--tw-scale-x)var(--tw-scale-y)}.hover\:border-gray-300:hover{border-color:var(--color-gray-300)}.hover\:border-muted-foreground\/30:hover{border-color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.hover\:border-muted-foreground\/30:hover{border-color:color-mix(in oklab,var(--muted-foreground)30%,transparent)}}.hover\:bg-accent:hover,.hover\:bg-accent\/50:hover{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-accent\/50:hover{background-color:color-mix(in oklab,var(--accent)50%,transparent)}}.hover\:bg-amber-100:hover{background-color:var(--color-amber-100)}.hover\:bg-blue-700:hover{background-color:var(--color-blue-700)}.hover\:bg-destructive\/80:hover{background-color:var(--destructive)}@supports (color:color-mix(in 
lab,red,red)){.hover\:bg-destructive\/80:hover{background-color:color-mix(in oklab,var(--destructive)80%,transparent)}}.hover\:bg-destructive\/90:hover{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-destructive\/90:hover{background-color:color-mix(in oklab,var(--destructive)90%,transparent)}}.hover\:bg-muted:hover,.hover\:bg-muted\/30:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/30:hover{background-color:color-mix(in oklab,var(--muted)30%,transparent)}}.hover\:bg-muted\/50:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/50:hover{background-color:color-mix(in oklab,var(--muted)50%,transparent)}}.hover\:bg-muted\/70:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/70:hover{background-color:color-mix(in oklab,var(--muted)70%,transparent)}}.hover\:bg-orange-100:hover{background-color:var(--color-orange-100)}.hover\:bg-primary\/20:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/20:hover{background-color:color-mix(in oklab,var(--primary)20%,transparent)}}.hover\:bg-primary\/80:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/80:hover{background-color:color-mix(in oklab,var(--primary)80%,transparent)}}.hover\:bg-primary\/90:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/90:hover{background-color:color-mix(in oklab,var(--primary)90%,transparent)}}.hover\:bg-red-50:hover{background-color:var(--color-red-50)}.hover\:bg-secondary\/80:hover{background-color:var(--secondary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-secondary\/80:hover{background-color:color-mix(in 
oklab,var(--secondary)80%,transparent)}}.hover\:bg-white:hover{background-color:var(--color-white)}.hover\:text-accent-foreground:hover{color:var(--accent-foreground)}.hover\:text-destructive\/80:hover{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:text-destructive\/80:hover{color:color-mix(in oklab,var(--destructive)80%,transparent)}}.hover\:text-foreground:hover{color:var(--foreground)}.hover\:text-orange-900:hover{color:var(--color-orange-900)}.hover\:text-primary:hover{color:var(--primary)}.hover\:text-red-600:hover{color:var(--color-red-600)}.hover\:underline:hover{text-decoration-line:underline}.hover\:opacity-70:hover{opacity:.7}.hover\:opacity-100:hover{opacity:1}.hover\:shadow-md:hover{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.hover\:brightness-110:hover{--tw-brightness:brightness(110%);filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}}.focus\:bg-accent:focus{background-color:var(--accent)}.focus\:text-accent-foreground:focus{color:var(--accent-foreground)}.focus\:ring-2:focus{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus\:ring-ring:focus{--tw-ring-color:var(--ring)}.focus\:ring-offset-2:focus{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 
var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus\:outline-none:focus{--tw-outline-style:none;outline-style:none}.focus-visible\:border-ring:focus-visible{border-color:var(--ring)}.focus-visible\:ring-1:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(1px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-2:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-\[3px\]:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(3px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-destructive\/20:focus-visible{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-destructive\/20:focus-visible{--tw-ring-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.focus-visible\:ring-ring:focus-visible,.focus-visible\:ring-ring\/50:focus-visible{--tw-ring-color:var(--ring)}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-ring\/50:focus-visible{--tw-ring-color:color-mix(in oklab,var(--ring)50%,transparent)}}.focus-visible\:ring-offset-2:focus-visible{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 
var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus-visible\:ring-offset-background:focus-visible{--tw-ring-offset-color:var(--background)}.focus-visible\:outline-none:focus-visible{--tw-outline-style:none;outline-style:none}.disabled\:pointer-events-none:disabled{pointer-events:none}.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}.disabled\:opacity-50:disabled{opacity:.5}.has-data-\[slot\=card-action\]\:grid-cols-\[1fr_auto\]:has([data-slot=card-action]){grid-template-columns:1fr auto}.has-\[\>svg\]\:px-2\.5:has(>svg){padding-inline:calc(var(--spacing)*2.5)}.has-\[\>svg\]\:px-3:has(>svg){padding-inline:calc(var(--spacing)*3)}.has-\[\>svg\]\:px-4:has(>svg){padding-inline:calc(var(--spacing)*4)}.aria-invalid\:border-destructive[aria-invalid=true]{border-color:var(--destructive)}.aria-invalid\:ring-destructive\/20[aria-invalid=true]{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.aria-invalid\:ring-destructive\/20[aria-invalid=true]{--tw-ring-color:color-mix(in 
oklab,var(--destructive)20%,transparent)}}.data-\[disabled\]\:pointer-events-none[data-disabled]{pointer-events:none}.data-\[disabled\]\:opacity-50[data-disabled]{opacity:.5}.data-\[inset\]\:pl-8[data-inset]{padding-left:calc(var(--spacing)*8)}.data-\[placeholder\]\:text-muted-foreground[data-placeholder]{color:var(--muted-foreground)}.data-\[side\=bottom\]\:translate-y-1[data-side=bottom]{--tw-translate-y:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=bottom\]\:slide-in-from-top-2[data-side=bottom]{--tw-enter-translate-y:calc(2*var(--spacing)*-1)}.data-\[side\=left\]\:-translate-x-1[data-side=left]{--tw-translate-x:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=left\]\:slide-in-from-right-2[data-side=left]{--tw-enter-translate-x:calc(2*var(--spacing))}.data-\[side\=right\]\:translate-x-1[data-side=right]{--tw-translate-x:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=right\]\:slide-in-from-left-2[data-side=right]{--tw-enter-translate-x:calc(2*var(--spacing)*-1)}.data-\[side\=top\]\:-translate-y-1[data-side=top]{--tw-translate-y:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=top\]\:slide-in-from-bottom-2[data-side=top]{--tw-enter-translate-y:calc(2*var(--spacing))}.data-\[size\=default\]\:h-9[data-size=default]{height:calc(var(--spacing)*9)}.data-\[size\=sm\]\:h-8[data-size=sm]{height:calc(var(--spacing)*8)}:is(.\*\:data-\[slot\=select-value\]\:line-clamp-1>*)[data-slot=select-value]{-webkit-line-clamp:1;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}:is(.\*\:data-\[slot\=select-value\]\:flex>*)[data-slot=select-value]{display:flex}:is(.\*\:data-\[slot\=select-value\]\:items-center>*)[data-slot=select-value]{align-items:center}:is(.\*\:data-\[slot\=select-value\]\:gap-2>*)[data-slot=select-value]{gap:calc(var(--spacing)*2)}.data-\[state\=active\]\:bg-background[data-state=active
]{background-color:var(--background)}.data-\[state\=active\]\:text-foreground[data-state=active]{color:var(--foreground)}.data-\[state\=active\]\:shadow[data-state=active]{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.data-\[state\=checked\]\:translate-x-4[data-state=checked]{--tw-translate-x:calc(var(--spacing)*4);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[state\=checked\]\:border-primary[data-state=checked]{border-color:var(--primary)}.data-\[state\=checked\]\:bg-primary[data-state=checked]{background-color:var(--primary)}.data-\[state\=checked\]\:text-primary-foreground[data-state=checked]{color:var(--primary-foreground)}.data-\[state\=closed\]\:animate-out[data-state=closed]{animation:exit var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.data-\[state\=closed\]\:fade-out-0[data-state=closed]{--tw-exit-opacity:0}.data-\[state\=closed\]\:zoom-out-95[data-state=closed]{--tw-exit-scale:.95}.data-\[state\=open\]\:animate-in[data-state=open]{animation:enter 
var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.data-\[state\=open\]\:bg-accent[data-state=open]{background-color:var(--accent)}.data-\[state\=open\]\:text-accent-foreground[data-state=open]{color:var(--accent-foreground)}.data-\[state\=open\]\:fade-in-0[data-state=open]{--tw-enter-opacity:0}.data-\[state\=open\]\:zoom-in-95[data-state=open]{--tw-enter-scale:.95}.data-\[state\=unchecked\]\:translate-x-0[data-state=unchecked]{--tw-translate-x:calc(var(--spacing)*0);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[state\=unchecked\]\:bg-input[data-state=unchecked]{background-color:var(--input)}.data-\[variant\=destructive\]\:text-destructive[data-variant=destructive]{color:var(--destructive)}.data-\[variant\=destructive\]\:focus\:bg-destructive\/10[data-variant=destructive]:focus{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.data-\[variant\=destructive\]\:focus\:bg-destructive\/10[data-variant=destructive]:focus{background-color:color-mix(in oklab,var(--destructive)10%,transparent)}}.data-\[variant\=destructive\]\:focus\:text-destructive[data-variant=destructive]:focus{color:var(--destructive)}@media (min-width:40rem){.sm\:col-span-2{grid-column:span 2/span 2}.sm\:w-64{width:calc(var(--spacing)*64)}.sm\:max-w-lg{max-width:var(--container-lg)}.sm\:flex-none{flex:none}.sm\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.sm\:flex-row{flex-direction:row}.sm\:items-center{align-items:center}}@media (min-width:48rem){.md\:col-span-2{grid-column:span 2/span 
2}.md\:col-start-2{grid-column-start:2}.md\:inline{display:inline}.md\:max-w-2xl{max-width:var(--container-2xl)}.md\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.md\:gap-8{gap:calc(var(--spacing)*8)}.md\:text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}}@media (min-width:64rem){.lg\:col-span-3{grid-column:span 3/span 3}.lg\:max-w-4xl{max-width:var(--container-4xl)}.lg\:grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.lg\:flex-row{flex-direction:row}.lg\:items-center{align-items:center}.lg\:justify-between{justify-content:space-between}}@media (min-width:80rem){.xl\:col-span-2{grid-column:span 2/span 2}.xl\:col-span-4{grid-column:span 4/span 4}.xl\:max-w-5xl{max-width:var(--container-5xl)}.xl\:grid-cols-4{grid-template-columns:repeat(4,minmax(0,1fr))}}.dark\:scale-0:is(.dark *){--tw-scale-x:0%;--tw-scale-y:0%;--tw-scale-z:0%;scale:var(--tw-scale-x)var(--tw-scale-y)}.dark\:scale-100:is(.dark *){--tw-scale-x:100%;--tw-scale-y:100%;--tw-scale-z:100%;scale:var(--tw-scale-x)var(--tw-scale-y)}.dark\:-rotate-90:is(.dark *){rotate:-90deg}.dark\:rotate-0:is(.dark *){rotate:none}.dark\:\!border-gray-500:is(.dark *){border-color:var(--color-gray-500)!important}.dark\:\!border-gray-600:is(.dark *){border-color:var(--color-gray-600)!important}.dark\:border-\[\#8B5CF6\]:is(.dark *){border-color:#8b5cf6}.dark\:border-\[\#8B5CF6\]\/20:is(.dark *){border-color:#8b5cf633}.dark\:border-\[\#8B5CF6\]\/30:is(.dark *){border-color:#8b5cf64d}.dark\:border-amber-800:is(.dark *){border-color:var(--color-amber-800)}.dark\:border-amber-900:is(.dark *){border-color:var(--color-amber-900)}.dark\:border-blue-400:is(.dark *){border-color:var(--color-blue-400)}.dark\:border-blue-500:is(.dark *){border-color:var(--color-blue-500)}.dark\:border-blue-700:is(.dark *){border-color:var(--color-blue-700)}.dark\:border-blue-800:is(.dark *){border-color:var(--color-blue-800)}.dark\:border-gray-500:is(.dark 
*){border-color:var(--color-gray-500)}.dark\:border-gray-600:is(.dark *){border-color:var(--color-gray-600)}.dark\:border-gray-700:is(.dark *){border-color:var(--color-gray-700)}.dark\:border-green-400:is(.dark *){border-color:var(--color-green-400)}.dark\:border-green-800:is(.dark *){border-color:var(--color-green-800)}.dark\:border-input:is(.dark *){border-color:var(--input)}.dark\:border-orange-400:is(.dark *){border-color:var(--color-orange-400)}.dark\:border-orange-700:is(.dark *){border-color:var(--color-orange-700)}.dark\:border-orange-800:is(.dark *){border-color:var(--color-orange-800)}.dark\:border-red-400:is(.dark *){border-color:var(--color-red-400)}.dark\:border-red-800:is(.dark *){border-color:var(--color-red-800)}.dark\:\!bg-gray-800\/90:is(.dark *){background-color:#1e2939e6!important}@supports (color:color-mix(in lab,red,red)){.dark\:\!bg-gray-800\/90:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-800)90%,transparent)!important}}.dark\:bg-\[\#8B5CF6\]:is(.dark *){background-color:#8b5cf6}.dark\:bg-\[\#8B5CF6\]\/10:is(.dark *){background-color:#8b5cf61a}.dark\:bg-amber-600:is(.dark *){background-color:var(--color-amber-600)}.dark\:bg-amber-950\/20:is(.dark *){background-color:#46190133}@supports (color:color-mix(in lab,red,red)){.dark\:bg-amber-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-amber-950)20%,transparent)}}.dark\:bg-amber-950\/50:is(.dark *){background-color:#46190180}@supports (color:color-mix(in lab,red,red)){.dark\:bg-amber-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-amber-950)50%,transparent)}}.dark\:bg-blue-500\/10:is(.dark *){background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.dark\:bg-blue-600:is(.dark *){background-color:var(--color-blue-600)}.dark\:bg-blue-900:is(.dark 
*){background-color:var(--color-blue-900)}.dark\:bg-blue-900\/20:is(.dark *){background-color:#1c398e33}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-900\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-900)20%,transparent)}}.dark\:bg-blue-950\/20:is(.dark *){background-color:#16245633}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)20%,transparent)}}.dark\:bg-blue-950\/40:is(.dark *){background-color:#16245666}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/40:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)40%,transparent)}}.dark\:bg-blue-950\/50:is(.dark *){background-color:#16245680}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)50%,transparent)}}.dark\:bg-blue-950\/95:is(.dark *){background-color:#162456f2}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/95:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)95%,transparent)}}.dark\:bg-card:is(.dark *){background-color:var(--card)}.dark\:bg-destructive\/60:is(.dark *){background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-destructive\/60:is(.dark *){background-color:color-mix(in oklab,var(--destructive)60%,transparent)}}.dark\:bg-emerald-600:is(.dark *){background-color:var(--color-emerald-600)}.dark\:bg-foreground\/10:is(.dark *){background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-foreground\/10:is(.dark *){background-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.dark\:bg-gray-500:is(.dark *){background-color:var(--color-gray-500)}.dark\:bg-gray-800:is(.dark *){background-color:var(--color-gray-800)}.dark\:bg-gray-800\/90:is(.dark *){background-color:#1e2939e6}@supports (color:color-mix(in lab,red,red)){.dark\:bg-gray-800\/90:is(.dark 
*){background-color:color-mix(in oklab,var(--color-gray-800)90%,transparent)}}.dark\:bg-gray-900:is(.dark *){background-color:var(--color-gray-900)}.dark\:bg-gray-900\/30:is(.dark *){background-color:#1018284d}@supports (color:color-mix(in lab,red,red)){.dark\:bg-gray-900\/30:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-900)30%,transparent)}}.dark\:bg-green-400:is(.dark *){background-color:var(--color-green-400)}.dark\:bg-green-500\/10:is(.dark *){background-color:#00c7581a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-green-500)10%,transparent)}}.dark\:bg-green-900:is(.dark *){background-color:var(--color-green-900)}.dark\:bg-green-950:is(.dark *){background-color:var(--color-green-950)}.dark\:bg-green-950\/20:is(.dark *){background-color:#032e1533}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-green-950)20%,transparent)}}.dark\:bg-green-950\/50:is(.dark *){background-color:#032e1580}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-green-950)50%,transparent)}}.dark\:bg-input\/30:is(.dark *){background-color:var(--input)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-input\/30:is(.dark *){background-color:color-mix(in oklab,var(--input)30%,transparent)}}.dark\:bg-orange-400:is(.dark *){background-color:var(--color-orange-400)}.dark\:bg-orange-500\/10:is(.dark *){background-color:#fe6e001a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-500)10%,transparent)}}.dark\:bg-orange-600:is(.dark *){background-color:var(--color-orange-600)}.dark\:bg-orange-900:is(.dark *){background-color:var(--color-orange-900)}.dark\:bg-orange-950:is(.dark *){background-color:var(--color-orange-950)}.dark\:bg-orange-950\/20:is(.dark 
*){background-color:#44130633}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-950)20%,transparent)}}.dark\:bg-orange-950\/30:is(.dark *){background-color:#4413064d}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/30:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-950)30%,transparent)}}.dark\:bg-orange-950\/50:is(.dark *){background-color:#44130680}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-950)50%,transparent)}}.dark\:bg-purple-600:is(.dark *){background-color:var(--color-purple-600)}.dark\:bg-purple-900:is(.dark *){background-color:var(--color-purple-900)}.dark\:bg-red-400:is(.dark *){background-color:var(--color-red-400)}.dark\:bg-red-900:is(.dark *){background-color:var(--color-red-900)}.dark\:bg-red-950:is(.dark *){background-color:var(--color-red-950)}.dark\:bg-red-950\/20:is(.dark *){background-color:#46080933}@supports (color:color-mix(in lab,red,red)){.dark\:bg-red-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-red-950)20%,transparent)}}.dark\:text-\[\#8B5CF6\]:is(.dark *){color:#8b5cf6}.dark\:text-amber-100:is(.dark *){color:var(--color-amber-100)}.dark\:text-amber-200:is(.dark *){color:var(--color-amber-200)}.dark\:text-amber-300:is(.dark *){color:var(--color-amber-300)}.dark\:text-amber-400:is(.dark *){color:var(--color-amber-400)}.dark\:text-amber-400\/80:is(.dark *){color:#fcbb00cc}@supports (color:color-mix(in lab,red,red)){.dark\:text-amber-400\/80:is(.dark *){color:color-mix(in oklab,var(--color-amber-400)80%,transparent)}}.dark\:text-amber-500:is(.dark *){color:var(--color-amber-500)}.dark\:text-blue-100:is(.dark *){color:var(--color-blue-100)}.dark\:text-blue-200:is(.dark *){color:var(--color-blue-200)}.dark\:text-blue-300:is(.dark *){color:var(--color-blue-300)}.dark\:text-blue-400:is(.dark 
*){color:var(--color-blue-400)}.dark\:text-blue-400\/70:is(.dark *){color:#54a2ffb3}@supports (color:color-mix(in lab,red,red)){.dark\:text-blue-400\/70:is(.dark *){color:color-mix(in oklab,var(--color-blue-400)70%,transparent)}}.dark\:text-blue-500:is(.dark *){color:var(--color-blue-500)}.dark\:text-emerald-400:is(.dark *){color:var(--color-emerald-400)}.dark\:text-gray-100:is(.dark *){color:var(--color-gray-100)}.dark\:text-gray-300:is(.dark *){color:var(--color-gray-300)}.dark\:text-gray-400:is(.dark *){color:var(--color-gray-400)}.dark\:text-green-100:is(.dark *){color:var(--color-green-100)}.dark\:text-green-200:is(.dark *){color:var(--color-green-200)}.dark\:text-green-300:is(.dark *){color:var(--color-green-300)}.dark\:text-green-400:is(.dark *){color:var(--color-green-400)}.dark\:text-orange-100:is(.dark *){color:var(--color-orange-100)}.dark\:text-orange-200:is(.dark *){color:var(--color-orange-200)}.dark\:text-orange-300:is(.dark *){color:var(--color-orange-300)}.dark\:text-orange-400:is(.dark *){color:var(--color-orange-400)}.dark\:text-purple-200:is(.dark *){color:var(--color-purple-200)}.dark\:text-purple-400:is(.dark *){color:var(--color-purple-400)}.dark\:text-red-200:is(.dark *){color:var(--color-red-200)}.dark\:text-red-400:is(.dark *){color:var(--color-red-400)}.dark\:text-yellow-400:is(.dark *){color:var(--color-yellow-400)}.dark\:opacity-30:is(.dark *){opacity:.3}@media (hover:hover){.dark\:hover\:border-gray-600:is(.dark *):hover{border-color:var(--color-gray-600)}.dark\:hover\:bg-accent\/50:is(.dark *):hover{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-accent\/50:is(.dark *):hover{background-color:color-mix(in oklab,var(--accent)50%,transparent)}}.dark\:hover\:bg-amber-950\/30:is(.dark *):hover{background-color:#4619014d}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-amber-950\/30:is(.dark *):hover{background-color:color-mix(in 
oklab,var(--color-amber-950)30%,transparent)}}.dark\:hover\:bg-gray-800:is(.dark *):hover{background-color:var(--color-gray-800)}.dark\:hover\:bg-input\/50:is(.dark *):hover{background-color:var(--input)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-input\/50:is(.dark *):hover{background-color:color-mix(in oklab,var(--input)50%,transparent)}}.dark\:hover\:bg-orange-950\/40:is(.dark *):hover{background-color:#44130666}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-orange-950\/40:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-orange-950)40%,transparent)}}.dark\:hover\:bg-red-900\/20:is(.dark *):hover{background-color:#82181a33}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-red-900\/20:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-red-900)20%,transparent)}}.dark\:hover\:text-orange-200:is(.dark *):hover{color:var(--color-orange-200)}}.dark\:focus-visible\:ring-destructive\/40:is(.dark *):focus-visible{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:focus-visible\:ring-destructive\/40:is(.dark *):focus-visible{--tw-ring-color:color-mix(in oklab,var(--destructive)40%,transparent)}}.dark\:aria-invalid\:ring-destructive\/40:is(.dark *)[aria-invalid=true]{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:aria-invalid\:ring-destructive\/40:is(.dark *)[aria-invalid=true]{--tw-ring-color:color-mix(in oklab,var(--destructive)40%,transparent)}}.dark\:data-\[state\=checked\]\:bg-primary:is(.dark *)[data-state=checked]{background-color:var(--primary)}.dark\:data-\[variant\=destructive\]\:focus\:bg-destructive\/20:is(.dark *)[data-variant=destructive]:focus{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:data-\[variant\=destructive\]\:focus\:bg-destructive\/20:is(.dark *)[data-variant=destructive]:focus{background-color:color-mix(in 
oklab,var(--destructive)20%,transparent)}}.\[\&_p\]\:leading-relaxed p{--tw-leading:var(--leading-relaxed);line-height:var(--leading-relaxed)}.\[\&_svg\]\:pointer-events-none svg{pointer-events:none}.\[\&_svg\]\:shrink-0 svg{flex-shrink:0}.\[\&_svg\:not\(\[class\*\=\'size-\'\]\)\]\:size-4 svg:not([class*=size-]){width:calc(var(--spacing)*4);height:calc(var(--spacing)*4)}.\[\&_svg\:not\(\[class\*\=\'text-\'\]\)\]\:text-muted-foreground svg:not([class*=text-]){color:var(--muted-foreground)}.\[\.border-b\]\:pb-6.border-b{padding-bottom:calc(var(--spacing)*6)}.\[\.border-t\]\:pt-6.border-t{padding-top:calc(var(--spacing)*6)}:is(.\*\:\[span\]\:last\:flex>*):is(span):last-child{display:flex}:is(.\*\:\[span\]\:last\:items-center>*):is(span):last-child{align-items:center}:is(.\*\:\[span\]\:last\:gap-2>*):is(span):last-child{gap:calc(var(--spacing)*2)}:is(.data-\[variant\=destructive\]\:\*\:\[svg\]\:\!text-destructive[data-variant=destructive]>*):is(svg){color:var(--destructive)!important}.\[\&\>svg\]\:absolute>svg{position:absolute}.\[\&\>svg\]\:top-4>svg{top:calc(var(--spacing)*4)}.\[\&\>svg\]\:left-4>svg{left:calc(var(--spacing)*4)}.\[\&\>svg\]\:text-foreground>svg{color:var(--foreground)}.\[\&\>svg\+div\]\:translate-y-\[-3px\]>svg+div{--tw-translate-y:-3px;translate:var(--tw-translate-x)var(--tw-translate-y)}.\[\&\>svg\~\*\]\:pl-7>svg~*{padding-left:calc(var(--spacing)*7)}}@property --tw-animation-delay{syntax:"*";inherits:false;initial-value:0s}@property --tw-animation-direction{syntax:"*";inherits:false;initial-value:normal}@property --tw-animation-duration{syntax:"*";inherits:false}@property --tw-animation-fill-mode{syntax:"*";inherits:false;initial-value:none}@property --tw-animation-iteration-count{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-blur{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-opacity{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-rotate{syntax:"*";inherits:false;initial-value:0}@property 
--tw-enter-scale{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-blur{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-opacity{syntax:"*";inherits:false;initial-value:1}@property --tw-exit-rotate{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-scale{syntax:"*";inherits:false;initial-value:1}@property --tw-exit-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-translate-y{syntax:"*";inherits:false;initial-value:0}:root{--radius:.625rem;--background:oklch(100% 0 0);--foreground:oklch(14.5% 0 0);--card:oklch(100% 0 0);--card-foreground:oklch(14.5% 0 0);--popover:oklch(100% 0 0);--popover-foreground:oklch(14.5% 0 0);--primary:oklch(48% .18 290);--primary-foreground:oklch(98.5% 0 0);--secondary:oklch(97% 0 0);--secondary-foreground:oklch(20.5% 0 0);--muted:oklch(97% 0 0);--muted-foreground:oklch(55.6% 0 0);--accent:oklch(97% 0 0);--accent-foreground:oklch(20.5% 0 0);--destructive:oklch(57.7% .245 27.325);--border:oklch(92.2% 0 0);--input:oklch(92.2% 0 0);--ring:oklch(70.8% 0 0);--chart-1:oklch(64.6% .222 41.116);--chart-2:oklch(60% .118 184.704);--chart-3:oklch(39.8% .07 227.392);--chart-4:oklch(82.8% .189 84.429);--chart-5:oklch(76.9% .188 70.08);--sidebar:oklch(98.5% 0 0);--sidebar-foreground:oklch(14.5% 0 0);--sidebar-primary:oklch(20.5% 0 0);--sidebar-primary-foreground:oklch(98.5% 0 0);--sidebar-accent:oklch(97% 0 0);--sidebar-accent-foreground:oklch(20.5% 0 0);--sidebar-border:oklch(92.2% 0 0);--sidebar-ring:oklch(70.8% 0 0)}.dark{--background:oklch(14.5% 0 0);--foreground:oklch(98.5% 0 0);--card:oklch(20.5% 0 0);--card-foreground:oklch(98.5% 0 0);--popover:oklch(20.5% 0 0);--popover-foreground:oklch(98.5% 0 0);--primary:oklch(62% .2 290);--primary-foreground:oklch(98.5% 0 0);--secondary:oklch(26.9% 0 0);--secondary-foreground:oklch(98.5% 0 
0);--muted:oklch(26.9% 0 0);--muted-foreground:oklch(70.8% 0 0);--accent:oklch(26.9% 0 0);--accent-foreground:oklch(98.5% 0 0);--destructive:oklch(70.4% .191 22.216);--border:oklch(100% 0 0/.1);--input:oklch(100% 0 0/.15);--ring:oklch(55.6% 0 0);--chart-1:oklch(48.8% .243 264.376);--chart-2:oklch(69.6% .17 162.48);--chart-3:oklch(76.9% .188 70.08);--chart-4:oklch(62.7% .265 303.9);--chart-5:oklch(64.5% .246 16.439);--sidebar:oklch(20.5% 0 0);--sidebar-foreground:oklch(98.5% 0 0);--sidebar-primary:oklch(48.8% .243 264.376);--sidebar-primary-foreground:oklch(98.5% 0 0);--sidebar-accent:oklch(26.9% 0 0);--sidebar-accent-foreground:oklch(98.5% 0 0);--sidebar-border:oklch(100% 0 0/.1);--sidebar-ring:oklch(55.6% 0 0)}.workflow-chat-view .border-green-200{border-color:var(--color-emerald-200)}.workflow-chat-view .bg-green-50{background-color:var(--color-emerald-50)}.workflow-chat-view .bg-green-100{background-color:var(--color-emerald-100)}.workflow-chat-view .text-green-600{color:var(--color-emerald-600)}.workflow-chat-view .text-green-700{color:var(--color-emerald-700)}.workflow-chat-view .text-green-800{color:var(--color-emerald-800)}.highlight-attention{animation:1s ease-out highlight-flash}@keyframes highlight-flash{0%{background-color:#fb923c4d;transform:scale(1.02)}to{background-color:#0000;transform:scale(1)}}.hil-waiting-glow{animation:2s infinite pulse-glow;box-shadow:0 0 #fb923c66,inset 0 0 0 1px #fb923c33}@keyframes pulse-glow{0%,to{box-shadow:0 0 #fb923c66,inset 0 0 0 1px #fb923c33}50%{box-shadow:0 0 20px 5px #fb923c33,inset 0 0 0 2px #fb923c4d}}@property --tw-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-z{syntax:"*";inherits:false;initial-value:0}@property --tw-scale-x{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-y{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-z{syntax:"*";inherits:false;initial-value:1}@property 
--tw-rotate-x{syntax:"*";inherits:false}@property --tw-rotate-y{syntax:"*";inherits:false}@property --tw-rotate-z{syntax:"*";inherits:false}@property --tw-skew-x{syntax:"*";inherits:false}@property --tw-skew-y{syntax:"*";inherits:false}@property --tw-space-y-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-space-x-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-border-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-leading{syntax:"*";inherits:false}@property --tw-font-weight{syntax:"*";inherits:false}@property --tw-tracking{syntax:"*";inherits:false}@property --tw-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-shadow-color{syntax:"*";inherits:false}@property --tw-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-inset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-shadow-color{syntax:"*";inherits:false}@property --tw-inset-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-ring-color{syntax:"*";inherits:false}@property --tw-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-ring-color{syntax:"*";inherits:false}@property --tw-inset-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-ring-inset{syntax:"*";inherits:false}@property --tw-ring-offset-width{syntax:"";inherits:false;initial-value:0}@property --tw-ring-offset-color{syntax:"*";inherits:false;initial-value:#fff}@property --tw-ring-offset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-outline-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-blur{syntax:"*";inherits:false}@property --tw-brightness{syntax:"*";inherits:false}@property --tw-contrast{syntax:"*";inherits:false}@property --tw-grayscale{syntax:"*";inherits:false}@property --tw-hue-rotate{syntax:"*";inherits:false}@property --tw-invert{syntax:"*";inherits:false}@property 
--tw-opacity{syntax:"*";inherits:false}@property --tw-saturate{syntax:"*";inherits:false}@property --tw-sepia{syntax:"*";inherits:false}@property --tw-drop-shadow{syntax:"*";inherits:false}@property --tw-drop-shadow-color{syntax:"*";inherits:false}@property --tw-drop-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-drop-shadow-size{syntax:"*";inherits:false}@property --tw-backdrop-blur{syntax:"*";inherits:false}@property --tw-backdrop-brightness{syntax:"*";inherits:false}@property --tw-backdrop-contrast{syntax:"*";inherits:false}@property --tw-backdrop-grayscale{syntax:"*";inherits:false}@property --tw-backdrop-hue-rotate{syntax:"*";inherits:false}@property --tw-backdrop-invert{syntax:"*";inherits:false}@property --tw-backdrop-opacity{syntax:"*";inherits:false}@property --tw-backdrop-saturate{syntax:"*";inherits:false}@property --tw-backdrop-sepia{syntax:"*";inherits:false}@property --tw-duration{syntax:"*";inherits:false}@property --tw-ease{syntax:"*";inherits:false}@keyframes spin{to{transform:rotate(360deg)}}@keyframes pulse{50%{opacity:.5}}@keyframes bounce{0%,to{animation-timing-function:cubic-bezier(.8,0,1,1);transform:translateY(-25%)}50%{animation-timing-function:cubic-bezier(0,0,.2,1);transform:none}}@keyframes enter{0%{opacity:var(--tw-enter-opacity,1);transform:translate3d(var(--tw-enter-translate-x,0),var(--tw-enter-translate-y,0),0)scale3d(var(--tw-enter-scale,1),var(--tw-enter-scale,1),var(--tw-enter-scale,1))rotate(var(--tw-enter-rotate,0));filter:blur(var(--tw-enter-blur,0))}}@keyframes exit{to{opacity:var(--tw-exit-opacity,1);transform:translate3d(var(--tw-exit-translate-x,0),var(--tw-exit-translate-y,0),0)scale3d(var(--tw-exit-scale,1),var(--tw-exit-scale,1),var(--tw-exit-scale,1))rotate(var(--tw-exit-rotate,0));filter:blur(var(--tw-exit-blur,0))}}.react-flow{direction:ltr;--xy-edge-stroke-default: #b1b1b7;--xy-edge-stroke-width-default: 1;--xy-edge-stroke-selected-default: #555;--xy-connectionline-stroke-default: 
#b1b1b7;--xy-connectionline-stroke-width-default: 1;--xy-attribution-background-color-default: rgba(255, 255, 255, .5);--xy-minimap-background-color-default: #fff;--xy-minimap-mask-background-color-default: rgba(240, 240, 240, .6);--xy-minimap-mask-stroke-color-default: transparent;--xy-minimap-mask-stroke-width-default: 1;--xy-minimap-node-background-color-default: #e2e2e2;--xy-minimap-node-stroke-color-default: transparent;--xy-minimap-node-stroke-width-default: 2;--xy-background-color-default: transparent;--xy-background-pattern-dots-color-default: #91919a;--xy-background-pattern-lines-color-default: #eee;--xy-background-pattern-cross-color-default: #e2e2e2;background-color:var(--xy-background-color, var(--xy-background-color-default));--xy-node-color-default: inherit;--xy-node-border-default: 1px solid #1a192b;--xy-node-background-color-default: #fff;--xy-node-group-background-color-default: rgba(240, 240, 240, .25);--xy-node-boxshadow-hover-default: 0 1px 4px 1px rgba(0, 0, 0, .08);--xy-node-boxshadow-selected-default: 0 0 0 .5px #1a192b;--xy-node-border-radius-default: 3px;--xy-handle-background-color-default: #1a192b;--xy-handle-border-color-default: #fff;--xy-selection-background-color-default: rgba(0, 89, 220, .08);--xy-selection-border-default: 1px dotted rgba(0, 89, 220, .8);--xy-controls-button-background-color-default: #fefefe;--xy-controls-button-background-color-hover-default: #f4f4f4;--xy-controls-button-color-default: inherit;--xy-controls-button-color-hover-default: inherit;--xy-controls-button-border-color-default: #eee;--xy-controls-box-shadow-default: 0 0 2px 1px rgba(0, 0, 0, .08);--xy-edge-label-background-color-default: #ffffff;--xy-edge-label-color-default: inherit;--xy-resize-background-color-default: #3367d9}.react-flow.dark{--xy-edge-stroke-default: #3e3e3e;--xy-edge-stroke-width-default: 1;--xy-edge-stroke-selected-default: #727272;--xy-connectionline-stroke-default: #b1b1b7;--xy-connectionline-stroke-width-default: 
1;--xy-attribution-background-color-default: rgba(150, 150, 150, .25);--xy-minimap-background-color-default: #141414;--xy-minimap-mask-background-color-default: rgba(60, 60, 60, .6);--xy-minimap-mask-stroke-color-default: transparent;--xy-minimap-mask-stroke-width-default: 1;--xy-minimap-node-background-color-default: #2b2b2b;--xy-minimap-node-stroke-color-default: transparent;--xy-minimap-node-stroke-width-default: 2;--xy-background-color-default: #141414;--xy-background-pattern-dots-color-default: #777;--xy-background-pattern-lines-color-default: #777;--xy-background-pattern-cross-color-default: #777;--xy-node-color-default: #f8f8f8;--xy-node-border-default: 1px solid #3c3c3c;--xy-node-background-color-default: #1e1e1e;--xy-node-group-background-color-default: rgba(240, 240, 240, .25);--xy-node-boxshadow-hover-default: 0 1px 4px 1px rgba(255, 255, 255, .08);--xy-node-boxshadow-selected-default: 0 0 0 .5px #999;--xy-handle-background-color-default: #bebebe;--xy-handle-border-color-default: #1e1e1e;--xy-selection-background-color-default: rgba(200, 200, 220, .08);--xy-selection-border-default: 1px dotted rgba(200, 200, 220, .8);--xy-controls-button-background-color-default: #2b2b2b;--xy-controls-button-background-color-hover-default: #3e3e3e;--xy-controls-button-color-default: #f8f8f8;--xy-controls-button-color-hover-default: #fff;--xy-controls-button-border-color-default: #5b5b5b;--xy-controls-box-shadow-default: 0 0 2px 1px rgba(0, 0, 0, .08);--xy-edge-label-background-color-default: #141414;--xy-edge-label-color-default: #f8f8f8}.react-flow__background{background-color:var(--xy-background-color-props, var(--xy-background-color, var(--xy-background-color-default)));pointer-events:none;z-index:-1}.react-flow__container{position:absolute;width:100%;height:100%;top:0;left:0}.react-flow__pane{z-index:1}.react-flow__pane.draggable{cursor:grab}.react-flow__pane.dragging{cursor:grabbing}.react-flow__pane.selection{cursor:pointer}.react-flow__viewport{transform-origin:0 
0;z-index:2;pointer-events:none}.react-flow__renderer{z-index:4}.react-flow__selection{z-index:6}.react-flow__nodesselection-rect:focus,.react-flow__nodesselection-rect:focus-visible{outline:none}.react-flow__edge-path{stroke:var(--xy-edge-stroke, var(--xy-edge-stroke-default));stroke-width:var(--xy-edge-stroke-width, var(--xy-edge-stroke-width-default));fill:none}.react-flow__connection-path{stroke:var(--xy-connectionline-stroke, var(--xy-connectionline-stroke-default));stroke-width:var(--xy-connectionline-stroke-width, var(--xy-connectionline-stroke-width-default));fill:none}.react-flow .react-flow__edges{position:absolute}.react-flow .react-flow__edges svg{overflow:visible;position:absolute;pointer-events:none}.react-flow__edge{pointer-events:visibleStroke}.react-flow__edge.selectable{cursor:pointer}.react-flow__edge.animated path{stroke-dasharray:5;animation:dashdraw .5s linear infinite}.react-flow__edge.animated path.react-flow__edge-interaction{stroke-dasharray:none;animation:none}.react-flow__edge.inactive{pointer-events:none}.react-flow__edge.selected,.react-flow__edge:focus,.react-flow__edge:focus-visible{outline:none}.react-flow__edge.selected .react-flow__edge-path,.react-flow__edge.selectable:focus .react-flow__edge-path,.react-flow__edge.selectable:focus-visible .react-flow__edge-path{stroke:var(--xy-edge-stroke-selected, var(--xy-edge-stroke-selected-default))}.react-flow__edge-textwrapper{pointer-events:all}.react-flow__edge .react-flow__edge-text{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__arrowhead polyline{stroke:var(--xy-edge-stroke, var(--xy-edge-stroke-default))}.react-flow__arrowhead polyline.arrowclosed{fill:var(--xy-edge-stroke, var(--xy-edge-stroke-default))}.react-flow__connection{pointer-events:none}.react-flow__connection .animated{stroke-dasharray:5;animation:dashdraw .5s linear 
infinite}svg.react-flow__connectionline{z-index:1001;overflow:visible;position:absolute}.react-flow__nodes{pointer-events:none;transform-origin:0 0}.react-flow__node{position:absolute;-webkit-user-select:none;-moz-user-select:none;user-select:none;pointer-events:all;transform-origin:0 0;box-sizing:border-box;cursor:default}.react-flow__node.selectable{cursor:pointer}.react-flow__node.draggable{cursor:grab;pointer-events:all}.react-flow__node.draggable.dragging{cursor:grabbing}.react-flow__nodesselection{z-index:3;transform-origin:left top;pointer-events:none}.react-flow__nodesselection-rect{position:absolute;pointer-events:all;cursor:grab}.react-flow__handle{position:absolute;pointer-events:none;min-width:5px;min-height:5px;width:6px;height:6px;background-color:var(--xy-handle-background-color, var(--xy-handle-background-color-default));border:1px solid var(--xy-handle-border-color, var(--xy-handle-border-color-default));border-radius:100%}.react-flow__handle.connectingfrom{pointer-events:all}.react-flow__handle.connectionindicator{pointer-events:all;cursor:crosshair}.react-flow__handle-bottom{top:auto;left:50%;bottom:0;transform:translate(-50%,50%)}.react-flow__handle-top{top:0;left:50%;transform:translate(-50%,-50%)}.react-flow__handle-left{top:50%;left:0;transform:translate(-50%,-50%)}.react-flow__handle-right{top:50%;right:0;transform:translate(50%,-50%)}.react-flow__edgeupdater{cursor:move;pointer-events:all}.react-flow__pane.selection .react-flow__panel{pointer-events:none}.react-flow__panel{position:absolute;z-index:5;margin:15px}.react-flow__panel.top{top:0}.react-flow__panel.bottom{bottom:0}.react-flow__panel.top.center,.react-flow__panel.bottom.center{left:50%;transform:translate(-15px) translate(-50%)}.react-flow__panel.left{left:0}.react-flow__panel.right{right:0}.react-flow__panel.left.center,.react-flow__panel.right.center{top:50%;transform:translateY(-15px) 
translateY(-50%)}.react-flow__attribution{font-size:10px;background:var(--xy-attribution-background-color, var(--xy-attribution-background-color-default));padding:2px 3px;margin:0}.react-flow__attribution a{text-decoration:none;color:#999}@keyframes dashdraw{0%{stroke-dashoffset:10}}.react-flow__edgelabel-renderer{position:absolute;width:100%;height:100%;pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none;left:0;top:0}.react-flow__viewport-portal{position:absolute;width:100%;height:100%;left:0;top:0;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__minimap{background:var( --xy-minimap-background-color-props, var(--xy-minimap-background-color, var(--xy-minimap-background-color-default)) )}.react-flow__minimap-svg{display:block}.react-flow__minimap-mask{fill:var( --xy-minimap-mask-background-color-props, var(--xy-minimap-mask-background-color, var(--xy-minimap-mask-background-color-default)) );stroke:var( --xy-minimap-mask-stroke-color-props, var(--xy-minimap-mask-stroke-color, var(--xy-minimap-mask-stroke-color-default)) );stroke-width:var( --xy-minimap-mask-stroke-width-props, var(--xy-minimap-mask-stroke-width, var(--xy-minimap-mask-stroke-width-default)) )}.react-flow__minimap-node{fill:var( --xy-minimap-node-background-color-props, var(--xy-minimap-node-background-color, var(--xy-minimap-node-background-color-default)) );stroke:var( --xy-minimap-node-stroke-color-props, var(--xy-minimap-node-stroke-color, var(--xy-minimap-node-stroke-color-default)) );stroke-width:var( --xy-minimap-node-stroke-width-props, var(--xy-minimap-node-stroke-width, var(--xy-minimap-node-stroke-width-default)) )}.react-flow__background-pattern.dots{fill:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-dots-color-default)) )}.react-flow__background-pattern.lines{stroke:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, 
var(--xy-background-pattern-lines-color-default)) )}.react-flow__background-pattern.cross{stroke:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-cross-color-default)) )}.react-flow__controls{display:flex;flex-direction:column;box-shadow:var(--xy-controls-box-shadow, var(--xy-controls-box-shadow-default))}.react-flow__controls.horizontal{flex-direction:row}.react-flow__controls-button{display:flex;justify-content:center;align-items:center;height:26px;width:26px;padding:4px;border:none;background:var(--xy-controls-button-background-color, var(--xy-controls-button-background-color-default));border-bottom:1px solid var( --xy-controls-button-border-color-props, var(--xy-controls-button-border-color, var(--xy-controls-button-border-color-default)) );color:var( --xy-controls-button-color-props, var(--xy-controls-button-color, var(--xy-controls-button-color-default)) );cursor:pointer;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__controls-button svg{width:100%;max-width:12px;max-height:12px;fill:currentColor}.react-flow__edge.updating .react-flow__edge-path{stroke:#777}.react-flow__edge-text{font-size:10px}.react-flow__node.selectable:focus,.react-flow__node.selectable:focus-visible{outline:none}.react-flow__node-input,.react-flow__node-default,.react-flow__node-output,.react-flow__node-group{padding:10px;border-radius:var(--xy-node-border-radius, var(--xy-node-border-radius-default));width:150px;font-size:12px;color:var(--xy-node-color, var(--xy-node-color-default));text-align:center;border:var(--xy-node-border, var(--xy-node-border-default));background-color:var(--xy-node-background-color, var(--xy-node-background-color-default))}.react-flow__node-input.selectable:hover,.react-flow__node-default.selectable:hover,.react-flow__node-output.selectable:hover,.react-flow__node-group.selectable:hover{box-shadow:var(--xy-node-boxshadow-hover, 
var(--xy-node-boxshadow-hover-default))}.react-flow__node-input.selectable.selected,.react-flow__node-input.selectable:focus,.react-flow__node-input.selectable:focus-visible,.react-flow__node-default.selectable.selected,.react-flow__node-default.selectable:focus,.react-flow__node-default.selectable:focus-visible,.react-flow__node-output.selectable.selected,.react-flow__node-output.selectable:focus,.react-flow__node-output.selectable:focus-visible,.react-flow__node-group.selectable.selected,.react-flow__node-group.selectable:focus,.react-flow__node-group.selectable:focus-visible{box-shadow:var(--xy-node-boxshadow-selected, var(--xy-node-boxshadow-selected-default))}.react-flow__node-group{background-color:var(--xy-node-group-background-color, var(--xy-node-group-background-color-default))}.react-flow__nodesselection-rect,.react-flow__selection{background:var(--xy-selection-background-color, var(--xy-selection-background-color-default));border:var(--xy-selection-border, var(--xy-selection-border-default))}.react-flow__nodesselection-rect:focus,.react-flow__nodesselection-rect:focus-visible,.react-flow__selection:focus,.react-flow__selection:focus-visible{outline:none}.react-flow__controls-button:hover{background:var( --xy-controls-button-background-color-hover-props, var(--xy-controls-button-background-color-hover, var(--xy-controls-button-background-color-hover-default)) );color:var( --xy-controls-button-color-hover-props, var(--xy-controls-button-color-hover, var(--xy-controls-button-color-hover-default)) )}.react-flow__controls-button:disabled{pointer-events:none}.react-flow__controls-button:disabled svg{fill-opacity:.4}.react-flow__controls-button:last-child{border-bottom:none}.react-flow__controls.horizontal .react-flow__controls-button{border-bottom:none;border-right:1px solid var( --xy-controls-button-border-color-props, var(--xy-controls-button-border-color, var(--xy-controls-button-border-color-default)) )}.react-flow__controls.horizontal 
.react-flow__controls-button:last-child{border-right:none}.react-flow__resize-control{position:absolute}.react-flow__resize-control.left,.react-flow__resize-control.right{cursor:ew-resize}.react-flow__resize-control.top,.react-flow__resize-control.bottom{cursor:ns-resize}.react-flow__resize-control.top.left,.react-flow__resize-control.bottom.right{cursor:nwse-resize}.react-flow__resize-control.bottom.left,.react-flow__resize-control.top.right{cursor:nesw-resize}.react-flow__resize-control.handle{width:5px;height:5px;border:1px solid #fff;border-radius:1px;background-color:var(--xy-resize-background-color, var(--xy-resize-background-color-default));translate:-50% -50%}.react-flow__resize-control.handle.left{left:0;top:50%}.react-flow__resize-control.handle.right{left:100%;top:50%}.react-flow__resize-control.handle.top{left:50%;top:0}.react-flow__resize-control.handle.bottom{left:50%;top:100%}.react-flow__resize-control.handle.top.left,.react-flow__resize-control.handle.bottom.left{left:0}.react-flow__resize-control.handle.top.right,.react-flow__resize-control.handle.bottom.right{left:100%}.react-flow__resize-control.line{border-color:var(--xy-resize-background-color, var(--xy-resize-background-color-default));border-width:0;border-style:solid}.react-flow__resize-control.line.left,.react-flow__resize-control.line.right{width:1px;transform:translate(-50%);top:0;height:100%}.react-flow__resize-control.line.left{left:0;border-left-width:1px}.react-flow__resize-control.line.right{left:100%;border-right-width:1px}.react-flow__resize-control.line.top,.react-flow__resize-control.line.bottom{height:1px;transform:translateY(-50%);left:0;width:100%}.react-flow__resize-control.line.top{top:0;border-top-width:1px}.react-flow__resize-control.line.bottom{border-bottom-width:1px;top:100%}.react-flow__edge-textbg{fill:var(--xy-edge-label-background-color, var(--xy-edge-label-background-color-default))}.react-flow__edge-text{fill:var(--xy-edge-label-color, 
var(--xy-edge-label-color-default))} diff --git a/python/packages/devui/agent_framework_devui/ui/assets/index.js b/python/packages/devui/agent_framework_devui/ui/assets/index.js index 1b05f27842..44836a5b86 100644 --- a/python/packages/devui/agent_framework_devui/ui/assets/index.js +++ b/python/packages/devui/agent_framework_devui/ui/assets/index.js @@ -1,4 +1,4 @@ -function yE(e, n) { for (var r = 0; r < n.length; r++) { const a = n[r]; if (typeof a != "string" && !Array.isArray(a)) { for (const l in a) if (l !== "default" && !(l in e)) { const c = Object.getOwnPropertyDescriptor(a, l); c && Object.defineProperty(e, l, c.get ? c : { enumerable: !0, get: () => a[l] }) } } } return Object.freeze(Object.defineProperty(e, Symbol.toStringTag, { value: "Module" })) } (function () { const n = document.createElement("link").relList; if (n && n.supports && n.supports("modulepreload")) return; for (const l of document.querySelectorAll('link[rel="modulepreload"]')) a(l); new MutationObserver(l => { for (const c of l) if (c.type === "childList") for (const d of c.addedNodes) d.tagName === "LINK" && d.rel === "modulepreload" && a(d) }).observe(document, { childList: !0, subtree: !0 }); function r(l) { const c = {}; return l.integrity && (c.integrity = l.integrity), l.referrerPolicy && (c.referrerPolicy = l.referrerPolicy), l.crossOrigin === "use-credentials" ? c.credentials = "include" : l.crossOrigin === "anonymous" ? c.credentials = "omit" : c.credentials = "same-origin", c } function a(l) { if (l.ep) return; l.ep = !0; const c = r(l); fetch(l.href, c) } })(); function yp(e) { return e && e.__esModule && Object.prototype.hasOwnProperty.call(e, "default") ? 
e.default : e } var Gm = { exports: {} }, Bi = {};/** +function WE(e,n){for(var r=0;ra[l]})}}}return Object.freeze(Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}))}(function(){const n=document.createElement("link").relList;if(n&&n.supports&&n.supports("modulepreload"))return;for(const l of document.querySelectorAll('link[rel="modulepreload"]'))a(l);new MutationObserver(l=>{for(const c of l)if(c.type==="childList")for(const d of c.addedNodes)d.tagName==="LINK"&&d.rel==="modulepreload"&&a(d)}).observe(document,{childList:!0,subtree:!0});function r(l){const c={};return l.integrity&&(c.integrity=l.integrity),l.referrerPolicy&&(c.referrerPolicy=l.referrerPolicy),l.crossOrigin==="use-credentials"?c.credentials="include":l.crossOrigin==="anonymous"?c.credentials="omit":c.credentials="same-origin",c}function a(l){if(l.ep)return;l.ep=!0;const c=r(l);fetch(l.href,c)}})();function Dp(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var ih={exports:{}},Fi={};/** * @license React * react-jsx-runtime.production.js * @@ -6,7 +6,7 @@ function yE(e, n) { for (var r = 0; r < n.length; r++) { const a = n[r]; if (typ * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. - */var Wy; function vE() { if (Wy) return Bi; Wy = 1; var e = Symbol.for("react.transitional.element"), n = Symbol.for("react.fragment"); function r(a, l, c) { var d = null; if (c !== void 0 && (d = "" + c), l.key !== void 0 && (d = "" + l.key), "key" in l) { c = {}; for (var f in l) f !== "key" && (c[f] = l[f]) } else c = l; return l = c.ref, { $$typeof: e, type: a, key: d, ref: l !== void 0 ? 
l : null, props: c } } return Bi.Fragment = n, Bi.jsx = r, Bi.jsxs = r, Bi } var Ky; function bE() { return Ky || (Ky = 1, Gm.exports = vE()), Gm.exports } var o = bE(), Xm = { exports: {} }, Ge = {};/** + */var pv;function KE(){if(pv)return Fi;pv=1;var e=Symbol.for("react.transitional.element"),n=Symbol.for("react.fragment");function r(a,l,c){var d=null;if(c!==void 0&&(d=""+c),l.key!==void 0&&(d=""+l.key),"key"in l){c={};for(var f in l)f!=="key"&&(c[f]=l[f])}else c=l;return l=c.ref,{$$typeof:e,type:a,key:d,ref:l!==void 0?l:null,props:c}}return Fi.Fragment=n,Fi.jsx=r,Fi.jsxs=r,Fi}var gv;function QE(){return gv||(gv=1,ih.exports=KE()),ih.exports}var o=QE(),lh={exports:{}},Xe={};/** * @license React * react.production.js * @@ -14,7 +14,7 @@ function yE(e, n) { for (var r = 0; r < n.length; r++) { const a = n[r]; if (typ * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. - */var Qy; function wE() { if (Qy) return Ge; Qy = 1; var e = Symbol.for("react.transitional.element"), n = Symbol.for("react.portal"), r = Symbol.for("react.fragment"), a = Symbol.for("react.strict_mode"), l = Symbol.for("react.profiler"), c = Symbol.for("react.consumer"), d = Symbol.for("react.context"), f = Symbol.for("react.forward_ref"), m = Symbol.for("react.suspense"), h = Symbol.for("react.memo"), g = Symbol.for("react.lazy"), y = Symbol.iterator; function x(C) { return C === null || typeof C != "object" ? null : (C = y && C[y] || C["@@iterator"], typeof C == "function" ? 
C : null) } var b = { isMounted: function () { return !1 }, enqueueForceUpdate: function () { }, enqueueReplaceState: function () { }, enqueueSetState: function () { } }, S = Object.assign, N = {}; function j(C, $, Y) { this.props = C, this.context = $, this.refs = N, this.updater = Y || b } j.prototype.isReactComponent = {}, j.prototype.setState = function (C, $) { if (typeof C != "object" && typeof C != "function" && C != null) throw Error("takes an object of state variables to update or a function which returns an object of state variables."); this.updater.enqueueSetState(this, C, $, "setState") }, j.prototype.forceUpdate = function (C) { this.updater.enqueueForceUpdate(this, C, "forceUpdate") }; function _() { } _.prototype = j.prototype; function M(C, $, Y) { this.props = C, this.context = $, this.refs = N, this.updater = Y || b } var E = M.prototype = new _; E.constructor = M, S(E, j.prototype), E.isPureReactComponent = !0; var T = Array.isArray, R = { H: null, A: null, T: null, S: null, V: null }, D = Object.prototype.hasOwnProperty; function O(C, $, Y, V, W, fe) { return Y = fe.ref, { $$typeof: e, type: C, key: $, ref: Y !== void 0 ? Y : null, props: fe } } function B(C, $) { return O(C.type, $, void 0, void 0, void 0, C.props) } function q(C) { return typeof C == "object" && C !== null && C.$$typeof === e } function K(C) { var $ = { "=": "=0", ":": "=2" }; return "$" + C.replace(/[=:]/g, function (Y) { return $[Y] }) } var J = /\/+/g; function G(C, $) { return typeof C == "object" && C !== null && C.key != null ? K("" + C.key) : $.toString(36) } function Z() { } function P(C) { switch (C.status) { case "fulfilled": return C.value; case "rejected": throw C.reason; default: switch (typeof C.status == "string" ? 
C.then(Z, Z) : (C.status = "pending", C.then(function ($) { C.status === "pending" && (C.status = "fulfilled", C.value = $) }, function ($) { C.status === "pending" && (C.status = "rejected", C.reason = $) })), C.status) { case "fulfilled": return C.value; case "rejected": throw C.reason } }throw C } function U(C, $, Y, V, W) { var fe = typeof C; (fe === "undefined" || fe === "boolean") && (C = null); var ue = !1; if (C === null) ue = !0; else switch (fe) { case "bigint": case "string": case "number": ue = !0; break; case "object": switch (C.$$typeof) { case e: case n: ue = !0; break; case g: return ue = C._init, U(ue(C._payload), $, Y, V, W) } }if (ue) return W = W(C), ue = V === "" ? "." + G(C, 0) : V, T(W) ? (Y = "", ue != null && (Y = ue.replace(J, "$&/") + "/"), U(W, $, Y, "", function (ge) { return ge })) : W != null && (q(W) && (W = B(W, Y + (W.key == null || C && C.key === W.key ? "" : ("" + W.key).replace(J, "$&/") + "/") + ue)), $.push(W)), 1; ue = 0; var te = V === "" ? "." : V + ":"; if (T(C)) for (var ie = 0; ie < C.length; ie++)V = C[ie], fe = te + G(V, ie), ue += U(V, $, Y, fe, W); else if (ie = x(C), typeof ie == "function") for (C = ie.call(C), ie = 0; !(V = C.next()).done;)V = V.value, fe = te + G(V, ie++), ue += U(V, $, Y, fe, W); else if (fe === "object") { if (typeof C.then == "function") return U(P(C), $, Y, V, W); throw $ = String(C), Error("Objects are not valid as a React child (found: " + ($ === "[object Object]" ? "object with keys {" + Object.keys(C).join(", ") + "}" : $) + "). 
If you meant to render a collection of children, use an array instead.") } return ue } function k(C, $, Y) { if (C == null) return C; var V = [], W = 0; return U(C, V, "", "", function (fe) { return $.call(Y, fe, W++) }), V } function L(C) { if (C._status === -1) { var $ = C._result; $ = $(), $.then(function (Y) { (C._status === 0 || C._status === -1) && (C._status = 1, C._result = Y) }, function (Y) { (C._status === 0 || C._status === -1) && (C._status = 2, C._result = Y) }), C._status === -1 && (C._status = 0, C._result = $) } if (C._status === 1) return C._result.default; throw C._result } var I = typeof reportError == "function" ? reportError : function (C) { if (typeof window == "object" && typeof window.ErrorEvent == "function") { var $ = new window.ErrorEvent("error", { bubbles: !0, cancelable: !0, message: typeof C == "object" && C !== null && typeof C.message == "string" ? String(C.message) : String(C), error: C }); if (!window.dispatchEvent($)) return } else if (typeof process == "object" && typeof process.emit == "function") { process.emit("uncaughtException", C); return } console.error(C) }; function H() { } return Ge.Children = { map: k, forEach: function (C, $, Y) { k(C, function () { $.apply(this, arguments) }, Y) }, count: function (C) { var $ = 0; return k(C, function () { $++ }), $ }, toArray: function (C) { return k(C, function ($) { return $ }) || [] }, only: function (C) { if (!q(C)) throw Error("React.Children.only expected to receive a single React element child."); return C } }, Ge.Component = j, Ge.Fragment = r, Ge.Profiler = l, Ge.PureComponent = M, Ge.StrictMode = a, Ge.Suspense = m, Ge.__CLIENT_INTERNALS_DO_NOT_USE_OR_WARN_USERS_THEY_CANNOT_UPGRADE = R, Ge.__COMPILER_RUNTIME = { __proto__: null, c: function (C) { return R.H.useMemoCache(C) } }, Ge.cache = function (C) { return function () { return C.apply(null, arguments) } }, Ge.cloneElement = function (C, $, Y) { if (C == null) throw Error("The argument must be a React element, but you 
passed " + C + "."); var V = S({}, C.props), W = C.key, fe = void 0; if ($ != null) for (ue in $.ref !== void 0 && (fe = void 0), $.key !== void 0 && (W = "" + $.key), $) !D.call($, ue) || ue === "key" || ue === "__self" || ue === "__source" || ue === "ref" && $.ref === void 0 || (V[ue] = $[ue]); var ue = arguments.length - 2; if (ue === 1) V.children = Y; else if (1 < ue) { for (var te = Array(ue), ie = 0; ie < ue; ie++)te[ie] = arguments[ie + 2]; V.children = te } return O(C.type, W, void 0, void 0, fe, V) }, Ge.createContext = function (C) { return C = { $$typeof: d, _currentValue: C, _currentValue2: C, _threadCount: 0, Provider: null, Consumer: null }, C.Provider = C, C.Consumer = { $$typeof: c, _context: C }, C }, Ge.createElement = function (C, $, Y) { var V, W = {}, fe = null; if ($ != null) for (V in $.key !== void 0 && (fe = "" + $.key), $) D.call($, V) && V !== "key" && V !== "__self" && V !== "__source" && (W[V] = $[V]); var ue = arguments.length - 2; if (ue === 1) W.children = Y; else if (1 < ue) { for (var te = Array(ue), ie = 0; ie < ue; ie++)te[ie] = arguments[ie + 2]; W.children = te } if (C && C.defaultProps) for (V in ue = C.defaultProps, ue) W[V] === void 0 && (W[V] = ue[V]); return O(C, fe, void 0, void 0, null, W) }, Ge.createRef = function () { return { current: null } }, Ge.forwardRef = function (C) { return { $$typeof: f, render: C } }, Ge.isValidElement = q, Ge.lazy = function (C) { return { $$typeof: g, _payload: { _status: -1, _result: C }, _init: L } }, Ge.memo = function (C, $) { return { $$typeof: h, type: C, compare: $ === void 0 ? 
null : $ } }, Ge.startTransition = function (C) { var $ = R.T, Y = {}; R.T = Y; try { var V = C(), W = R.S; W !== null && W(Y, V), typeof V == "object" && V !== null && typeof V.then == "function" && V.then(H, I) } catch (fe) { I(fe) } finally { R.T = $ } }, Ge.unstable_useCacheRefresh = function () { return R.H.useCacheRefresh() }, Ge.use = function (C) { return R.H.use(C) }, Ge.useActionState = function (C, $, Y) { return R.H.useActionState(C, $, Y) }, Ge.useCallback = function (C, $) { return R.H.useCallback(C, $) }, Ge.useContext = function (C) { return R.H.useContext(C) }, Ge.useDebugValue = function () { }, Ge.useDeferredValue = function (C, $) { return R.H.useDeferredValue(C, $) }, Ge.useEffect = function (C, $, Y) { var V = R.H; if (typeof Y == "function") throw Error("useEffect CRUD overload is not enabled in this build of React."); return V.useEffect(C, $) }, Ge.useId = function () { return R.H.useId() }, Ge.useImperativeHandle = function (C, $, Y) { return R.H.useImperativeHandle(C, $, Y) }, Ge.useInsertionEffect = function (C, $) { return R.H.useInsertionEffect(C, $) }, Ge.useLayoutEffect = function (C, $) { return R.H.useLayoutEffect(C, $) }, Ge.useMemo = function (C, $) { return R.H.useMemo(C, $) }, Ge.useOptimistic = function (C, $) { return R.H.useOptimistic(C, $) }, Ge.useReducer = function (C, $, Y) { return R.H.useReducer(C, $, Y) }, Ge.useRef = function (C) { return R.H.useRef(C) }, Ge.useState = function (C) { return R.H.useState(C) }, Ge.useSyncExternalStore = function (C, $, Y) { return R.H.useSyncExternalStore(C, $, Y) }, Ge.useTransition = function () { return R.H.useTransition() }, Ge.version = "19.1.1", Ge } var Jy; function pl() { return Jy || (Jy = 1, Xm.exports = wE()), Xm.exports } var w = pl(); const xn = yp(w), Jb = yE({ __proto__: null, default: xn }, [w]); var Zm = { exports: {} }, Pi = {}, Wm = { exports: {} }, Km = {};/** + */var xv;function JE(){if(xv)return Xe;xv=1;var 
e=Symbol.for("react.transitional.element"),n=Symbol.for("react.portal"),r=Symbol.for("react.fragment"),a=Symbol.for("react.strict_mode"),l=Symbol.for("react.profiler"),c=Symbol.for("react.consumer"),d=Symbol.for("react.context"),f=Symbol.for("react.forward_ref"),m=Symbol.for("react.suspense"),h=Symbol.for("react.memo"),g=Symbol.for("react.lazy"),y=Symbol.iterator;function x(C){return C===null||typeof C!="object"?null:(C=y&&C[y]||C["@@iterator"],typeof C=="function"?C:null)}var b={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},j=Object.assign,N={};function S(C,P,Y){this.props=C,this.context=P,this.refs=N,this.updater=Y||b}S.prototype.isReactComponent={},S.prototype.setState=function(C,P){if(typeof C!="object"&&typeof C!="function"&&C!=null)throw Error("takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,C,P,"setState")},S.prototype.forceUpdate=function(C){this.updater.enqueueForceUpdate(this,C,"forceUpdate")};function _(){}_.prototype=S.prototype;function A(C,P,Y){this.props=C,this.context=P,this.refs=N,this.updater=Y||b}var E=A.prototype=new _;E.constructor=A,j(E,S.prototype),E.isPureReactComponent=!0;var M=Array.isArray,R={H:null,A:null,T:null,S:null,V:null},D=Object.prototype.hasOwnProperty;function O(C,P,Y,V,J,ce){return Y=ce.ref,{$$typeof:e,type:C,key:P,ref:Y!==void 0?Y:null,props:ce}}function H(C,P){return O(C.type,P,void 0,void 0,void 0,C.props)}function q(C){return typeof C=="object"&&C!==null&&C.$$typeof===e}function Z(C){var P={"=":"=0",":":"=2"};return"$"+C.replace(/[=:]/g,function(Y){return P[Y]})}var Q=/\/+/g;function G(C,P){return typeof C=="object"&&C!==null&&C.key!=null?Z(""+C.key):P.toString(36)}function ne(){}function U(C){switch(C.status){case"fulfilled":return C.value;case"rejected":throw C.reason;default:switch(typeof 
C.status=="string"?C.then(ne,ne):(C.status="pending",C.then(function(P){C.status==="pending"&&(C.status="fulfilled",C.value=P)},function(P){C.status==="pending"&&(C.status="rejected",C.reason=P)})),C.status){case"fulfilled":return C.value;case"rejected":throw C.reason}}throw C}function B(C,P,Y,V,J){var ce=typeof C;(ce==="undefined"||ce==="boolean")&&(C=null);var fe=!1;if(C===null)fe=!0;else switch(ce){case"bigint":case"string":case"number":fe=!0;break;case"object":switch(C.$$typeof){case e:case n:fe=!0;break;case g:return fe=C._init,B(fe(C._payload),P,Y,V,J)}}if(fe)return J=J(C),fe=V===""?"."+G(C,0):V,M(J)?(Y="",fe!=null&&(Y=fe.replace(Q,"$&/")+"/"),B(J,P,Y,"",function(ge){return ge})):J!=null&&(q(J)&&(J=H(J,Y+(J.key==null||C&&C.key===J.key?"":(""+J.key).replace(Q,"$&/")+"/")+fe)),P.push(J)),1;fe=0;var ee=V===""?".":V+":";if(M(C))for(var ie=0;ie>> 1, C = k[H]; if (0 < l(C, L)) k[H] = L, k[I] = C, I = H; else break e } } function r(k) { return k.length === 0 ? null : k[0] } function a(k) { if (k.length === 0) return null; var L = k[0], I = k.pop(); if (I !== L) { k[0] = I; e: for (var H = 0, C = k.length, $ = C >>> 1; H < $;) { var Y = 2 * (H + 1) - 1, V = k[Y], W = Y + 1, fe = k[W]; if (0 > l(V, I)) W < C && 0 > l(fe, V) ? (k[H] = fe, k[W] = I, H = W) : (k[H] = V, k[Y] = I, H = Y); else if (W < C && 0 > l(fe, I)) k[H] = fe, k[W] = I, H = W; else break e } } return L } function l(k, L) { var I = k.sortIndex - L.sortIndex; return I !== 0 ? I : k.id - L.id } if (e.unstable_now = void 0, typeof performance == "object" && typeof performance.now == "function") { var c = performance; e.unstable_now = function () { return c.now() } } else { var d = Date, f = d.now(); e.unstable_now = function () { return d.now() - f } } var m = [], h = [], g = 1, y = null, x = 3, b = !1, S = !1, N = !1, j = !1, _ = typeof setTimeout == "function" ? setTimeout : null, M = typeof clearTimeout == "function" ? clearTimeout : null, E = typeof setImmediate < "u" ? 
setImmediate : null; function T(k) { for (var L = r(h); L !== null;) { if (L.callback === null) a(h); else if (L.startTime <= k) a(h), L.sortIndex = L.expirationTime, n(m, L); else break; L = r(h) } } function R(k) { if (N = !1, T(k), !S) if (r(m) !== null) S = !0, D || (D = !0, G()); else { var L = r(h); L !== null && U(R, L.startTime - k) } } var D = !1, O = -1, B = 5, q = -1; function K() { return j ? !0 : !(e.unstable_now() - q < B) } function J() { if (j = !1, D) { var k = e.unstable_now(); q = k; var L = !0; try { e: { S = !1, N && (N = !1, M(O), O = -1), b = !0; var I = x; try { t: { for (T(k), y = r(m); y !== null && !(y.expirationTime > k && K());) { var H = y.callback; if (typeof H == "function") { y.callback = null, x = y.priorityLevel; var C = H(y.expirationTime <= k); if (k = e.unstable_now(), typeof C == "function") { y.callback = C, T(k), L = !0; break t } y === r(m) && a(m), T(k) } else a(m); y = r(m) } if (y !== null) L = !0; else { var $ = r(h); $ !== null && U(R, $.startTime - k), L = !1 } } break e } finally { y = null, x = I, b = !1 } L = void 0 } } finally { L ? G() : D = !1 } } } var G; if (typeof E == "function") G = function () { E(J) }; else if (typeof MessageChannel < "u") { var Z = new MessageChannel, P = Z.port2; Z.port1.onmessage = J, G = function () { P.postMessage(null) } } else G = function () { _(J, 0) }; function U(k, L) { O = _(function () { k(e.unstable_now()) }, L) } e.unstable_IdlePriority = 5, e.unstable_ImmediatePriority = 1, e.unstable_LowPriority = 4, e.unstable_NormalPriority = 3, e.unstable_Profiling = null, e.unstable_UserBlockingPriority = 2, e.unstable_cancelCallback = function (k) { k.callback = null }, e.unstable_forceFrameRate = function (k) { 0 > k || 125 < k ? console.error("forceFrameRate takes a positive int between 0 and 125, forcing frame rates higher than 125 fps is not supported") : B = 0 < k ? 
Math.floor(1e3 / k) : 5 }, e.unstable_getCurrentPriorityLevel = function () { return x }, e.unstable_next = function (k) { switch (x) { case 1: case 2: case 3: var L = 3; break; default: L = x }var I = x; x = L; try { return k() } finally { x = I } }, e.unstable_requestPaint = function () { j = !0 }, e.unstable_runWithPriority = function (k, L) { switch (k) { case 1: case 2: case 3: case 4: case 5: break; default: k = 3 }var I = x; x = k; try { return L() } finally { x = I } }, e.unstable_scheduleCallback = function (k, L, I) { var H = e.unstable_now(); switch (typeof I == "object" && I !== null ? (I = I.delay, I = typeof I == "number" && 0 < I ? H + I : H) : I = H, k) { case 1: var C = -1; break; case 2: C = 250; break; case 5: C = 1073741823; break; case 4: C = 1e4; break; default: C = 5e3 }return C = I + C, k = { id: g++, callback: L, priorityLevel: k, startTime: I, expirationTime: C, sortIndex: -1 }, I > H ? (k.sortIndex = I, n(h, k), r(m) === null && k === r(h) && (N ? (M(O), O = -1) : N = !0, U(R, I - H))) : (k.sortIndex = C, n(m, k), S || b || (S = !0, D || (D = !0, G()))), k }, e.unstable_shouldYield = K, e.unstable_wrapCallback = function (k) { var L = x; return function () { var I = x; x = L; try { return k.apply(this, arguments) } finally { x = I } } } })(Km)), Km } var tv; function SE() { return tv || (tv = 1, Wm.exports = NE()), Wm.exports } var Qm = { exports: {} }, Wt = {};/** + */var vv;function eC(){return vv||(vv=1,(function(e){function n(k,L){var I=k.length;k.push(L);e:for(;0>>1,C=k[$];if(0>>1;$l(V,I))Jl(ce,V)?(k[$]=ce,k[J]=I,$=J):(k[$]=V,k[Y]=I,$=Y);else if(Jl(ce,I))k[$]=ce,k[J]=I,$=J;else break e}}return L}function l(k,L){var I=k.sortIndex-L.sortIndex;return I!==0?I:k.id-L.id}if(e.unstable_now=void 0,typeof performance=="object"&&typeof performance.now=="function"){var c=performance;e.unstable_now=function(){return c.now()}}else{var d=Date,f=d.now();e.unstable_now=function(){return d.now()-f}}var 
m=[],h=[],g=1,y=null,x=3,b=!1,j=!1,N=!1,S=!1,_=typeof setTimeout=="function"?setTimeout:null,A=typeof clearTimeout=="function"?clearTimeout:null,E=typeof setImmediate<"u"?setImmediate:null;function M(k){for(var L=r(h);L!==null;){if(L.callback===null)a(h);else if(L.startTime<=k)a(h),L.sortIndex=L.expirationTime,n(m,L);else break;L=r(h)}}function R(k){if(N=!1,M(k),!j)if(r(m)!==null)j=!0,D||(D=!0,G());else{var L=r(h);L!==null&&B(R,L.startTime-k)}}var D=!1,O=-1,H=5,q=-1;function Z(){return S?!0:!(e.unstable_now()-qk&&Z());){var $=y.callback;if(typeof $=="function"){y.callback=null,x=y.priorityLevel;var C=$(y.expirationTime<=k);if(k=e.unstable_now(),typeof C=="function"){y.callback=C,M(k),L=!0;break t}y===r(m)&&a(m),M(k)}else a(m);y=r(m)}if(y!==null)L=!0;else{var P=r(h);P!==null&&B(R,P.startTime-k),L=!1}}break e}finally{y=null,x=I,b=!1}L=void 0}}finally{L?G():D=!1}}}var G;if(typeof E=="function")G=function(){E(Q)};else if(typeof MessageChannel<"u"){var ne=new MessageChannel,U=ne.port2;ne.port1.onmessage=Q,G=function(){U.postMessage(null)}}else G=function(){_(Q,0)};function B(k,L){O=_(function(){k(e.unstable_now())},L)}e.unstable_IdlePriority=5,e.unstable_ImmediatePriority=1,e.unstable_LowPriority=4,e.unstable_NormalPriority=3,e.unstable_Profiling=null,e.unstable_UserBlockingPriority=2,e.unstable_cancelCallback=function(k){k.callback=null},e.unstable_forceFrameRate=function(k){0>k||125$?(k.sortIndex=I,n(h,k),r(m)===null&&k===r(h)&&(N?(A(O),O=-1):N=!0,B(R,I-$))):(k.sortIndex=C,n(m,k),j||b||(j=!0,D||(D=!0,G()))),k},e.unstable_shouldYield=Z,e.unstable_wrapCallback=function(k){var L=x;return function(){var I=x;x=L;try{return k.apply(this,arguments)}finally{x=I}}}})(dh)),dh}var bv;function tC(){return bv||(bv=1,uh.exports=eC()),uh.exports}var fh={exports:{}},Jt={};/** * @license React * react-dom.production.js * @@ -30,7 +30,7 @@ function yE(e, n) { for (var r = 0; r < n.length; r++) { const a = n[r]; if (typ * * This source code is licensed under the MIT license found in the 
* LICENSE file in the root directory of this source tree. - */var nv; function jE() { if (nv) return Wt; nv = 1; var e = pl(); function n(m) { var h = "https://react.dev/errors/" + m; if (1 < arguments.length) { h += "?args[]=" + encodeURIComponent(arguments[1]); for (var g = 2; g < arguments.length; g++)h += "&args[]=" + encodeURIComponent(arguments[g]) } return "Minified React error #" + m + "; visit " + h + " for the full message or use the non-minified dev environment for full errors and additional helpful warnings." } function r() { } var a = { d: { f: r, r: function () { throw Error(n(522)) }, D: r, C: r, L: r, m: r, X: r, S: r, M: r }, p: 0, findDOMNode: null }, l = Symbol.for("react.portal"); function c(m, h, g) { var y = 3 < arguments.length && arguments[3] !== void 0 ? arguments[3] : null; return { $$typeof: l, key: y == null ? null : "" + y, children: m, containerInfo: h, implementation: g } } var d = e.__CLIENT_INTERNALS_DO_NOT_USE_OR_WARN_USERS_THEY_CANNOT_UPGRADE; function f(m, h) { if (m === "font") return ""; if (typeof h == "string") return h === "use-credentials" ? h : "" } return Wt.__DOM_INTERNALS_DO_NOT_USE_OR_WARN_USERS_THEY_CANNOT_UPGRADE = a, Wt.createPortal = function (m, h) { var g = 2 < arguments.length && arguments[2] !== void 0 ? arguments[2] : null; if (!h || h.nodeType !== 1 && h.nodeType !== 9 && h.nodeType !== 11) throw Error(n(299)); return c(m, h, null, g) }, Wt.flushSync = function (m) { var h = d.T, g = a.p; try { if (d.T = null, a.p = 2, m) return m() } finally { d.T = h, a.p = g, a.d.f() } }, Wt.preconnect = function (m, h) { typeof m == "string" && (h ? (h = h.crossOrigin, h = typeof h == "string" ? h === "use-credentials" ? h : "" : void 0) : h = null, a.d.C(m, h)) }, Wt.prefetchDNS = function (m) { typeof m == "string" && a.d.D(m) }, Wt.preinit = function (m, h) { if (typeof m == "string" && h && typeof h.as == "string") { var g = h.as, y = f(g, h.crossOrigin), x = typeof h.integrity == "string" ? 
h.integrity : void 0, b = typeof h.fetchPriority == "string" ? h.fetchPriority : void 0; g === "style" ? a.d.S(m, typeof h.precedence == "string" ? h.precedence : void 0, { crossOrigin: y, integrity: x, fetchPriority: b }) : g === "script" && a.d.X(m, { crossOrigin: y, integrity: x, fetchPriority: b, nonce: typeof h.nonce == "string" ? h.nonce : void 0 }) } }, Wt.preinitModule = function (m, h) { if (typeof m == "string") if (typeof h == "object" && h !== null) { if (h.as == null || h.as === "script") { var g = f(h.as, h.crossOrigin); a.d.M(m, { crossOrigin: g, integrity: typeof h.integrity == "string" ? h.integrity : void 0, nonce: typeof h.nonce == "string" ? h.nonce : void 0 }) } } else h == null && a.d.M(m) }, Wt.preload = function (m, h) { if (typeof m == "string" && typeof h == "object" && h !== null && typeof h.as == "string") { var g = h.as, y = f(g, h.crossOrigin); a.d.L(m, g, { crossOrigin: y, integrity: typeof h.integrity == "string" ? h.integrity : void 0, nonce: typeof h.nonce == "string" ? h.nonce : void 0, type: typeof h.type == "string" ? h.type : void 0, fetchPriority: typeof h.fetchPriority == "string" ? h.fetchPriority : void 0, referrerPolicy: typeof h.referrerPolicy == "string" ? h.referrerPolicy : void 0, imageSrcSet: typeof h.imageSrcSet == "string" ? h.imageSrcSet : void 0, imageSizes: typeof h.imageSizes == "string" ? h.imageSizes : void 0, media: typeof h.media == "string" ? h.media : void 0 }) } }, Wt.preloadModule = function (m, h) { if (typeof m == "string") if (h) { var g = f(h.as, h.crossOrigin); a.d.m(m, { as: typeof h.as == "string" && h.as !== "script" ? h.as : void 0, crossOrigin: g, integrity: typeof h.integrity == "string" ? 
h.integrity : void 0 }) } else a.d.m(m) }, Wt.requestFormReset = function (m) { a.d.r(m) }, Wt.unstable_batchedUpdates = function (m, h) { return m(h) }, Wt.useFormState = function (m, h, g) { return d.H.useFormState(m, h, g) }, Wt.useFormStatus = function () { return d.H.useHostTransitionStatus() }, Wt.version = "19.1.1", Wt } var sv; function ew() { if (sv) return Qm.exports; sv = 1; function e() { if (!(typeof __REACT_DEVTOOLS_GLOBAL_HOOK__ > "u" || typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE != "function")) try { __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e) } catch (n) { console.error(n) } } return e(), Qm.exports = jE(), Qm.exports }/** + */var wv;function nC(){if(wv)return Jt;wv=1;var e=Nl();function n(m){var h="https://react.dev/errors/"+m;if(1"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(n){console.error(n)}}return e(),fh.exports=nC(),fh.exports}/** * @license React * react-dom-client.production.js * @@ -38,475 +38,424 @@ function yE(e, n) { for (var r = 0; r < n.length; r++) { const a = n[r]; if (typ * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. - */var rv; function _E() { - if (rv) return Pi; rv = 1; var e = SE(), n = pl(), r = ew(); function a(t) { var s = "https://react.dev/errors/" + t; if (1 < arguments.length) { s += "?args[]=" + encodeURIComponent(arguments[1]); for (var i = 2; i < arguments.length; i++)s += "&args[]=" + encodeURIComponent(arguments[i]) } return "Minified React error #" + t + "; visit " + s + " for the full message or use the non-minified dev environment for full errors and additional helpful warnings." 
} function l(t) { return !(!t || t.nodeType !== 1 && t.nodeType !== 9 && t.nodeType !== 11) } function c(t) { var s = t, i = t; if (t.alternate) for (; s.return;)s = s.return; else { t = s; do s = t, (s.flags & 4098) !== 0 && (i = s.return), t = s.return; while (t) } return s.tag === 3 ? i : null } function d(t) { if (t.tag === 13) { var s = t.memoizedState; if (s === null && (t = t.alternate, t !== null && (s = t.memoizedState)), s !== null) return s.dehydrated } return null } function f(t) { if (c(t) !== t) throw Error(a(188)) } function m(t) { var s = t.alternate; if (!s) { if (s = c(t), s === null) throw Error(a(188)); return s !== t ? null : t } for (var i = t, u = s; ;) { var p = i.return; if (p === null) break; var v = p.alternate; if (v === null) { if (u = p.return, u !== null) { i = u; continue } break } if (p.child === v.child) { for (v = p.child; v;) { if (v === i) return f(p), t; if (v === u) return f(p), s; v = v.sibling } throw Error(a(188)) } if (i.return !== u.return) i = p, u = v; else { for (var A = !1, z = p.child; z;) { if (z === i) { A = !0, i = p, u = v; break } if (z === u) { A = !0, u = p, i = v; break } z = z.sibling } if (!A) { for (z = v.child; z;) { if (z === i) { A = !0, i = v, u = p; break } if (z === u) { A = !0, u = v, i = p; break } z = z.sibling } if (!A) throw Error(a(189)) } } if (i.alternate !== u) throw Error(a(190)) } if (i.tag !== 3) throw Error(a(188)); return i.stateNode.current === i ? 
t : s } function h(t) { var s = t.tag; if (s === 5 || s === 26 || s === 27 || s === 6) return t; for (t = t.child; t !== null;) { if (s = h(t), s !== null) return s; t = t.sibling } return null } var g = Object.assign, y = Symbol.for("react.element"), x = Symbol.for("react.transitional.element"), b = Symbol.for("react.portal"), S = Symbol.for("react.fragment"), N = Symbol.for("react.strict_mode"), j = Symbol.for("react.profiler"), _ = Symbol.for("react.provider"), M = Symbol.for("react.consumer"), E = Symbol.for("react.context"), T = Symbol.for("react.forward_ref"), R = Symbol.for("react.suspense"), D = Symbol.for("react.suspense_list"), O = Symbol.for("react.memo"), B = Symbol.for("react.lazy"), q = Symbol.for("react.activity"), K = Symbol.for("react.memo_cache_sentinel"), J = Symbol.iterator; function G(t) { return t === null || typeof t != "object" ? null : (t = J && t[J] || t["@@iterator"], typeof t == "function" ? t : null) } var Z = Symbol.for("react.client.reference"); function P(t) { if (t == null) return null; if (typeof t == "function") return t.$$typeof === Z ? null : t.displayName || t.name || null; if (typeof t == "string") return t; switch (t) { case S: return "Fragment"; case j: return "Profiler"; case N: return "StrictMode"; case R: return "Suspense"; case D: return "SuspenseList"; case q: return "Activity" }if (typeof t == "object") switch (t.$$typeof) { case b: return "Portal"; case E: return (t.displayName || "Context") + ".Provider"; case M: return (t._context.displayName || "Context") + ".Consumer"; case T: var s = t.render; return t = t.displayName, t || (t = s.displayName || s.name || "", t = t !== "" ? "ForwardRef(" + t + ")" : "ForwardRef"), t; case O: return s = t.displayName || null, s !== null ? 
s : P(t.type) || "Memo"; case B: s = t._payload, t = t._init; try { return P(t(s)) } catch { } }return null } var U = Array.isArray, k = n.__CLIENT_INTERNALS_DO_NOT_USE_OR_WARN_USERS_THEY_CANNOT_UPGRADE, L = r.__DOM_INTERNALS_DO_NOT_USE_OR_WARN_USERS_THEY_CANNOT_UPGRADE, I = { pending: !1, data: null, method: null, action: null }, H = [], C = -1; function $(t) { return { current: t } } function Y(t) { 0 > C || (t.current = H[C], H[C] = null, C--) } function V(t, s) { C++, H[C] = t.current, t.current = s } var W = $(null), fe = $(null), ue = $(null), te = $(null); function ie(t, s) { switch (V(ue, s), V(fe, t), V(W, null), s.nodeType) { case 9: case 11: t = (t = s.documentElement) && (t = t.namespaceURI) ? jy(t) : 0; break; default: if (t = s.tagName, s = s.namespaceURI) s = jy(s), t = _y(s, t); else switch (t) { case "svg": t = 1; break; case "math": t = 2; break; default: t = 0 } }Y(W), V(W, t) } function ge() { Y(W), Y(fe), Y(ue) } function be(t) { t.memoizedState !== null && V(te, t); var s = W.current, i = _y(s, t.type); s !== i && (V(fe, t), V(W, i)) } function we(t) { fe.current === t && (Y(W), Y(fe)), te.current === t && (Y(te), zi._currentValue = I) } var ne = Object.prototype.hasOwnProperty, pe = e.unstable_scheduleCallback, he = e.unstable_cancelCallback, ee = e.unstable_shouldYield, ve = e.unstable_requestPaint, ye = e.unstable_now, Te = e.unstable_getCurrentPriorityLevel, je = e.unstable_ImmediatePriority, $e = e.unstable_UserBlockingPriority, it = e.unstable_NormalPriority, ze = e.unstable_LowPriority, Se = e.unstable_IdlePriority, Pe = e.log, Ee = e.unstable_setDisableYieldValue, He = null, Fe = null; function Nt(t) { if (typeof Pe == "function" && Ee(t), Fe && typeof Fe.setStrictMode == "function") try { Fe.setStrictMode(He, t) } catch { } } var yt = Math.clz32 ? Math.clz32 : xe, hs = Math.log, wo = Math.LN2; function xe(t) { return t >>>= 0, t === 0 ? 
32 : 31 - (hs(t) / wo | 0) | 0 } var Re = 256, Ue = 4194304; function Et(t) { var s = t & 42; if (s !== 0) return s; switch (t & -t) { case 1: return 1; case 2: return 2; case 4: return 4; case 8: return 8; case 16: return 16; case 32: return 32; case 64: return 64; case 128: return 128; case 256: case 512: case 1024: case 2048: case 4096: case 8192: case 16384: case 32768: case 65536: case 131072: case 262144: case 524288: case 1048576: case 2097152: return t & 4194048; case 4194304: case 8388608: case 16777216: case 33554432: return t & 62914560; case 67108864: return 67108864; case 134217728: return 134217728; case 268435456: return 268435456; case 536870912: return 536870912; case 1073741824: return 0; default: return t } } function Dn(t, s, i) { var u = t.pendingLanes; if (u === 0) return 0; var p = 0, v = t.suspendedLanes, A = t.pingedLanes; t = t.warmLanes; var z = u & 134217727; return z !== 0 ? (u = z & ~v, u !== 0 ? p = Et(u) : (A &= z, A !== 0 ? p = Et(A) : i || (i = z & ~t, i !== 0 && (p = Et(i))))) : (z = u & ~v, z !== 0 ? p = Et(z) : A !== 0 ? p = Et(A) : i || (i = u & ~t, i !== 0 && (p = Et(i)))), p === 0 ? 0 : s !== 0 && s !== p && (s & v) === 0 && (v = p & -p, i = s & -s, v >= i || v === 32 && (i & 4194048) !== 0) ? 
s : p } function Le(t, s) { return (t.pendingLanes & ~(t.suspendedLanes & ~t.pingedLanes) & s) === 0 } function Ne(t, s) { switch (t) { case 1: case 2: case 4: case 8: case 64: return s + 250; case 16: case 32: case 128: case 256: case 512: case 1024: case 2048: case 4096: case 8192: case 16384: case 32768: case 65536: case 131072: case 262144: case 524288: case 1048576: case 2097152: return s + 5e3; case 4194304: case 8388608: case 16777216: case 33554432: return -1; case 67108864: case 134217728: case 268435456: case 536870912: case 1073741824: return -1; default: return -1 } } function lt() { var t = Re; return Re <<= 1, (Re & 4194048) === 0 && (Re = 256), t } function ot() { var t = Ue; return Ue <<= 1, (Ue & 62914560) === 0 && (Ue = 4194304), t } function At(t) { for (var s = [], i = 0; 31 > i; i++)s.push(t); return s } function en(t, s) { t.pendingLanes |= s, s !== 268435456 && (t.suspendedLanes = 0, t.pingedLanes = 0, t.warmLanes = 0) } function On(t, s, i, u, p, v) { var A = t.pendingLanes; t.pendingLanes = i, t.suspendedLanes = 0, t.pingedLanes = 0, t.warmLanes = 0, t.expiredLanes &= i, t.entangledLanes &= i, t.errorRecoveryDisabledLanes &= i, t.shellSuspendCounter = 0; var z = t.entanglements, F = t.expirationTimes, re = t.hiddenUpdates; for (i = A & ~i; 0 < i;) { var le = 31 - yt(i), me = 1 << le; z[le] = 0, F[le] = -1; var oe = re[le]; if (oe !== null) for (re[le] = null, le = 0; le < oe.length; le++) { var ae = oe[le]; ae !== null && (ae.lane &= -536870913) } i &= ~me } u !== 0 && Gn(t, u, 0), v !== 0 && p === 0 && t.tag !== 0 && (t.suspendedLanes |= v & ~(A & ~s)) } function Gn(t, s, i) { t.pendingLanes |= s, t.suspendedLanes &= ~s; var u = 31 - yt(s); t.entangledLanes |= s, t.entanglements[u] = t.entanglements[u] | 1073741824 | i & 4194090 } function La(t, s) { var i = t.entangledLanes |= s; for (t = t.entanglements; i;) { var u = 31 - yt(i), p = 1 << u; p & s | t[u] & s && (t[u] |= s), i &= ~p } } function Ha(t) { switch (t) { case 2: t = 1; break; 
case 8: t = 4; break; case 32: t = 16; break; case 256: case 512: case 1024: case 2048: case 4096: case 8192: case 16384: case 32768: case 65536: case 131072: case 262144: case 524288: case 1048576: case 2097152: case 4194304: case 8388608: case 16777216: case 33554432: t = 128; break; case 268435456: t = 134217728; break; default: t = 0 }return t } function $a(t) { return t &= -t, 2 < t ? 8 < t ? (t & 134217727) !== 0 ? 32 : 268435456 : 8 : 2 } function Ml() { var t = L.p; return t !== 0 ? t : (t = window.event, t === void 0 ? 32 : qy(t.type)) } function Ld(t, s) { var i = L.p; try { return L.p = t, s() } finally { L.p = i } } var Xn = Math.random().toString(36).slice(2), Ht = "__reactFiber$" + Xn, Xt = "__reactProps$" + Xn, Fs = "__reactContainer$" + Xn, Ba = "__reactEvents$" + Xn, Hd = "__reactListeners$" + Xn, $d = "__reactHandles$" + Xn, Tl = "__reactResources$" + Xn, Or = "__reactMarker$" + Xn; function Pa(t) { delete t[Ht], delete t[Xt], delete t[Ba], delete t[Hd], delete t[$d] } function Ys(t) { var s = t[Ht]; if (s) return s; for (var i = t.parentNode; i;) { if (s = i[Fs] || i[Ht]) { if (i = s.alternate, s.child !== null || i !== null && i.child !== null) for (t = Ay(t); t !== null;) { if (i = t[Ht]) return i; t = Ay(t) } return s } t = i, i = t.parentNode } return null } function ps(t) { if (t = t[Ht] || t[Fs]) { var s = t.tag; if (s === 5 || s === 6 || s === 13 || s === 26 || s === 27 || s === 3) return t } return null } function Gs(t) { var s = t.tag; if (s === 5 || s === 26 || s === 27 || s === 6) return t.stateNode; throw Error(a(33)) } function gs(t) { var s = t[Tl]; return s || (s = t[Tl] = { hoistableStyles: new Map, hoistableScripts: new Map }), s } function Mt(t) { t[Or] = !0 } var Rl = new Set, Dl = {}; function xs(t, s) { Xs(t, s), Xs(t + "Capture", s) } function Xs(t, s) { for (Dl[t] = s, t = 0; t < s.length; t++)Rl.add(s[t]) } var Bd = 
RegExp("^[:A-Z_a-z\\u00C0-\\u00D6\\u00D8-\\u00F6\\u00F8-\\u02FF\\u0370-\\u037D\\u037F-\\u1FFF\\u200C-\\u200D\\u2070-\\u218F\\u2C00-\\u2FEF\\u3001-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFFD][:A-Z_a-z\\u00C0-\\u00D6\\u00D8-\\u00F6\\u00F8-\\u02FF\\u0370-\\u037D\\u037F-\\u1FFF\\u200C-\\u200D\\u2070-\\u218F\\u2C00-\\u2FEF\\u3001-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFFD\\-.0-9\\u00B7\\u0300-\\u036F\\u203F-\\u2040]*$"), Ua = {}, Ol = {}; function Pd(t) { return ne.call(Ol, t) ? !0 : ne.call(Ua, t) ? !1 : Bd.test(t) ? Ol[t] = !0 : (Ua[t] = !0, !1) } function No(t, s, i) { if (Pd(s)) if (i === null) t.removeAttribute(s); else { switch (typeof i) { case "undefined": case "function": case "symbol": t.removeAttribute(s); return; case "boolean": var u = s.toLowerCase().slice(0, 5); if (u !== "data-" && u !== "aria-") { t.removeAttribute(s); return } }t.setAttribute(s, "" + i) } } function So(t, s, i) { if (i === null) t.removeAttribute(s); else { switch (typeof i) { case "undefined": case "function": case "symbol": case "boolean": t.removeAttribute(s); return }t.setAttribute(s, "" + i) } } function zn(t, s, i, u) { if (u === null) t.removeAttribute(i); else { switch (typeof u) { case "undefined": case "function": case "symbol": case "boolean": t.removeAttribute(i); return }t.setAttributeNS(s, i, "" + u) } } var Va, zl; function ys(t) { - if (Va === void 0) try { throw Error() } catch (i) { - var s = i.stack.trim().match(/\n( *(at )?)/); Va = s && s[1] || "", zl = -1 < i.stack.indexOf(` - at`) ? " ()" : -1 < i.stack.indexOf("@") ? 
"@unknown:0:0" : "" - } return ` -`+ Va + t + zl - } var qa = !1; function Fa(t, s) { - if (!t || qa) return ""; qa = !0; var i = Error.prepareStackTrace; Error.prepareStackTrace = void 0; try { - var u = { DetermineComponentFrameRoot: function () { try { if (s) { var me = function () { throw Error() }; if (Object.defineProperty(me.prototype, "props", { set: function () { throw Error() } }), typeof Reflect == "object" && Reflect.construct) { try { Reflect.construct(me, []) } catch (ae) { var oe = ae } Reflect.construct(t, [], me) } else { try { me.call() } catch (ae) { oe = ae } t.call(me.prototype) } } else { try { throw Error() } catch (ae) { oe = ae } (me = t()) && typeof me.catch == "function" && me.catch(function () { }) } } catch (ae) { if (ae && oe && typeof ae.stack == "string") return [ae.stack, oe.stack] } return [null, null] } }; u.DetermineComponentFrameRoot.displayName = "DetermineComponentFrameRoot"; var p = Object.getOwnPropertyDescriptor(u.DetermineComponentFrameRoot, "name"); p && p.configurable && Object.defineProperty(u.DetermineComponentFrameRoot, "name", { value: "DetermineComponentFrameRoot" }); var v = u.DetermineComponentFrameRoot(), A = v[0], z = v[1]; if (A && z) { - var F = A.split(` -`), re = z.split(` -`); for (p = u = 0; u < F.length && !F[u].includes("DetermineComponentFrameRoot");)u++; for (; p < re.length && !re[p].includes("DetermineComponentFrameRoot");)p++; if (u === F.length || p === re.length) for (u = F.length - 1, p = re.length - 1; 1 <= u && 0 <= p && F[u] !== re[p];)p--; for (; 1 <= u && 0 <= p; u--, p--)if (F[u] !== re[p]) { - if (u !== 1 || p !== 1) do if (u--, p--, 0 > p || F[u] !== re[p]) { - var le = ` -`+ F[u].replace(" at new ", " at "); return t.displayName && le.includes("") && (le = le.replace("", t.displayName)), le - } while (1 <= u && 0 <= p); break - } - } - } finally { qa = !1, Error.prepareStackTrace = i } return (i = t ? t.displayName || t.name : "") ? 
ys(i) : "" - } function Ud(t) { switch (t.tag) { case 26: case 27: case 5: return ys(t.type); case 16: return ys("Lazy"); case 13: return ys("Suspense"); case 19: return ys("SuspenseList"); case 0: case 15: return Fa(t.type, !1); case 11: return Fa(t.type.render, !1); case 1: return Fa(t.type, !0); case 31: return ys("Activity"); default: return "" } } function Il(t) { - try { var s = ""; do s += Ud(t), t = t.return; while (t); return s } catch (i) { - return ` -Error generating stack: `+ i.message + ` -`+ i.stack - } - } function tn(t) { switch (typeof t) { case "bigint": case "boolean": case "number": case "string": case "undefined": return t; case "object": return t; default: return "" } } function Ll(t) { var s = t.type; return (t = t.nodeName) && t.toLowerCase() === "input" && (s === "checkbox" || s === "radio") } function Vd(t) { var s = Ll(t) ? "checked" : "value", i = Object.getOwnPropertyDescriptor(t.constructor.prototype, s), u = "" + t[s]; if (!t.hasOwnProperty(s) && typeof i < "u" && typeof i.get == "function" && typeof i.set == "function") { var p = i.get, v = i.set; return Object.defineProperty(t, s, { configurable: !0, get: function () { return p.call(this) }, set: function (A) { u = "" + A, v.call(this, A) } }), Object.defineProperty(t, s, { enumerable: i.enumerable }), { getValue: function () { return u }, setValue: function (A) { u = "" + A }, stopTracking: function () { t._valueTracker = null, delete t[s] } } } } function jo(t) { t._valueTracker || (t._valueTracker = Vd(t)) } function Ya(t) { if (!t) return !1; var s = t._valueTracker; if (!s) return !0; var i = s.getValue(), u = ""; return t && (u = Ll(t) ? t.checked ? "true" : "false" : t.value), t = u, t !== i ? (s.setValue(t), !0) : !1 } function _o(t) { if (t = t || (typeof document < "u" ? 
document : void 0), typeof t > "u") return null; try { return t.activeElement || t.body } catch { return t.body } } var qd = /[\n"\\]/g; function nn(t) { return t.replace(qd, function (s) { return "\\" + s.charCodeAt(0).toString(16) + " " }) } function zr(t, s, i, u, p, v, A, z) { t.name = "", A != null && typeof A != "function" && typeof A != "symbol" && typeof A != "boolean" ? t.type = A : t.removeAttribute("type"), s != null ? A === "number" ? (s === 0 && t.value === "" || t.value != s) && (t.value = "" + tn(s)) : t.value !== "" + tn(s) && (t.value = "" + tn(s)) : A !== "submit" && A !== "reset" || t.removeAttribute("value"), s != null ? Ga(t, A, tn(s)) : i != null ? Ga(t, A, tn(i)) : u != null && t.removeAttribute("value"), p == null && v != null && (t.defaultChecked = !!v), p != null && (t.checked = p && typeof p != "function" && typeof p != "symbol"), z != null && typeof z != "function" && typeof z != "symbol" && typeof z != "boolean" ? t.name = "" + tn(z) : t.removeAttribute("name") } function Hl(t, s, i, u, p, v, A, z) { if (v != null && typeof v != "function" && typeof v != "symbol" && typeof v != "boolean" && (t.type = v), s != null || i != null) { if (!(v !== "submit" && v !== "reset" || s != null)) return; i = i != null ? "" + tn(i) : "", s = s != null ? "" + tn(s) : i, z || s === t.value || (t.value = s), t.defaultValue = s } u = u ?? p, u = typeof u != "function" && typeof u != "symbol" && !!u, t.checked = z ? 
t.checked : !!u, t.defaultChecked = !!u, A != null && typeof A != "function" && typeof A != "symbol" && typeof A != "boolean" && (t.name = A) } function Ga(t, s, i) { s === "number" && _o(t.ownerDocument) === t || t.defaultValue === "" + i || (t.defaultValue = "" + i) } function vs(t, s, i, u) { if (t = t.options, s) { s = {}; for (var p = 0; p < i.length; p++)s["$" + i[p]] = !0; for (i = 0; i < t.length; i++)p = s.hasOwnProperty("$" + t[i].value), t[i].selected !== p && (t[i].selected = p), p && u && (t[i].defaultSelected = !0) } else { for (i = "" + tn(i), s = null, p = 0; p < t.length; p++) { if (t[p].value === i) { t[p].selected = !0, u && (t[p].defaultSelected = !0); return } s !== null || t[p].disabled || (s = t[p]) } s !== null && (s.selected = !0) } } function Ng(t, s, i) { if (s != null && (s = "" + tn(s), s !== t.value && (t.value = s), i == null)) { t.defaultValue !== s && (t.defaultValue = s); return } t.defaultValue = i != null ? "" + tn(i) : "" } function Sg(t, s, i, u) { if (s == null) { if (u != null) { if (i != null) throw Error(a(92)); if (U(u)) { if (1 < u.length) throw Error(a(93)); u = u[0] } i = u } i == null && (i = ""), s = i } i = tn(s), t.defaultValue = i, u = t.textContent, u === i && u !== "" && u !== null && (t.value = u) } function Eo(t, s) { if (s) { var i = t.firstChild; if (i && i === t.lastChild && i.nodeType === 3) { i.nodeValue = s; return } } t.textContent = s } var pj = new Set("animationIterationCount aspectRatio borderImageOutset borderImageSlice borderImageWidth boxFlex boxFlexGroup boxOrdinalGroup columnCount columns flex flexGrow flexPositive flexShrink flexNegative flexOrder gridArea gridRow gridRowEnd gridRowSpan gridRowStart gridColumn gridColumnEnd gridColumnSpan gridColumnStart fontWeight lineClamp lineHeight opacity order orphans scale tabSize widows zIndex zoom fillOpacity floodOpacity stopOpacity strokeDasharray strokeDashoffset strokeMiterlimit strokeOpacity strokeWidth MozAnimationIterationCount MozBoxFlex 
MozBoxFlexGroup MozLineClamp msAnimationIterationCount msFlex msZoom msFlexGrow msFlexNegative msFlexOrder msFlexPositive msFlexShrink msGridColumn msGridColumnSpan msGridRow msGridRowSpan WebkitAnimationIterationCount WebkitBoxFlex WebKitBoxFlexGroup WebkitBoxOrdinalGroup WebkitColumnCount WebkitColumns WebkitFlex WebkitFlexGrow WebkitFlexPositive WebkitFlexShrink WebkitLineClamp".split(" ")); function jg(t, s, i) { var u = s.indexOf("--") === 0; i == null || typeof i == "boolean" || i === "" ? u ? t.setProperty(s, "") : s === "float" ? t.cssFloat = "" : t[s] = "" : u ? t.setProperty(s, i) : typeof i != "number" || i === 0 || pj.has(s) ? s === "float" ? t.cssFloat = i : t[s] = ("" + i).trim() : t[s] = i + "px" } function _g(t, s, i) { if (s != null && typeof s != "object") throw Error(a(62)); if (t = t.style, i != null) { for (var u in i) !i.hasOwnProperty(u) || s != null && s.hasOwnProperty(u) || (u.indexOf("--") === 0 ? t.setProperty(u, "") : u === "float" ? t.cssFloat = "" : t[u] = ""); for (var p in s) u = s[p], s.hasOwnProperty(p) && i[p] !== u && jg(t, p, u) } else for (var v in s) s.hasOwnProperty(v) && jg(t, v, s[v]) } function Fd(t) { if (t.indexOf("-") === -1) return !1; switch (t) { case "annotation-xml": case "color-profile": case "font-face": case "font-face-src": case "font-face-uri": case "font-face-format": case "font-face-name": case "missing-glyph": return !1; default: return !0 } } var gj = new Map([["acceptCharset", "accept-charset"], ["htmlFor", "for"], ["httpEquiv", "http-equiv"], ["crossOrigin", "crossorigin"], ["accentHeight", "accent-height"], ["alignmentBaseline", "alignment-baseline"], ["arabicForm", "arabic-form"], ["baselineShift", "baseline-shift"], ["capHeight", "cap-height"], ["clipPath", "clip-path"], ["clipRule", "clip-rule"], ["colorInterpolation", "color-interpolation"], ["colorInterpolationFilters", "color-interpolation-filters"], ["colorProfile", "color-profile"], ["colorRendering", "color-rendering"], ["dominantBaseline", 
"dominant-baseline"], ["enableBackground", "enable-background"], ["fillOpacity", "fill-opacity"], ["fillRule", "fill-rule"], ["floodColor", "flood-color"], ["floodOpacity", "flood-opacity"], ["fontFamily", "font-family"], ["fontSize", "font-size"], ["fontSizeAdjust", "font-size-adjust"], ["fontStretch", "font-stretch"], ["fontStyle", "font-style"], ["fontVariant", "font-variant"], ["fontWeight", "font-weight"], ["glyphName", "glyph-name"], ["glyphOrientationHorizontal", "glyph-orientation-horizontal"], ["glyphOrientationVertical", "glyph-orientation-vertical"], ["horizAdvX", "horiz-adv-x"], ["horizOriginX", "horiz-origin-x"], ["imageRendering", "image-rendering"], ["letterSpacing", "letter-spacing"], ["lightingColor", "lighting-color"], ["markerEnd", "marker-end"], ["markerMid", "marker-mid"], ["markerStart", "marker-start"], ["overlinePosition", "overline-position"], ["overlineThickness", "overline-thickness"], ["paintOrder", "paint-order"], ["panose-1", "panose-1"], ["pointerEvents", "pointer-events"], ["renderingIntent", "rendering-intent"], ["shapeRendering", "shape-rendering"], ["stopColor", "stop-color"], ["stopOpacity", "stop-opacity"], ["strikethroughPosition", "strikethrough-position"], ["strikethroughThickness", "strikethrough-thickness"], ["strokeDasharray", "stroke-dasharray"], ["strokeDashoffset", "stroke-dashoffset"], ["strokeLinecap", "stroke-linecap"], ["strokeLinejoin", "stroke-linejoin"], ["strokeMiterlimit", "stroke-miterlimit"], ["strokeOpacity", "stroke-opacity"], ["strokeWidth", "stroke-width"], ["textAnchor", "text-anchor"], ["textDecoration", "text-decoration"], ["textRendering", "text-rendering"], ["transformOrigin", "transform-origin"], ["underlinePosition", "underline-position"], ["underlineThickness", "underline-thickness"], ["unicodeBidi", "unicode-bidi"], ["unicodeRange", "unicode-range"], ["unitsPerEm", "units-per-em"], ["vAlphabetic", "v-alphabetic"], ["vHanging", "v-hanging"], ["vIdeographic", "v-ideographic"], ["vMathematical", 
"v-mathematical"], ["vectorEffect", "vector-effect"], ["vertAdvY", "vert-adv-y"], ["vertOriginX", "vert-origin-x"], ["vertOriginY", "vert-origin-y"], ["wordSpacing", "word-spacing"], ["writingMode", "writing-mode"], ["xmlnsXlink", "xmlns:xlink"], ["xHeight", "x-height"]]), xj = /^[\u0000-\u001F ]*j[\r\n\t]*a[\r\n\t]*v[\r\n\t]*a[\r\n\t]*s[\r\n\t]*c[\r\n\t]*r[\r\n\t]*i[\r\n\t]*p[\r\n\t]*t[\r\n\t]*:/i; function $l(t) { return xj.test("" + t) ? "javascript:throw new Error('React has blocked a javascript: URL as a security precaution.')" : t } var Yd = null; function Gd(t) { return t = t.target || t.srcElement || window, t.correspondingUseElement && (t = t.correspondingUseElement), t.nodeType === 3 ? t.parentNode : t } var Co = null, ko = null; function Eg(t) { var s = ps(t); if (s && (t = s.stateNode)) { var i = t[Xt] || null; e: switch (t = s.stateNode, s.type) { case "input": if (zr(t, i.value, i.defaultValue, i.defaultValue, i.checked, i.defaultChecked, i.type, i.name), s = i.name, i.type === "radio" && s != null) { for (i = t; i.parentNode;)i = i.parentNode; for (i = i.querySelectorAll('input[name="' + nn("" + s) + '"][type="radio"]'), s = 0; s < i.length; s++) { var u = i[s]; if (u !== t && u.form === t.form) { var p = u[Xt] || null; if (!p) throw Error(a(90)); zr(u, p.value, p.defaultValue, p.defaultValue, p.checked, p.defaultChecked, p.type, p.name) } } for (s = 0; s < i.length; s++)u = i[s], u.form === t.form && Ya(u) } break e; case "textarea": Ng(t, i.value, i.defaultValue); break e; case "select": s = i.value, s != null && vs(t, !!i.multiple, s, !1) } } } var Xd = !1; function Cg(t, s, i) { if (Xd) return t(s, i); Xd = !0; try { var u = t(s); return u } finally { if (Xd = !1, (Co !== null || ko !== null) && (jc(), Co && (s = Co, t = ko, ko = Co = null, Eg(s), t))) for (s = 0; s < t.length; s++)Eg(t[s]) } } function Xa(t, s) { var i = t.stateNode; if (i === null) return null; var u = i[Xt] || null; if (u === null) return null; i = u[s]; e: switch (s) { case 
"onClick": case "onClickCapture": case "onDoubleClick": case "onDoubleClickCapture": case "onMouseDown": case "onMouseDownCapture": case "onMouseMove": case "onMouseMoveCapture": case "onMouseUp": case "onMouseUpCapture": case "onMouseEnter": (u = !u.disabled) || (t = t.type, u = !(t === "button" || t === "input" || t === "select" || t === "textarea")), t = !u; break e; default: t = !1 }if (t) return null; if (i && typeof i != "function") throw Error(a(231, s, typeof i)); return i } var bs = !(typeof window > "u" || typeof window.document > "u" || typeof window.document.createElement > "u"), Zd = !1; if (bs) try { var Za = {}; Object.defineProperty(Za, "passive", { get: function () { Zd = !0 } }), window.addEventListener("test", Za, Za), window.removeEventListener("test", Za, Za) } catch { Zd = !1 } var Zs = null, Wd = null, Bl = null; function kg() { if (Bl) return Bl; var t, s = Wd, i = s.length, u, p = "value" in Zs ? Zs.value : Zs.textContent, v = p.length; for (t = 0; t < i && s[t] === p[t]; t++); var A = i - t; for (u = 1; u <= A && s[i - u] === p[v - u]; u++); return Bl = p.slice(t, 1 < u ? 1 - u : void 0) } function Pl(t) { var s = t.keyCode; return "charCode" in t ? (t = t.charCode, t === 0 && s === 13 && (t = 13)) : t = s, t === 10 && (t = 13), 32 <= t || t === 13 ? t : 0 } function Ul() { return !0 } function Ag() { return !1 } function sn(t) { function s(i, u, p, v, A) { this._reactName = i, this._targetInst = p, this.type = u, this.nativeEvent = v, this.target = A, this.currentTarget = null; for (var z in t) t.hasOwnProperty(z) && (i = t[z], this[z] = i ? i(v) : v[z]); return this.isDefaultPrevented = (v.defaultPrevented != null ? v.defaultPrevented : v.returnValue === !1) ? Ul : Ag, this.isPropagationStopped = Ag, this } return g(s.prototype, { preventDefault: function () { this.defaultPrevented = !0; var i = this.nativeEvent; i && (i.preventDefault ? 
i.preventDefault() : typeof i.returnValue != "unknown" && (i.returnValue = !1), this.isDefaultPrevented = Ul) }, stopPropagation: function () { var i = this.nativeEvent; i && (i.stopPropagation ? i.stopPropagation() : typeof i.cancelBubble != "unknown" && (i.cancelBubble = !0), this.isPropagationStopped = Ul) }, persist: function () { }, isPersistent: Ul }), s } var Ir = { eventPhase: 0, bubbles: 0, cancelable: 0, timeStamp: function (t) { return t.timeStamp || Date.now() }, defaultPrevented: 0, isTrusted: 0 }, Vl = sn(Ir), Wa = g({}, Ir, { view: 0, detail: 0 }), yj = sn(Wa), Kd, Qd, Ka, ql = g({}, Wa, { screenX: 0, screenY: 0, clientX: 0, clientY: 0, pageX: 0, pageY: 0, ctrlKey: 0, shiftKey: 0, altKey: 0, metaKey: 0, getModifierState: ef, button: 0, buttons: 0, relatedTarget: function (t) { return t.relatedTarget === void 0 ? t.fromElement === t.srcElement ? t.toElement : t.fromElement : t.relatedTarget }, movementX: function (t) { return "movementX" in t ? t.movementX : (t !== Ka && (Ka && t.type === "mousemove" ? (Kd = t.screenX - Ka.screenX, Qd = t.screenY - Ka.screenY) : Qd = Kd = 0, Ka = t), Kd) }, movementY: function (t) { return "movementY" in t ? t.movementY : Qd } }), Mg = sn(ql), vj = g({}, ql, { dataTransfer: 0 }), bj = sn(vj), wj = g({}, Wa, { relatedTarget: 0 }), Jd = sn(wj), Nj = g({}, Ir, { animationName: 0, elapsedTime: 0, pseudoElement: 0 }), Sj = sn(Nj), jj = g({}, Ir, { clipboardData: function (t) { return "clipboardData" in t ? 
t.clipboardData : window.clipboardData } }), _j = sn(jj), Ej = g({}, Ir, { data: 0 }), Tg = sn(Ej), Cj = { Esc: "Escape", Spacebar: " ", Left: "ArrowLeft", Up: "ArrowUp", Right: "ArrowRight", Down: "ArrowDown", Del: "Delete", Win: "OS", Menu: "ContextMenu", Apps: "ContextMenu", Scroll: "ScrollLock", MozPrintableKey: "Unidentified" }, kj = { 8: "Backspace", 9: "Tab", 12: "Clear", 13: "Enter", 16: "Shift", 17: "Control", 18: "Alt", 19: "Pause", 20: "CapsLock", 27: "Escape", 32: " ", 33: "PageUp", 34: "PageDown", 35: "End", 36: "Home", 37: "ArrowLeft", 38: "ArrowUp", 39: "ArrowRight", 40: "ArrowDown", 45: "Insert", 46: "Delete", 112: "F1", 113: "F2", 114: "F3", 115: "F4", 116: "F5", 117: "F6", 118: "F7", 119: "F8", 120: "F9", 121: "F10", 122: "F11", 123: "F12", 144: "NumLock", 145: "ScrollLock", 224: "Meta" }, Aj = { Alt: "altKey", Control: "ctrlKey", Meta: "metaKey", Shift: "shiftKey" }; function Mj(t) { var s = this.nativeEvent; return s.getModifierState ? s.getModifierState(t) : (t = Aj[t]) ? !!s[t] : !1 } function ef() { return Mj } var Tj = g({}, Wa, { key: function (t) { if (t.key) { var s = Cj[t.key] || t.key; if (s !== "Unidentified") return s } return t.type === "keypress" ? (t = Pl(t), t === 13 ? "Enter" : String.fromCharCode(t)) : t.type === "keydown" || t.type === "keyup" ? kj[t.keyCode] || "Unidentified" : "" }, code: 0, location: 0, ctrlKey: 0, shiftKey: 0, altKey: 0, metaKey: 0, repeat: 0, locale: 0, getModifierState: ef, charCode: function (t) { return t.type === "keypress" ? Pl(t) : 0 }, keyCode: function (t) { return t.type === "keydown" || t.type === "keyup" ? t.keyCode : 0 }, which: function (t) { return t.type === "keypress" ? Pl(t) : t.type === "keydown" || t.type === "keyup" ? 
t.keyCode : 0 } }), Rj = sn(Tj), Dj = g({}, ql, { pointerId: 0, width: 0, height: 0, pressure: 0, tangentialPressure: 0, tiltX: 0, tiltY: 0, twist: 0, pointerType: 0, isPrimary: 0 }), Rg = sn(Dj), Oj = g({}, Wa, { touches: 0, targetTouches: 0, changedTouches: 0, altKey: 0, metaKey: 0, ctrlKey: 0, shiftKey: 0, getModifierState: ef }), zj = sn(Oj), Ij = g({}, Ir, { propertyName: 0, elapsedTime: 0, pseudoElement: 0 }), Lj = sn(Ij), Hj = g({}, ql, { deltaX: function (t) { return "deltaX" in t ? t.deltaX : "wheelDeltaX" in t ? -t.wheelDeltaX : 0 }, deltaY: function (t) { return "deltaY" in t ? t.deltaY : "wheelDeltaY" in t ? -t.wheelDeltaY : "wheelDelta" in t ? -t.wheelDelta : 0 }, deltaZ: 0, deltaMode: 0 }), $j = sn(Hj), Bj = g({}, Ir, { newState: 0, oldState: 0 }), Pj = sn(Bj), Uj = [9, 13, 27, 32], tf = bs && "CompositionEvent" in window, Qa = null; bs && "documentMode" in document && (Qa = document.documentMode); var Vj = bs && "TextEvent" in window && !Qa, Dg = bs && (!tf || Qa && 8 < Qa && 11 >= Qa), Og = " ", zg = !1; function Ig(t, s) { switch (t) { case "keyup": return Uj.indexOf(s.keyCode) !== -1; case "keydown": return s.keyCode !== 229; case "keypress": case "mousedown": case "focusout": return !0; default: return !1 } } function Lg(t) { return t = t.detail, typeof t == "object" && "data" in t ? t.data : null } var Ao = !1; function qj(t, s) { switch (t) { case "compositionend": return Lg(s); case "keypress": return s.which !== 32 ? null : (zg = !0, Og); case "textInput": return t = s.data, t === Og && zg ? null : t; default: return null } } function Fj(t, s) { if (Ao) return t === "compositionend" || !tf && Ig(t, s) ? 
(t = kg(), Bl = Wd = Zs = null, Ao = !1, t) : null; switch (t) { case "paste": return null; case "keypress": if (!(s.ctrlKey || s.altKey || s.metaKey) || s.ctrlKey && s.altKey) { if (s.char && 1 < s.char.length) return s.char; if (s.which) return String.fromCharCode(s.which) } return null; case "compositionend": return Dg && s.locale !== "ko" ? null : s.data; default: return null } } var Yj = { color: !0, date: !0, datetime: !0, "datetime-local": !0, email: !0, month: !0, number: !0, password: !0, range: !0, search: !0, tel: !0, text: !0, time: !0, url: !0, week: !0 }; function Hg(t) { var s = t && t.nodeName && t.nodeName.toLowerCase(); return s === "input" ? !!Yj[t.type] : s === "textarea" } function $g(t, s, i, u) { Co ? ko ? ko.push(u) : ko = [u] : Co = u, s = Mc(s, "onChange"), 0 < s.length && (i = new Vl("onChange", "change", null, i, u), t.push({ event: i, listeners: s })) } var Ja = null, ei = null; function Gj(t) { vy(t, 0) } function Fl(t) { var s = Gs(t); if (Ya(s)) return t } function Bg(t, s) { if (t === "change") return s } var Pg = !1; if (bs) { var nf; if (bs) { var sf = "oninput" in document; if (!sf) { var Ug = document.createElement("div"); Ug.setAttribute("oninput", "return;"), sf = typeof Ug.oninput == "function" } nf = sf } else nf = !1; Pg = nf && (!document.documentMode || 9 < document.documentMode) } function Vg() { Ja && (Ja.detachEvent("onpropertychange", qg), ei = Ja = null) } function qg(t) { if (t.propertyName === "value" && Fl(ei)) { var s = []; $g(s, ei, t, Gd(t)), Cg(Gj, s) } } function Xj(t, s, i) { t === "focusin" ? 
(Vg(), Ja = s, ei = i, Ja.attachEvent("onpropertychange", qg)) : t === "focusout" && Vg() } function Zj(t) { if (t === "selectionchange" || t === "keyup" || t === "keydown") return Fl(ei) } function Wj(t, s) { if (t === "click") return Fl(s) } function Kj(t, s) { if (t === "input" || t === "change") return Fl(s) } function Qj(t, s) { return t === s && (t !== 0 || 1 / t === 1 / s) || t !== t && s !== s } var cn = typeof Object.is == "function" ? Object.is : Qj; function ti(t, s) { if (cn(t, s)) return !0; if (typeof t != "object" || t === null || typeof s != "object" || s === null) return !1; var i = Object.keys(t), u = Object.keys(s); if (i.length !== u.length) return !1; for (u = 0; u < i.length; u++) { var p = i[u]; if (!ne.call(s, p) || !cn(t[p], s[p])) return !1 } return !0 } function Fg(t) { for (; t && t.firstChild;)t = t.firstChild; return t } function Yg(t, s) { var i = Fg(t); t = 0; for (var u; i;) { if (i.nodeType === 3) { if (u = t + i.textContent.length, t <= s && u >= s) return { node: i, offset: s - t }; t = u } e: { for (; i;) { if (i.nextSibling) { i = i.nextSibling; break e } i = i.parentNode } i = void 0 } i = Fg(i) } } function Gg(t, s) { return t && s ? t === s ? !0 : t && t.nodeType === 3 ? !1 : s && s.nodeType === 3 ? Gg(t, s.parentNode) : "contains" in t ? t.contains(s) : t.compareDocumentPosition ? !!(t.compareDocumentPosition(s) & 16) : !1 : !1 } function Xg(t) { t = t != null && t.ownerDocument != null && t.ownerDocument.defaultView != null ? 
t.ownerDocument.defaultView : window; for (var s = _o(t.document); s instanceof t.HTMLIFrameElement;) { try { var i = typeof s.contentWindow.location.href == "string" } catch { i = !1 } if (i) t = s.contentWindow; else break; s = _o(t.document) } return s } function rf(t) { var s = t && t.nodeName && t.nodeName.toLowerCase(); return s && (s === "input" && (t.type === "text" || t.type === "search" || t.type === "tel" || t.type === "url" || t.type === "password") || s === "textarea" || t.contentEditable === "true") } var Jj = bs && "documentMode" in document && 11 >= document.documentMode, Mo = null, of = null, ni = null, af = !1; function Zg(t, s, i) { var u = i.window === i ? i.document : i.nodeType === 9 ? i : i.ownerDocument; af || Mo == null || Mo !== _o(u) || (u = Mo, "selectionStart" in u && rf(u) ? u = { start: u.selectionStart, end: u.selectionEnd } : (u = (u.ownerDocument && u.ownerDocument.defaultView || window).getSelection(), u = { anchorNode: u.anchorNode, anchorOffset: u.anchorOffset, focusNode: u.focusNode, focusOffset: u.focusOffset }), ni && ti(ni, u) || (ni = u, u = Mc(of, "onSelect"), 0 < u.length && (s = new Vl("onSelect", "select", null, s, i), t.push({ event: s, listeners: u }), s.target = Mo))) } function Lr(t, s) { var i = {}; return i[t.toLowerCase()] = s.toLowerCase(), i["Webkit" + t] = "webkit" + s, i["Moz" + t] = "moz" + s, i } var To = { animationend: Lr("Animation", "AnimationEnd"), animationiteration: Lr("Animation", "AnimationIteration"), animationstart: Lr("Animation", "AnimationStart"), transitionrun: Lr("Transition", "TransitionRun"), transitionstart: Lr("Transition", "TransitionStart"), transitioncancel: Lr("Transition", "TransitionCancel"), transitionend: Lr("Transition", "TransitionEnd") }, lf = {}, Wg = {}; bs && (Wg = document.createElement("div").style, "AnimationEvent" in window || (delete To.animationend.animation, delete To.animationiteration.animation, delete To.animationstart.animation), "TransitionEvent" in window || 
delete To.transitionend.transition); function Hr(t) { if (lf[t]) return lf[t]; if (!To[t]) return t; var s = To[t], i; for (i in s) if (s.hasOwnProperty(i) && i in Wg) return lf[t] = s[i]; return t } var Kg = Hr("animationend"), Qg = Hr("animationiteration"), Jg = Hr("animationstart"), e_ = Hr("transitionrun"), t_ = Hr("transitionstart"), n_ = Hr("transitioncancel"), ex = Hr("transitionend"), tx = new Map, cf = "abort auxClick beforeToggle cancel canPlay canPlayThrough click close contextMenu copy cut drag dragEnd dragEnter dragExit dragLeave dragOver dragStart drop durationChange emptied encrypted ended error gotPointerCapture input invalid keyDown keyPress keyUp load loadedData loadedMetadata loadStart lostPointerCapture mouseDown mouseMove mouseOut mouseOver mouseUp paste pause play playing pointerCancel pointerDown pointerMove pointerOut pointerOver pointerUp progress rateChange reset resize seeked seeking stalled submit suspend timeUpdate touchCancel touchEnd touchStart volumeChange scroll toggle touchMove waiting wheel".split(" "); cf.push("scrollEnd"); function In(t, s) { tx.set(t, s), xs(s, [t]) } var nx = new WeakMap; function Nn(t, s) { if (typeof t == "object" && t !== null) { var i = nx.get(t); return i !== void 0 ? i : (s = { value: t, source: s, stack: Il(s) }, nx.set(t, s), s) } return { value: t, source: s, stack: Il(s) } } var Sn = [], Ro = 0, uf = 0; function Yl() { for (var t = Ro, s = uf = Ro = 0; s < t;) { var i = Sn[s]; Sn[s++] = null; var u = Sn[s]; Sn[s++] = null; var p = Sn[s]; Sn[s++] = null; var v = Sn[s]; if (Sn[s++] = null, u !== null && p !== null) { var A = u.pending; A === null ? 
p.next = p : (p.next = A.next, A.next = p), u.pending = p } v !== 0 && sx(i, p, v) } } function Gl(t, s, i, u) { Sn[Ro++] = t, Sn[Ro++] = s, Sn[Ro++] = i, Sn[Ro++] = u, uf |= u, t.lanes |= u, t = t.alternate, t !== null && (t.lanes |= u) } function df(t, s, i, u) { return Gl(t, s, i, u), Xl(t) } function Do(t, s) { return Gl(t, null, null, s), Xl(t) } function sx(t, s, i) { t.lanes |= i; var u = t.alternate; u !== null && (u.lanes |= i); for (var p = !1, v = t.return; v !== null;)v.childLanes |= i, u = v.alternate, u !== null && (u.childLanes |= i), v.tag === 22 && (t = v.stateNode, t === null || t._visibility & 1 || (p = !0)), t = v, v = v.return; return t.tag === 3 ? (v = t.stateNode, p && s !== null && (p = 31 - yt(i), t = v.hiddenUpdates, u = t[p], u === null ? t[p] = [s] : u.push(s), s.lane = i | 536870912), v) : null } function Xl(t) { if (50 < Ci) throw Ci = 0, xm = null, Error(a(185)); for (var s = t.return; s !== null;)t = s, s = t.return; return t.tag === 3 ? t.stateNode : null } var Oo = {}; function s_(t, s, i, u) { this.tag = t, this.key = i, this.sibling = this.child = this.return = this.stateNode = this.type = this.elementType = null, this.index = 0, this.refCleanup = this.ref = null, this.pendingProps = s, this.dependencies = this.memoizedState = this.updateQueue = this.memoizedProps = null, this.mode = u, this.subtreeFlags = this.flags = 0, this.deletions = null, this.childLanes = this.lanes = 0, this.alternate = null } function un(t, s, i, u) { return new s_(t, s, i, u) } function ff(t) { return t = t.prototype, !(!t || !t.isReactComponent) } function ws(t, s) { var i = t.alternate; return i === null ? 
(i = un(t.tag, s, t.key, t.mode), i.elementType = t.elementType, i.type = t.type, i.stateNode = t.stateNode, i.alternate = t, t.alternate = i) : (i.pendingProps = s, i.type = t.type, i.flags = 0, i.subtreeFlags = 0, i.deletions = null), i.flags = t.flags & 65011712, i.childLanes = t.childLanes, i.lanes = t.lanes, i.child = t.child, i.memoizedProps = t.memoizedProps, i.memoizedState = t.memoizedState, i.updateQueue = t.updateQueue, s = t.dependencies, i.dependencies = s === null ? null : { lanes: s.lanes, firstContext: s.firstContext }, i.sibling = t.sibling, i.index = t.index, i.ref = t.ref, i.refCleanup = t.refCleanup, i } function rx(t, s) { t.flags &= 65011714; var i = t.alternate; return i === null ? (t.childLanes = 0, t.lanes = s, t.child = null, t.subtreeFlags = 0, t.memoizedProps = null, t.memoizedState = null, t.updateQueue = null, t.dependencies = null, t.stateNode = null) : (t.childLanes = i.childLanes, t.lanes = i.lanes, t.child = i.child, t.subtreeFlags = 0, t.deletions = null, t.memoizedProps = i.memoizedProps, t.memoizedState = i.memoizedState, t.updateQueue = i.updateQueue, t.type = i.type, s = i.dependencies, t.dependencies = s === null ? null : { lanes: s.lanes, firstContext: s.firstContext }), t } function Zl(t, s, i, u, p, v) { var A = 0; if (u = t, typeof t == "function") ff(t) && (A = 1); else if (typeof t == "string") A = oE(t, i, W.current) ? 26 : t === "html" || t === "head" || t === "body" ? 
27 : 5; else e: switch (t) { case q: return t = un(31, i, s, p), t.elementType = q, t.lanes = v, t; case S: return $r(i.children, p, v, s); case N: A = 8, p |= 24; break; case j: return t = un(12, i, s, p | 2), t.elementType = j, t.lanes = v, t; case R: return t = un(13, i, s, p), t.elementType = R, t.lanes = v, t; case D: return t = un(19, i, s, p), t.elementType = D, t.lanes = v, t; default: if (typeof t == "object" && t !== null) switch (t.$$typeof) { case _: case E: A = 10; break e; case M: A = 9; break e; case T: A = 11; break e; case O: A = 14; break e; case B: A = 16, u = null; break e }A = 29, i = Error(a(130, t === null ? "null" : typeof t, "")), u = null }return s = un(A, i, s, p), s.elementType = t, s.type = u, s.lanes = v, s } function $r(t, s, i, u) { return t = un(7, t, u, s), t.lanes = i, t } function mf(t, s, i) { return t = un(6, t, null, s), t.lanes = i, t } function hf(t, s, i) { return s = un(4, t.children !== null ? t.children : [], t.key, s), s.lanes = i, s.stateNode = { containerInfo: t.containerInfo, pendingChildren: null, implementation: t.implementation }, s } var zo = [], Io = 0, Wl = null, Kl = 0, jn = [], _n = 0, Br = null, Ns = 1, Ss = ""; function Pr(t, s) { zo[Io++] = Kl, zo[Io++] = Wl, Wl = t, Kl = s } function ox(t, s, i) { jn[_n++] = Ns, jn[_n++] = Ss, jn[_n++] = Br, Br = t; var u = Ns; t = Ss; var p = 32 - yt(u) - 1; u &= ~(1 << p), i += 1; var v = 32 - yt(s) + p; if (30 < v) { var A = p - p % 5; v = (u & (1 << A) - 1).toString(32), u >>= A, p -= A, Ns = 1 << 32 - yt(s) + p | i << p | u, Ss = v + t } else Ns = 1 << v | i << p | u, Ss = t } function pf(t) { t.return !== null && (Pr(t, 1), ox(t, 1, 0)) } function gf(t) { for (; t === Wl;)Wl = zo[--Io], zo[Io] = null, Kl = zo[--Io], zo[Io] = null; for (; t === Br;)Br = jn[--_n], jn[_n] = null, Ss = jn[--_n], jn[_n] = null, Ns = jn[--_n], jn[_n] = null } var Kt = null, jt = null, at = !1, Ur = null, Zn = !1, xf = Error(a(519)); function Vr(t) { var s = Error(a(418, "")); throw 
oi(Nn(s, t)), xf } function ax(t) { var s = t.stateNode, i = t.type, u = t.memoizedProps; switch (s[Ht] = t, s[Xt] = u, i) { case "dialog": et("cancel", s), et("close", s); break; case "iframe": case "object": case "embed": et("load", s); break; case "video": case "audio": for (i = 0; i < Ai.length; i++)et(Ai[i], s); break; case "source": et("error", s); break; case "img": case "image": case "link": et("error", s), et("load", s); break; case "details": et("toggle", s); break; case "input": et("invalid", s), Hl(s, u.value, u.defaultValue, u.checked, u.defaultChecked, u.type, u.name, !0), jo(s); break; case "select": et("invalid", s); break; case "textarea": et("invalid", s), Sg(s, u.value, u.defaultValue, u.children), jo(s) }i = u.children, typeof i != "string" && typeof i != "number" && typeof i != "bigint" || s.textContent === "" + i || u.suppressHydrationWarning === !0 || Sy(s.textContent, i) ? (u.popover != null && (et("beforetoggle", s), et("toggle", s)), u.onScroll != null && et("scroll", s), u.onScrollEnd != null && et("scrollend", s), u.onClick != null && (s.onclick = Tc), s = !0) : s = !1, s || Vr(t) } function ix(t) { for (Kt = t.return; Kt;)switch (Kt.tag) { case 5: case 13: Zn = !1; return; case 27: case 3: Zn = !0; return; default: Kt = Kt.return } } function si(t) { if (t !== Kt) return !1; if (!at) return ix(t), at = !0, !1; var s = t.tag, i; if ((i = s !== 3 && s !== 27) && ((i = s === 5) && (i = t.type, i = !(i !== "form" && i !== "button") || Dm(t.type, t.memoizedProps)), i = !i), i && jt && Vr(t), ix(t), s === 13) { if (t = t.memoizedState, t = t !== null ? t.dehydrated : null, !t) throw Error(a(317)); e: { for (t = t.nextSibling, s = 0; t;) { if (t.nodeType === 8) if (i = t.data, i === "/$") { if (s === 0) { jt = Hn(t.nextSibling); break e } s-- } else i !== "$" && i !== "$!" && i !== "$?" || s++; t = t.nextSibling } jt = null } } else s === 27 ? (s = jt, dr(t.type) ? (t = Lm, Lm = null, jt = t) : jt = s) : jt = Kt ? 
Hn(t.stateNode.nextSibling) : null; return !0 } function ri() { jt = Kt = null, at = !1 } function lx() { var t = Ur; return t !== null && (an === null ? an = t : an.push.apply(an, t), Ur = null), t } function oi(t) { Ur === null ? Ur = [t] : Ur.push(t) } var yf = $(null), qr = null, js = null; function Ws(t, s, i) { V(yf, s._currentValue), s._currentValue = i } function _s(t) { t._currentValue = yf.current, Y(yf) } function vf(t, s, i) { for (; t !== null;) { var u = t.alternate; if ((t.childLanes & s) !== s ? (t.childLanes |= s, u !== null && (u.childLanes |= s)) : u !== null && (u.childLanes & s) !== s && (u.childLanes |= s), t === i) break; t = t.return } } function bf(t, s, i, u) { var p = t.child; for (p !== null && (p.return = t); p !== null;) { var v = p.dependencies; if (v !== null) { var A = p.child; v = v.firstContext; e: for (; v !== null;) { var z = v; v = p; for (var F = 0; F < s.length; F++)if (z.context === s[F]) { v.lanes |= i, z = v.alternate, z !== null && (z.lanes |= i), vf(v.return, i, t), u || (A = null); break e } v = z.next } } else if (p.tag === 18) { if (A = p.return, A === null) throw Error(a(341)); A.lanes |= i, v = A.alternate, v !== null && (v.lanes |= i), vf(A, i, t), A = null } else A = p.child; if (A !== null) A.return = p; else for (A = p; A !== null;) { if (A === t) { A = null; break } if (p = A.sibling, p !== null) { p.return = A.return, A = p; break } A = A.return } p = A } } function ai(t, s, i, u) { t = null; for (var p = s, v = !1; p !== null;) { if (!v) { if ((p.flags & 524288) !== 0) v = !0; else if ((p.flags & 262144) !== 0) break } if (p.tag === 10) { var A = p.alternate; if (A === null) throw Error(a(387)); if (A = A.memoizedProps, A !== null) { var z = p.type; cn(p.pendingProps.value, A.value) || (t !== null ? t.push(z) : t = [z]) } } else if (p === te.current) { if (A = p.alternate, A === null) throw Error(a(387)); A.memoizedState.memoizedState !== p.memoizedState.memoizedState && (t !== null ? 
t.push(zi) : t = [zi]) } p = p.return } t !== null && bf(s, t, i, u), s.flags |= 262144 } function Ql(t) { for (t = t.firstContext; t !== null;) { if (!cn(t.context._currentValue, t.memoizedValue)) return !0; t = t.next } return !1 } function Fr(t) { qr = t, js = null, t = t.dependencies, t !== null && (t.firstContext = null) } function Zt(t) { return cx(qr, t) } function Jl(t, s) { return qr === null && Fr(t), cx(t, s) } function cx(t, s) { var i = s._currentValue; if (s = { context: s, memoizedValue: i, next: null }, js === null) { if (t === null) throw Error(a(308)); js = s, t.dependencies = { lanes: 0, firstContext: s }, t.flags |= 524288 } else js = js.next = s; return i } var r_ = typeof AbortController < "u" ? AbortController : function () { var t = [], s = this.signal = { aborted: !1, addEventListener: function (i, u) { t.push(u) } }; this.abort = function () { s.aborted = !0, t.forEach(function (i) { return i() }) } }, o_ = e.unstable_scheduleCallback, a_ = e.unstable_NormalPriority, Ot = { $$typeof: E, Consumer: null, Provider: null, _currentValue: null, _currentValue2: null, _threadCount: 0 }; function wf() { return { controller: new r_, data: new Map, refCount: 0 } } function ii(t) { t.refCount--, t.refCount === 0 && o_(a_, function () { t.controller.abort() }) } var li = null, Nf = 0, Lo = 0, Ho = null; function i_(t, s) { if (li === null) { var i = li = []; Nf = 0, Lo = jm(), Ho = { status: "pending", value: void 0, then: function (u) { i.push(u) } } } return Nf++, s.then(ux, ux), s } function ux() { if (--Nf === 0 && li !== null) { Ho !== null && (Ho.status = "fulfilled"); var t = li; li = null, Lo = 0, Ho = null; for (var s = 0; s < t.length; s++)(0, t[s])() } } function l_(t, s) { var i = [], u = { status: "pending", value: null, reason: null, then: function (p) { i.push(p) } }; return t.then(function () { u.status = "fulfilled", u.value = s; for (var p = 0; p < i.length; p++)(0, i[p])(s) }, function (p) { for (u.status = "rejected", u.reason = p, 
p = 0; p < i.length; p++)(0, i[p])(void 0) }), u } var dx = k.S; k.S = function (t, s) { typeof s == "object" && s !== null && typeof s.then == "function" && i_(t, s), dx !== null && dx(t, s) }; var Yr = $(null); function Sf() { var t = Yr.current; return t !== null ? t : gt.pooledCache } function ec(t, s) { s === null ? V(Yr, Yr.current) : V(Yr, s.pool) } function fx() { var t = Sf(); return t === null ? null : { parent: Ot._currentValue, pool: t } } var ci = Error(a(460)), mx = Error(a(474)), tc = Error(a(542)), jf = { then: function () { } }; function hx(t) { return t = t.status, t === "fulfilled" || t === "rejected" } function nc() { } function px(t, s, i) { switch (i = t[i], i === void 0 ? t.push(s) : i !== s && (s.then(nc, nc), s = i), s.status) { case "fulfilled": return s.value; case "rejected": throw t = s.reason, xx(t), t; default: if (typeof s.status == "string") s.then(nc, nc); else { if (t = gt, t !== null && 100 < t.shellSuspendCounter) throw Error(a(482)); t = s, t.status = "pending", t.then(function (u) { if (s.status === "pending") { var p = s; p.status = "fulfilled", p.value = u } }, function (u) { if (s.status === "pending") { var p = s; p.status = "rejected", p.reason = u } }) } switch (s.status) { case "fulfilled": return s.value; case "rejected": throw t = s.reason, xx(t), t }throw ui = s, ci } } var ui = null; function gx() { if (ui === null) throw Error(a(459)); var t = ui; return ui = null, t } function xx(t) { if (t === ci || t === tc) throw Error(a(483)) } var Ks = !1; function _f(t) { t.updateQueue = { baseState: t.memoizedState, firstBaseUpdate: null, lastBaseUpdate: null, shared: { pending: null, lanes: 0, hiddenCallbacks: null }, callbacks: null } } function Ef(t, s) { t = t.updateQueue, s.updateQueue === t && (s.updateQueue = { baseState: t.baseState, firstBaseUpdate: t.firstBaseUpdate, lastBaseUpdate: t.lastBaseUpdate, shared: t.shared, callbacks: null }) } function Qs(t) { return { lane: t, tag: 0, payload: null, callback: null, 
next: null } } function Js(t, s, i) { var u = t.updateQueue; if (u === null) return null; if (u = u.shared, (ct & 2) !== 0) { var p = u.pending; return p === null ? s.next = s : (s.next = p.next, p.next = s), u.pending = s, s = Xl(t), sx(t, null, i), s } return Gl(t, u, s, i), Xl(t) } function di(t, s, i) { if (s = s.updateQueue, s !== null && (s = s.shared, (i & 4194048) !== 0)) { var u = s.lanes; u &= t.pendingLanes, i |= u, s.lanes = i, La(t, i) } } function Cf(t, s) { var i = t.updateQueue, u = t.alternate; if (u !== null && (u = u.updateQueue, i === u)) { var p = null, v = null; if (i = i.firstBaseUpdate, i !== null) { do { var A = { lane: i.lane, tag: i.tag, payload: i.payload, callback: null, next: null }; v === null ? p = v = A : v = v.next = A, i = i.next } while (i !== null); v === null ? p = v = s : v = v.next = s } else p = v = s; i = { baseState: u.baseState, firstBaseUpdate: p, lastBaseUpdate: v, shared: u.shared, callbacks: u.callbacks }, t.updateQueue = i; return } t = i.lastBaseUpdate, t === null ? i.firstBaseUpdate = s : t.next = s, i.lastBaseUpdate = s } var kf = !1; function fi() { if (kf) { var t = Ho; if (t !== null) throw t } } function mi(t, s, i, u) { kf = !1; var p = t.updateQueue; Ks = !1; var v = p.firstBaseUpdate, A = p.lastBaseUpdate, z = p.shared.pending; if (z !== null) { p.shared.pending = null; var F = z, re = F.next; F.next = null, A === null ? v = re : A.next = re, A = F; var le = t.alternate; le !== null && (le = le.updateQueue, z = le.lastBaseUpdate, z !== A && (z === null ? le.firstBaseUpdate = re : z.next = re, le.lastBaseUpdate = F)) } if (v !== null) { var me = p.baseState; A = 0, le = re = F = null, z = v; do { var oe = z.lane & -536870913, ae = oe !== z.lane; if (ae ? 
(nt & oe) === oe : (u & oe) === oe) { oe !== 0 && oe === Lo && (kf = !0), le !== null && (le = le.next = { lane: 0, tag: z.tag, payload: z.payload, callback: null, next: null }); e: { var qe = t, Be = z; oe = s; var mt = i; switch (Be.tag) { case 1: if (qe = Be.payload, typeof qe == "function") { me = qe.call(mt, me, oe); break e } me = qe; break e; case 3: qe.flags = qe.flags & -65537 | 128; case 0: if (qe = Be.payload, oe = typeof qe == "function" ? qe.call(mt, me, oe) : qe, oe == null) break e; me = g({}, me, oe); break e; case 2: Ks = !0 } } oe = z.callback, oe !== null && (t.flags |= 64, ae && (t.flags |= 8192), ae = p.callbacks, ae === null ? p.callbacks = [oe] : ae.push(oe)) } else ae = { lane: oe, tag: z.tag, payload: z.payload, callback: z.callback, next: null }, le === null ? (re = le = ae, F = me) : le = le.next = ae, A |= oe; if (z = z.next, z === null) { if (z = p.shared.pending, z === null) break; ae = z, z = ae.next, ae.next = null, p.lastBaseUpdate = ae, p.shared.pending = null } } while (!0); le === null && (F = me), p.baseState = F, p.firstBaseUpdate = re, p.lastBaseUpdate = le, v === null && (p.shared.lanes = 0), ir |= A, t.lanes = A, t.memoizedState = me } } function yx(t, s) { if (typeof t != "function") throw Error(a(191, t)); t.call(s) } function vx(t, s) { var i = t.callbacks; if (i !== null) for (t.callbacks = null, t = 0; t < i.length; t++)yx(i[t], s) } var $o = $(null), sc = $(0); function bx(t, s) { t = Rs, V(sc, t), V($o, s), Rs = t | s.baseLanes } function Af() { V(sc, Rs), V($o, $o.current) } function Mf() { Rs = sc.current, Y($o), Y(sc) } var er = 0, Ze = null, dt = null, Tt = null, rc = !1, Bo = !1, Gr = !1, oc = 0, hi = 0, Po = null, c_ = 0; function Ct() { throw Error(a(321)) } function Tf(t, s) { if (s === null) return !1; for (var i = 0; i < s.length && i < t.length; i++)if (!cn(t[i], s[i])) return !1; return !0 } function Rf(t, s, i, u, p, v) { return er = v, Ze = s, s.memoizedState = null, s.updateQueue = null, s.lanes = 0, 
k.H = t === null || t.memoizedState === null ? s0 : r0, Gr = !1, v = i(u, p), Gr = !1, Bo && (v = Nx(s, i, u, p)), wx(t), v } function wx(t) { k.H = dc; var s = dt !== null && dt.next !== null; if (er = 0, Tt = dt = Ze = null, rc = !1, hi = 0, Po = null, s) throw Error(a(300)); t === null || $t || (t = t.dependencies, t !== null && Ql(t) && ($t = !0)) } function Nx(t, s, i, u) { Ze = t; var p = 0; do { if (Bo && (Po = null), hi = 0, Bo = !1, 25 <= p) throw Error(a(301)); if (p += 1, Tt = dt = null, t.updateQueue != null) { var v = t.updateQueue; v.lastEffect = null, v.events = null, v.stores = null, v.memoCache != null && (v.memoCache.index = 0) } k.H = g_, v = s(i, u) } while (Bo); return v } function u_() { var t = k.H, s = t.useState()[0]; return s = typeof s.then == "function" ? pi(s) : s, t = t.useState()[0], (dt !== null ? dt.memoizedState : null) !== t && (Ze.flags |= 1024), s } function Df() { var t = oc !== 0; return oc = 0, t } function Of(t, s, i) { s.updateQueue = t.updateQueue, s.flags &= -2053, t.lanes &= ~i } function zf(t) { if (rc) { for (t = t.memoizedState; t !== null;) { var s = t.queue; s !== null && (s.pending = null), t = t.next } rc = !1 } er = 0, Tt = dt = Ze = null, Bo = !1, hi = oc = 0, Po = null } function rn() { var t = { memoizedState: null, baseState: null, baseQueue: null, queue: null, next: null }; return Tt === null ? Ze.memoizedState = Tt = t : Tt = Tt.next = t, Tt } function Rt() { if (dt === null) { var t = Ze.alternate; t = t !== null ? t.memoizedState : null } else t = dt.next; var s = Tt === null ? Ze.memoizedState : Tt.next; if (s !== null) Tt = s, dt = t; else { if (t === null) throw Ze.alternate === null ? Error(a(467)) : Error(a(310)); dt = t, t = { memoizedState: dt.memoizedState, baseState: dt.baseState, baseQueue: dt.baseQueue, queue: dt.queue, next: null }, Tt === null ? 
Ze.memoizedState = Tt = t : Tt = Tt.next = t } return Tt } function If() { return { lastEffect: null, events: null, stores: null, memoCache: null } } function pi(t) { var s = hi; return hi += 1, Po === null && (Po = []), t = px(Po, t, s), s = Ze, (Tt === null ? s.memoizedState : Tt.next) === null && (s = s.alternate, k.H = s === null || s.memoizedState === null ? s0 : r0), t } function ac(t) { if (t !== null && typeof t == "object") { if (typeof t.then == "function") return pi(t); if (t.$$typeof === E) return Zt(t) } throw Error(a(438, String(t))) } function Lf(t) { var s = null, i = Ze.updateQueue; if (i !== null && (s = i.memoCache), s == null) { var u = Ze.alternate; u !== null && (u = u.updateQueue, u !== null && (u = u.memoCache, u != null && (s = { data: u.data.map(function (p) { return p.slice() }), index: 0 }))) } if (s == null && (s = { data: [], index: 0 }), i === null && (i = If(), Ze.updateQueue = i), i.memoCache = s, i = s.data[s.index], i === void 0) for (i = s.data[s.index] = Array(t), u = 0; u < t; u++)i[u] = K; return s.index++, i } function Es(t, s) { return typeof s == "function" ? s(t) : s } function ic(t) { var s = Rt(); return Hf(s, dt, t) } function Hf(t, s, i) { var u = t.queue; if (u === null) throw Error(a(311)); u.lastRenderedReducer = i; var p = t.baseQueue, v = u.pending; if (v !== null) { if (p !== null) { var A = p.next; p.next = v.next, v.next = A } s.baseQueue = p = v, u.pending = null } if (v = t.baseState, p === null) t.memoizedState = v; else { s = p.next; var z = A = null, F = null, re = s, le = !1; do { var me = re.lane & -536870913; if (me !== re.lane ? 
(nt & me) === me : (er & me) === me) { var oe = re.revertLane; if (oe === 0) F !== null && (F = F.next = { lane: 0, revertLane: 0, action: re.action, hasEagerState: re.hasEagerState, eagerState: re.eagerState, next: null }), me === Lo && (le = !0); else if ((er & oe) === oe) { re = re.next, oe === Lo && (le = !0); continue } else me = { lane: 0, revertLane: re.revertLane, action: re.action, hasEagerState: re.hasEagerState, eagerState: re.eagerState, next: null }, F === null ? (z = F = me, A = v) : F = F.next = me, Ze.lanes |= oe, ir |= oe; me = re.action, Gr && i(v, me), v = re.hasEagerState ? re.eagerState : i(v, me) } else oe = { lane: me, revertLane: re.revertLane, action: re.action, hasEagerState: re.hasEagerState, eagerState: re.eagerState, next: null }, F === null ? (z = F = oe, A = v) : F = F.next = oe, Ze.lanes |= me, ir |= me; re = re.next } while (re !== null && re !== s); if (F === null ? A = v : F.next = z, !cn(v, t.memoizedState) && ($t = !0, le && (i = Ho, i !== null))) throw i; t.memoizedState = v, t.baseState = A, t.baseQueue = F, u.lastRenderedState = v } return p === null && (u.lanes = 0), [t.memoizedState, u.dispatch] } function $f(t) { var s = Rt(), i = s.queue; if (i === null) throw Error(a(311)); i.lastRenderedReducer = t; var u = i.dispatch, p = i.pending, v = s.memoizedState; if (p !== null) { i.pending = null; var A = p = p.next; do v = t(v, A.action), A = A.next; while (A !== p); cn(v, s.memoizedState) || ($t = !0), s.memoizedState = v, s.baseQueue === null && (s.baseState = v), i.lastRenderedState = v } return [v, u] } function Sx(t, s, i) { var u = Ze, p = Rt(), v = at; if (v) { if (i === void 0) throw Error(a(407)); i = i() } else i = s(); var A = !cn((dt || p).memoizedState, i); A && (p.memoizedState = i, $t = !0), p = p.queue; var z = Ex.bind(null, u, p, t); if (gi(2048, 8, z, [t]), p.getSnapshot !== s || A || Tt !== null && Tt.memoizedState.tag & 1) { if (u.flags |= 2048, Uo(9, lc(), _x.bind(null, u, p, i, s), null), gt === null) 
throw Error(a(349)); v || (er & 124) !== 0 || jx(u, s, i) } return i } function jx(t, s, i) { t.flags |= 16384, t = { getSnapshot: s, value: i }, s = Ze.updateQueue, s === null ? (s = If(), Ze.updateQueue = s, s.stores = [t]) : (i = s.stores, i === null ? s.stores = [t] : i.push(t)) } function _x(t, s, i, u) { s.value = i, s.getSnapshot = u, Cx(s) && kx(t) } function Ex(t, s, i) { return i(function () { Cx(s) && kx(t) }) } function Cx(t) { var s = t.getSnapshot; t = t.value; try { var i = s(); return !cn(t, i) } catch { return !0 } } function kx(t) { var s = Do(t, 2); s !== null && pn(s, t, 2) } function Bf(t) { var s = rn(); if (typeof t == "function") { var i = t; if (t = i(), Gr) { Nt(!0); try { i() } finally { Nt(!1) } } } return s.memoizedState = s.baseState = t, s.queue = { pending: null, lanes: 0, dispatch: null, lastRenderedReducer: Es, lastRenderedState: t }, s } function Ax(t, s, i, u) { return t.baseState = i, Hf(t, dt, typeof u == "function" ? u : Es) } function d_(t, s, i, u, p) { if (uc(t)) throw Error(a(485)); if (t = s.action, t !== null) { var v = { payload: p, action: t, next: null, isTransition: !0, status: "pending", value: null, reason: null, listeners: [], then: function (A) { v.listeners.push(A) } }; k.T !== null ? i(!0) : v.isTransition = !1, u(v), i = s.pending, i === null ? (v.next = s.pending = v, Mx(s, v)) : (v.next = i.next, s.pending = i.next = v) } } function Mx(t, s) { var i = s.action, u = s.payload, p = t.state; if (s.isTransition) { var v = k.T, A = {}; k.T = A; try { var z = i(p, u), F = k.S; F !== null && F(A, z), Tx(t, s, z) } catch (re) { Pf(t, s, re) } finally { k.T = v } } else try { v = i(p, u), Tx(t, s, v) } catch (re) { Pf(t, s, re) } } function Tx(t, s, i) { i !== null && typeof i == "object" && typeof i.then == "function" ? 
i.then(function (u) { Rx(t, s, u) }, function (u) { return Pf(t, s, u) }) : Rx(t, s, i) } function Rx(t, s, i) { s.status = "fulfilled", s.value = i, Dx(s), t.state = i, s = t.pending, s !== null && (i = s.next, i === s ? t.pending = null : (i = i.next, s.next = i, Mx(t, i))) } function Pf(t, s, i) { var u = t.pending; if (t.pending = null, u !== null) { u = u.next; do s.status = "rejected", s.reason = i, Dx(s), s = s.next; while (s !== u) } t.action = null } function Dx(t) { t = t.listeners; for (var s = 0; s < t.length; s++)(0, t[s])() } function Ox(t, s) { return s } function zx(t, s) { if (at) { var i = gt.formState; if (i !== null) { e: { var u = Ze; if (at) { if (jt) { t: { for (var p = jt, v = Zn; p.nodeType !== 8;) { if (!v) { p = null; break t } if (p = Hn(p.nextSibling), p === null) { p = null; break t } } v = p.data, p = v === "F!" || v === "F" ? p : null } if (p) { jt = Hn(p.nextSibling), u = p.data === "F!"; break e } } Vr(u) } u = !1 } u && (s = i[0]) } } return i = rn(), i.memoizedState = i.baseState = s, u = { pending: null, lanes: 0, dispatch: null, lastRenderedReducer: Ox, lastRenderedState: s }, i.queue = u, i = e0.bind(null, Ze, u), u.dispatch = i, u = Bf(!1), v = Yf.bind(null, Ze, !1, u.queue), u = rn(), p = { state: s, dispatch: null, action: t, pending: null }, u.queue = p, i = d_.bind(null, Ze, p, v, i), p.dispatch = i, u.memoizedState = t, [s, i, !1] } function Ix(t) { var s = Rt(); return Lx(s, dt, t) } function Lx(t, s, i) { if (s = Hf(t, s, Ox)[0], t = ic(Es)[0], typeof s == "object" && s !== null && typeof s.then == "function") try { var u = pi(s) } catch (A) { throw A === ci ? 
tc : A } else u = s; s = Rt(); var p = s.queue, v = p.dispatch; return i !== s.memoizedState && (Ze.flags |= 2048, Uo(9, lc(), f_.bind(null, p, i), null)), [u, v, t] } function f_(t, s) { t.action = s } function Hx(t) { var s = Rt(), i = dt; if (i !== null) return Lx(s, i, t); Rt(), s = s.memoizedState, i = Rt(); var u = i.queue.dispatch; return i.memoizedState = t, [s, u, !1] } function Uo(t, s, i, u) { return t = { tag: t, create: i, deps: u, inst: s, next: null }, s = Ze.updateQueue, s === null && (s = If(), Ze.updateQueue = s), i = s.lastEffect, i === null ? s.lastEffect = t.next = t : (u = i.next, i.next = t, t.next = u, s.lastEffect = t), t } function lc() { return { destroy: void 0, resource: void 0 } } function $x() { return Rt().memoizedState } function cc(t, s, i, u) { var p = rn(); u = u === void 0 ? null : u, Ze.flags |= t, p.memoizedState = Uo(1 | s, lc(), i, u) } function gi(t, s, i, u) { var p = Rt(); u = u === void 0 ? null : u; var v = p.memoizedState.inst; dt !== null && u !== null && Tf(u, dt.memoizedState.deps) ? p.memoizedState = Uo(s, v, i, u) : (Ze.flags |= t, p.memoizedState = Uo(1 | s, v, i, u)) } function Bx(t, s) { cc(8390656, 8, t, s) } function Px(t, s) { gi(2048, 8, t, s) } function Ux(t, s) { return gi(4, 2, t, s) } function Vx(t, s) { return gi(4, 4, t, s) } function qx(t, s) { if (typeof s == "function") { t = t(); var i = s(t); return function () { typeof i == "function" ? i() : s(null) } } if (s != null) return t = t(), s.current = t, function () { s.current = null } } function Fx(t, s, i) { i = i != null ? i.concat([t]) : null, gi(4, 4, qx.bind(null, s, t), i) } function Uf() { } function Yx(t, s) { var i = Rt(); s = s === void 0 ? null : s; var u = i.memoizedState; return s !== null && Tf(s, u[1]) ? u[0] : (i.memoizedState = [t, s], t) } function Gx(t, s) { var i = Rt(); s = s === void 0 ? 
null : s; var u = i.memoizedState; if (s !== null && Tf(s, u[1])) return u[0]; if (u = t(), Gr) { Nt(!0); try { t() } finally { Nt(!1) } } return i.memoizedState = [u, s], u } function Vf(t, s, i) { return i === void 0 || (er & 1073741824) !== 0 ? t.memoizedState = s : (t.memoizedState = i, t = W0(), Ze.lanes |= t, ir |= t, i) } function Xx(t, s, i, u) { return cn(i, s) ? i : $o.current !== null ? (t = Vf(t, i, u), cn(t, s) || ($t = !0), t) : (er & 42) === 0 ? ($t = !0, t.memoizedState = i) : (t = W0(), Ze.lanes |= t, ir |= t, s) } function Zx(t, s, i, u, p) { var v = L.p; L.p = v !== 0 && 8 > v ? v : 8; var A = k.T, z = {}; k.T = z, Yf(t, !1, s, i); try { var F = p(), re = k.S; if (re !== null && re(z, F), F !== null && typeof F == "object" && typeof F.then == "function") { var le = l_(F, u); xi(t, s, le, hn(t)) } else xi(t, s, u, hn(t)) } catch (me) { xi(t, s, { then: function () { }, status: "rejected", reason: me }, hn()) } finally { L.p = v, k.T = A } } function m_() { } function qf(t, s, i, u) { if (t.tag !== 5) throw Error(a(476)); var p = Wx(t).queue; Zx(t, p, s, I, i === null ? 
m_ : function () { return Kx(t), i(u) }) } function Wx(t) { var s = t.memoizedState; if (s !== null) return s; s = { memoizedState: I, baseState: I, baseQueue: null, queue: { pending: null, lanes: 0, dispatch: null, lastRenderedReducer: Es, lastRenderedState: I }, next: null }; var i = {}; return s.next = { memoizedState: i, baseState: i, baseQueue: null, queue: { pending: null, lanes: 0, dispatch: null, lastRenderedReducer: Es, lastRenderedState: i }, next: null }, t.memoizedState = s, t = t.alternate, t !== null && (t.memoizedState = s), s } function Kx(t) { var s = Wx(t).next.queue; xi(t, s, {}, hn()) } function Ff() { return Zt(zi) } function Qx() { return Rt().memoizedState } function Jx() { return Rt().memoizedState } function h_(t) { for (var s = t.return; s !== null;) { switch (s.tag) { case 24: case 3: var i = hn(); t = Qs(i); var u = Js(s, t, i); u !== null && (pn(u, s, i), di(u, s, i)), s = { cache: wf() }, t.payload = s; return }s = s.return } } function p_(t, s, i) { var u = hn(); i = { lane: u, revertLane: 0, action: i, hasEagerState: !1, eagerState: null, next: null }, uc(t) ? 
t0(s, i) : (i = df(t, s, i, u), i !== null && (pn(i, t, u), n0(i, s, u))) } function e0(t, s, i) { var u = hn(); xi(t, s, i, u) } function xi(t, s, i, u) { var p = { lane: u, revertLane: 0, action: i, hasEagerState: !1, eagerState: null, next: null }; if (uc(t)) t0(s, p); else { var v = t.alternate; if (t.lanes === 0 && (v === null || v.lanes === 0) && (v = s.lastRenderedReducer, v !== null)) try { var A = s.lastRenderedState, z = v(A, i); if (p.hasEagerState = !0, p.eagerState = z, cn(z, A)) return Gl(t, s, p, 0), gt === null && Yl(), !1 } catch { } finally { } if (i = df(t, s, p, u), i !== null) return pn(i, t, u), n0(i, s, u), !0 } return !1 } function Yf(t, s, i, u) { if (u = { lane: 2, revertLane: jm(), action: u, hasEagerState: !1, eagerState: null, next: null }, uc(t)) { if (s) throw Error(a(479)) } else s = df(t, i, u, 2), s !== null && pn(s, t, 2) } function uc(t) { var s = t.alternate; return t === Ze || s !== null && s === Ze } function t0(t, s) { Bo = rc = !0; var i = t.pending; i === null ? s.next = s : (s.next = i.next, i.next = s), t.pending = s } function n0(t, s, i) { if ((i & 4194048) !== 0) { var u = s.lanes; u &= t.pendingLanes, i |= u, s.lanes = i, La(t, i) } } var dc = { readContext: Zt, use: ac, useCallback: Ct, useContext: Ct, useEffect: Ct, useImperativeHandle: Ct, useLayoutEffect: Ct, useInsertionEffect: Ct, useMemo: Ct, useReducer: Ct, useRef: Ct, useState: Ct, useDebugValue: Ct, useDeferredValue: Ct, useTransition: Ct, useSyncExternalStore: Ct, useId: Ct, useHostTransitionStatus: Ct, useFormState: Ct, useActionState: Ct, useOptimistic: Ct, useMemoCache: Ct, useCacheRefresh: Ct }, s0 = { readContext: Zt, use: ac, useCallback: function (t, s) { return rn().memoizedState = [t, s === void 0 ? null : s], t }, useContext: Zt, useEffect: Bx, useImperativeHandle: function (t, s, i) { i = i != null ? 
i.concat([t]) : null, cc(4194308, 4, qx.bind(null, s, t), i) }, useLayoutEffect: function (t, s) { return cc(4194308, 4, t, s) }, useInsertionEffect: function (t, s) { cc(4, 2, t, s) }, useMemo: function (t, s) { var i = rn(); s = s === void 0 ? null : s; var u = t(); if (Gr) { Nt(!0); try { t() } finally { Nt(!1) } } return i.memoizedState = [u, s], u }, useReducer: function (t, s, i) { var u = rn(); if (i !== void 0) { var p = i(s); if (Gr) { Nt(!0); try { i(s) } finally { Nt(!1) } } } else p = s; return u.memoizedState = u.baseState = p, t = { pending: null, lanes: 0, dispatch: null, lastRenderedReducer: t, lastRenderedState: p }, u.queue = t, t = t.dispatch = p_.bind(null, Ze, t), [u.memoizedState, t] }, useRef: function (t) { var s = rn(); return t = { current: t }, s.memoizedState = t }, useState: function (t) { t = Bf(t); var s = t.queue, i = e0.bind(null, Ze, s); return s.dispatch = i, [t.memoizedState, i] }, useDebugValue: Uf, useDeferredValue: function (t, s) { var i = rn(); return Vf(i, t, s) }, useTransition: function () { var t = Bf(!1); return t = Zx.bind(null, Ze, t.queue, !0, !1), rn().memoizedState = t, [!1, t] }, useSyncExternalStore: function (t, s, i) { var u = Ze, p = rn(); if (at) { if (i === void 0) throw Error(a(407)); i = i() } else { if (i = s(), gt === null) throw Error(a(349)); (nt & 124) !== 0 || jx(u, s, i) } p.memoizedState = i; var v = { value: i, getSnapshot: s }; return p.queue = v, Bx(Ex.bind(null, u, v, t), [t]), u.flags |= 2048, Uo(9, lc(), _x.bind(null, u, v, i, s), null), i }, useId: function () { var t = rn(), s = gt.identifierPrefix; if (at) { var i = Ss, u = Ns; i = (u & ~(1 << 32 - yt(u) - 1)).toString(32) + i, s = "«" + s + "R" + i, i = oc++, 0 < i && (s += "H" + i.toString(32)), s += "»" } else i = c_++, s = "«" + s + "r" + i.toString(32) + "»"; return t.memoizedState = s }, useHostTransitionStatus: Ff, useFormState: zx, useActionState: zx, useOptimistic: function (t) { var s = rn(); s.memoizedState = s.baseState = t; 
var i = { pending: null, lanes: 0, dispatch: null, lastRenderedReducer: null, lastRenderedState: null }; return s.queue = i, s = Yf.bind(null, Ze, !0, i), i.dispatch = s, [t, s] }, useMemoCache: Lf, useCacheRefresh: function () { return rn().memoizedState = h_.bind(null, Ze) } }, r0 = { readContext: Zt, use: ac, useCallback: Yx, useContext: Zt, useEffect: Px, useImperativeHandle: Fx, useInsertionEffect: Ux, useLayoutEffect: Vx, useMemo: Gx, useReducer: ic, useRef: $x, useState: function () { return ic(Es) }, useDebugValue: Uf, useDeferredValue: function (t, s) { var i = Rt(); return Xx(i, dt.memoizedState, t, s) }, useTransition: function () { var t = ic(Es)[0], s = Rt().memoizedState; return [typeof t == "boolean" ? t : pi(t), s] }, useSyncExternalStore: Sx, useId: Qx, useHostTransitionStatus: Ff, useFormState: Ix, useActionState: Ix, useOptimistic: function (t, s) { var i = Rt(); return Ax(i, dt, t, s) }, useMemoCache: Lf, useCacheRefresh: Jx }, g_ = { readContext: Zt, use: ac, useCallback: Yx, useContext: Zt, useEffect: Px, useImperativeHandle: Fx, useInsertionEffect: Ux, useLayoutEffect: Vx, useMemo: Gx, useReducer: $f, useRef: $x, useState: function () { return $f(Es) }, useDebugValue: Uf, useDeferredValue: function (t, s) { var i = Rt(); return dt === null ? Vf(i, t, s) : Xx(i, dt.memoizedState, t, s) }, useTransition: function () { var t = $f(Es)[0], s = Rt().memoizedState; return [typeof t == "boolean" ? t : pi(t), s] }, useSyncExternalStore: Sx, useId: Qx, useHostTransitionStatus: Ff, useFormState: Hx, useActionState: Hx, useOptimistic: function (t, s) { var i = Rt(); return dt !== null ? Ax(i, dt, t, s) : (i.baseState = t, [t, i.queue.dispatch]) }, useMemoCache: Lf, useCacheRefresh: Jx }, Vo = null, yi = 0; function fc(t) { var s = yi; return yi += 1, Vo === null && (Vo = []), px(Vo, t, s) } function vi(t, s) { s = s.props.ref, t.ref = s !== void 0 ? s : null } function mc(t, s) { throw s.$$typeof === y ? 
Error(a(525)) : (t = Object.prototype.toString.call(s), Error(a(31, t === "[object Object]" ? "object with keys {" + Object.keys(s).join(", ") + "}" : t))) } function o0(t) { var s = t._init; return s(t._payload) } function a0(t) { function s(Q, X) { if (t) { var se = Q.deletions; se === null ? (Q.deletions = [X], Q.flags |= 16) : se.push(X) } } function i(Q, X) { if (!t) return null; for (; X !== null;)s(Q, X), X = X.sibling; return null } function u(Q) { for (var X = new Map; Q !== null;)Q.key !== null ? X.set(Q.key, Q) : X.set(Q.index, Q), Q = Q.sibling; return X } function p(Q, X) { return Q = ws(Q, X), Q.index = 0, Q.sibling = null, Q } function v(Q, X, se) { return Q.index = se, t ? (se = Q.alternate, se !== null ? (se = se.index, se < X ? (Q.flags |= 67108866, X) : se) : (Q.flags |= 67108866, X)) : (Q.flags |= 1048576, X) } function A(Q) { return t && Q.alternate === null && (Q.flags |= 67108866), Q } function z(Q, X, se, ce) { return X === null || X.tag !== 6 ? (X = mf(se, Q.mode, ce), X.return = Q, X) : (X = p(X, se), X.return = Q, X) } function F(Q, X, se, ce) { var Ce = se.type; return Ce === S ? le(Q, X, se.props.children, ce, se.key) : X !== null && (X.elementType === Ce || typeof Ce == "object" && Ce !== null && Ce.$$typeof === B && o0(Ce) === X.type) ? (X = p(X, se.props), vi(X, se), X.return = Q, X) : (X = Zl(se.type, se.key, se.props, null, Q.mode, ce), vi(X, se), X.return = Q, X) } function re(Q, X, se, ce) { return X === null || X.tag !== 4 || X.stateNode.containerInfo !== se.containerInfo || X.stateNode.implementation !== se.implementation ? (X = hf(se, Q.mode, ce), X.return = Q, X) : (X = p(X, se.children || []), X.return = Q, X) } function le(Q, X, se, ce, Ce) { return X === null || X.tag !== 7 ? 
(X = $r(se, Q.mode, ce, Ce), X.return = Q, X) : (X = p(X, se), X.return = Q, X) } function me(Q, X, se) { if (typeof X == "string" && X !== "" || typeof X == "number" || typeof X == "bigint") return X = mf("" + X, Q.mode, se), X.return = Q, X; if (typeof X == "object" && X !== null) { switch (X.$$typeof) { case x: return se = Zl(X.type, X.key, X.props, null, Q.mode, se), vi(se, X), se.return = Q, se; case b: return X = hf(X, Q.mode, se), X.return = Q, X; case B: var ce = X._init; return X = ce(X._payload), me(Q, X, se) }if (U(X) || G(X)) return X = $r(X, Q.mode, se, null), X.return = Q, X; if (typeof X.then == "function") return me(Q, fc(X), se); if (X.$$typeof === E) return me(Q, Jl(Q, X), se); mc(Q, X) } return null } function oe(Q, X, se, ce) { var Ce = X !== null ? X.key : null; if (typeof se == "string" && se !== "" || typeof se == "number" || typeof se == "bigint") return Ce !== null ? null : z(Q, X, "" + se, ce); if (typeof se == "object" && se !== null) { switch (se.$$typeof) { case x: return se.key === Ce ? F(Q, X, se, ce) : null; case b: return se.key === Ce ? re(Q, X, se, ce) : null; case B: return Ce = se._init, se = Ce(se._payload), oe(Q, X, se, ce) }if (U(se) || G(se)) return Ce !== null ? null : le(Q, X, se, ce, null); if (typeof se.then == "function") return oe(Q, X, fc(se), ce); if (se.$$typeof === E) return oe(Q, X, Jl(Q, se), ce); mc(Q, se) } return null } function ae(Q, X, se, ce, Ce) { if (typeof ce == "string" && ce !== "" || typeof ce == "number" || typeof ce == "bigint") return Q = Q.get(se) || null, z(X, Q, "" + ce, Ce); if (typeof ce == "object" && ce !== null) { switch (ce.$$typeof) { case x: return Q = Q.get(ce.key === null ? se : ce.key) || null, F(X, Q, ce, Ce); case b: return Q = Q.get(ce.key === null ? 
se : ce.key) || null, re(X, Q, ce, Ce); case B: var Ke = ce._init; return ce = Ke(ce._payload), ae(Q, X, se, ce, Ce) }if (U(ce) || G(ce)) return Q = Q.get(se) || null, le(X, Q, ce, Ce, null); if (typeof ce.then == "function") return ae(Q, X, se, fc(ce), Ce); if (ce.$$typeof === E) return ae(Q, X, se, Jl(X, ce), Ce); mc(X, ce) } return null } function qe(Q, X, se, ce) { for (var Ce = null, Ke = null, Oe = X, Ve = X = 0, Pt = null; Oe !== null && Ve < se.length; Ve++) { Oe.index > Ve ? (Pt = Oe, Oe = null) : Pt = Oe.sibling; var rt = oe(Q, Oe, se[Ve], ce); if (rt === null) { Oe === null && (Oe = Pt); break } t && Oe && rt.alternate === null && s(Q, Oe), X = v(rt, X, Ve), Ke === null ? Ce = rt : Ke.sibling = rt, Ke = rt, Oe = Pt } if (Ve === se.length) return i(Q, Oe), at && Pr(Q, Ve), Ce; if (Oe === null) { for (; Ve < se.length; Ve++)Oe = me(Q, se[Ve], ce), Oe !== null && (X = v(Oe, X, Ve), Ke === null ? Ce = Oe : Ke.sibling = Oe, Ke = Oe); return at && Pr(Q, Ve), Ce } for (Oe = u(Oe); Ve < se.length; Ve++)Pt = ae(Oe, Q, Ve, se[Ve], ce), Pt !== null && (t && Pt.alternate !== null && Oe.delete(Pt.key === null ? Ve : Pt.key), X = v(Pt, X, Ve), Ke === null ? Ce = Pt : Ke.sibling = Pt, Ke = Pt); return t && Oe.forEach(function (gr) { return s(Q, gr) }), at && Pr(Q, Ve), Ce } function Be(Q, X, se, ce) { if (se == null) throw Error(a(151)); for (var Ce = null, Ke = null, Oe = X, Ve = X = 0, Pt = null, rt = se.next(); Oe !== null && !rt.done; Ve++, rt = se.next()) { Oe.index > Ve ? (Pt = Oe, Oe = null) : Pt = Oe.sibling; var gr = oe(Q, Oe, rt.value, ce); if (gr === null) { Oe === null && (Oe = Pt); break } t && Oe && gr.alternate === null && s(Q, Oe), X = v(gr, X, Ve), Ke === null ? Ce = gr : Ke.sibling = gr, Ke = gr, Oe = Pt } if (rt.done) return i(Q, Oe), at && Pr(Q, Ve), Ce; if (Oe === null) { for (; !rt.done; Ve++, rt = se.next())rt = me(Q, rt.value, ce), rt !== null && (X = v(rt, X, Ve), Ke === null ? 
Ce = rt : Ke.sibling = rt, Ke = rt); return at && Pr(Q, Ve), Ce } for (Oe = u(Oe); !rt.done; Ve++, rt = se.next())rt = ae(Oe, Q, Ve, rt.value, ce), rt !== null && (t && rt.alternate !== null && Oe.delete(rt.key === null ? Ve : rt.key), X = v(rt, X, Ve), Ke === null ? Ce = rt : Ke.sibling = rt, Ke = rt); return t && Oe.forEach(function (xE) { return s(Q, xE) }), at && Pr(Q, Ve), Ce } function mt(Q, X, se, ce) { if (typeof se == "object" && se !== null && se.type === S && se.key === null && (se = se.props.children), typeof se == "object" && se !== null) { switch (se.$$typeof) { case x: e: { for (var Ce = se.key; X !== null;) { if (X.key === Ce) { if (Ce = se.type, Ce === S) { if (X.tag === 7) { i(Q, X.sibling), ce = p(X, se.props.children), ce.return = Q, Q = ce; break e } } else if (X.elementType === Ce || typeof Ce == "object" && Ce !== null && Ce.$$typeof === B && o0(Ce) === X.type) { i(Q, X.sibling), ce = p(X, se.props), vi(ce, se), ce.return = Q, Q = ce; break e } i(Q, X); break } else s(Q, X); X = X.sibling } se.type === S ? 
(ce = $r(se.props.children, Q.mode, ce, se.key), ce.return = Q, Q = ce) : (ce = Zl(se.type, se.key, se.props, null, Q.mode, ce), vi(ce, se), ce.return = Q, Q = ce) } return A(Q); case b: e: { for (Ce = se.key; X !== null;) { if (X.key === Ce) if (X.tag === 4 && X.stateNode.containerInfo === se.containerInfo && X.stateNode.implementation === se.implementation) { i(Q, X.sibling), ce = p(X, se.children || []), ce.return = Q, Q = ce; break e } else { i(Q, X); break } else s(Q, X); X = X.sibling } ce = hf(se, Q.mode, ce), ce.return = Q, Q = ce } return A(Q); case B: return Ce = se._init, se = Ce(se._payload), mt(Q, X, se, ce) }if (U(se)) return qe(Q, X, se, ce); if (G(se)) { if (Ce = G(se), typeof Ce != "function") throw Error(a(150)); return se = Ce.call(se), Be(Q, X, se, ce) } if (typeof se.then == "function") return mt(Q, X, fc(se), ce); if (se.$$typeof === E) return mt(Q, X, Jl(Q, se), ce); mc(Q, se) } return typeof se == "string" && se !== "" || typeof se == "number" || typeof se == "bigint" ? (se = "" + se, X !== null && X.tag === 6 ? 
(i(Q, X.sibling), ce = p(X, se), ce.return = Q, Q = ce) : (i(Q, X), ce = mf(se, Q.mode, ce), ce.return = Q, Q = ce), A(Q)) : i(Q, X) } return function (Q, X, se, ce) { try { yi = 0; var Ce = mt(Q, X, se, ce); return Vo = null, Ce } catch (Oe) { if (Oe === ci || Oe === tc) throw Oe; var Ke = un(29, Oe, null, Q.mode); return Ke.lanes = ce, Ke.return = Q, Ke } finally { } } } var qo = a0(!0), i0 = a0(!1), En = $(null), Wn = null; function tr(t) { var s = t.alternate; V(zt, zt.current & 1), V(En, t), Wn === null && (s === null || $o.current !== null || s.memoizedState !== null) && (Wn = t) } function l0(t) { if (t.tag === 22) { if (V(zt, zt.current), V(En, t), Wn === null) { var s = t.alternate; s !== null && s.memoizedState !== null && (Wn = t) } } else nr() } function nr() { V(zt, zt.current), V(En, En.current) } function Cs(t) { Y(En), Wn === t && (Wn = null), Y(zt) } var zt = $(0); function hc(t) { for (var s = t; s !== null;) { if (s.tag === 13) { var i = s.memoizedState; if (i !== null && (i = i.dehydrated, i === null || i.data === "$?" || Im(i))) return s } else if (s.tag === 19 && s.memoizedProps.revealOrder !== void 0) { if ((s.flags & 128) !== 0) return s } else if (s.child !== null) { s.child.return = s, s = s.child; continue } if (s === t) break; for (; s.sibling === null;) { if (s.return === null || s.return === t) return null; s = s.return } s.sibling.return = s.return, s = s.sibling } return null } function Gf(t, s, i, u) { s = t.memoizedState, i = i(u, s), i = i == null ? 
s : g({}, s, i), t.memoizedState = i, t.lanes === 0 && (t.updateQueue.baseState = i) } var Xf = { enqueueSetState: function (t, s, i) { t = t._reactInternals; var u = hn(), p = Qs(u); p.payload = s, i != null && (p.callback = i), s = Js(t, p, u), s !== null && (pn(s, t, u), di(s, t, u)) }, enqueueReplaceState: function (t, s, i) { t = t._reactInternals; var u = hn(), p = Qs(u); p.tag = 1, p.payload = s, i != null && (p.callback = i), s = Js(t, p, u), s !== null && (pn(s, t, u), di(s, t, u)) }, enqueueForceUpdate: function (t, s) { t = t._reactInternals; var i = hn(), u = Qs(i); u.tag = 2, s != null && (u.callback = s), s = Js(t, u, i), s !== null && (pn(s, t, i), di(s, t, i)) } }; function c0(t, s, i, u, p, v, A) { return t = t.stateNode, typeof t.shouldComponentUpdate == "function" ? t.shouldComponentUpdate(u, v, A) : s.prototype && s.prototype.isPureReactComponent ? !ti(i, u) || !ti(p, v) : !0 } function u0(t, s, i, u) { t = s.state, typeof s.componentWillReceiveProps == "function" && s.componentWillReceiveProps(i, u), typeof s.UNSAFE_componentWillReceiveProps == "function" && s.UNSAFE_componentWillReceiveProps(i, u), s.state !== t && Xf.enqueueReplaceState(s, s.state, null) } function Xr(t, s) { var i = s; if ("ref" in s) { i = {}; for (var u in s) u !== "ref" && (i[u] = s[u]) } if (t = t.defaultProps) { i === s && (i = g({}, i)); for (var p in t) i[p] === void 0 && (i[p] = t[p]) } return i } var pc = typeof reportError == "function" ? reportError : function (t) { if (typeof window == "object" && typeof window.ErrorEvent == "function") { var s = new window.ErrorEvent("error", { bubbles: !0, cancelable: !0, message: typeof t == "object" && t !== null && typeof t.message == "string" ? 
String(t.message) : String(t), error: t }); if (!window.dispatchEvent(s)) return } else if (typeof process == "object" && typeof process.emit == "function") { process.emit("uncaughtException", t); return } console.error(t) }; function d0(t) { pc(t) } function f0(t) { console.error(t) } function m0(t) { pc(t) } function gc(t, s) { try { var i = t.onUncaughtError; i(s.value, { componentStack: s.stack }) } catch (u) { setTimeout(function () { throw u }) } } function h0(t, s, i) { try { var u = t.onCaughtError; u(i.value, { componentStack: i.stack, errorBoundary: s.tag === 1 ? s.stateNode : null }) } catch (p) { setTimeout(function () { throw p }) } } function Zf(t, s, i) { return i = Qs(i), i.tag = 3, i.payload = { element: null }, i.callback = function () { gc(t, s) }, i } function p0(t) { return t = Qs(t), t.tag = 3, t } function g0(t, s, i, u) { var p = i.type.getDerivedStateFromError; if (typeof p == "function") { var v = u.value; t.payload = function () { return p(v) }, t.callback = function () { h0(s, i, u) } } var A = i.stateNode; A !== null && typeof A.componentDidCatch == "function" && (t.callback = function () { h0(s, i, u), typeof p != "function" && (lr === null ? lr = new Set([this]) : lr.add(this)); var z = u.stack; this.componentDidCatch(u.value, { componentStack: z !== null ? z : "" }) }) } function x_(t, s, i, u, p) { if (i.flags |= 32768, u !== null && typeof u == "object" && typeof u.then == "function") { if (s = i.alternate, s !== null && ai(s, i, p, !0), i = En.current, i !== null) { switch (i.tag) { case 13: return Wn === null ? vm() : i.alternate === null && _t === 0 && (_t = 3), i.flags &= -257, i.flags |= 65536, i.lanes = p, u === jf ? i.flags |= 16384 : (s = i.updateQueue, s === null ? i.updateQueue = new Set([u]) : s.add(u), wm(t, u, p)), !1; case 22: return i.flags |= 65536, u === jf ? i.flags |= 16384 : (s = i.updateQueue, s === null ? 
(s = { transitions: null, markerInstances: null, retryQueue: new Set([u]) }, i.updateQueue = s) : (i = s.retryQueue, i === null ? s.retryQueue = new Set([u]) : i.add(u)), wm(t, u, p)), !1 }throw Error(a(435, i.tag)) } return wm(t, u, p), vm(), !1 } if (at) return s = En.current, s !== null ? ((s.flags & 65536) === 0 && (s.flags |= 256), s.flags |= 65536, s.lanes = p, u !== xf && (t = Error(a(422), { cause: u }), oi(Nn(t, i)))) : (u !== xf && (s = Error(a(423), { cause: u }), oi(Nn(s, i))), t = t.current.alternate, t.flags |= 65536, p &= -p, t.lanes |= p, u = Nn(u, i), p = Zf(t.stateNode, u, p), Cf(t, p), _t !== 4 && (_t = 2)), !1; var v = Error(a(520), { cause: u }); if (v = Nn(v, i), Ei === null ? Ei = [v] : Ei.push(v), _t !== 4 && (_t = 2), s === null) return !0; u = Nn(u, i), i = s; do { switch (i.tag) { case 3: return i.flags |= 65536, t = p & -p, i.lanes |= t, t = Zf(i.stateNode, u, t), Cf(i, t), !1; case 1: if (s = i.type, v = i.stateNode, (i.flags & 128) === 0 && (typeof s.getDerivedStateFromError == "function" || v !== null && typeof v.componentDidCatch == "function" && (lr === null || !lr.has(v)))) return i.flags |= 65536, p &= -p, i.lanes |= p, p = p0(p), g0(p, t, i, u), Cf(i, p), !1 }i = i.return } while (i !== null); return !1 } var x0 = Error(a(461)), $t = !1; function Vt(t, s, i, u) { s.child = t === null ? i0(s, null, i, u) : qo(s, t.child, i, u) } function y0(t, s, i, u, p) { i = i.render; var v = s.ref; if ("ref" in u) { var A = {}; for (var z in u) z !== "ref" && (A[z] = u[z]) } else A = u; return Fr(s), u = Rf(t, s, i, A, v, p), z = Df(), t !== null && !$t ? (Of(t, s, p), ks(t, s, p)) : (at && z && pf(s), s.flags |= 1, Vt(t, s, u, p), s.child) } function v0(t, s, i, u, p) { if (t === null) { var v = i.type; return typeof v == "function" && !ff(v) && v.defaultProps === void 0 && i.compare === null ? 
(s.tag = 15, s.type = v, b0(t, s, v, u, p)) : (t = Zl(i.type, null, u, s, s.mode, p), t.ref = s.ref, t.return = s, s.child = t) } if (v = t.child, !sm(t, p)) { var A = v.memoizedProps; if (i = i.compare, i = i !== null ? i : ti, i(A, u) && t.ref === s.ref) return ks(t, s, p) } return s.flags |= 1, t = ws(v, u), t.ref = s.ref, t.return = s, s.child = t } function b0(t, s, i, u, p) { if (t !== null) { var v = t.memoizedProps; if (ti(v, u) && t.ref === s.ref) if ($t = !1, s.pendingProps = u = v, sm(t, p)) (t.flags & 131072) !== 0 && ($t = !0); else return s.lanes = t.lanes, ks(t, s, p) } return Wf(t, s, i, u, p) } function w0(t, s, i) { var u = s.pendingProps, p = u.children, v = t !== null ? t.memoizedState : null; if (u.mode === "hidden") { if ((s.flags & 128) !== 0) { if (u = v !== null ? v.baseLanes | i : i, t !== null) { for (p = s.child = t.child, v = 0; p !== null;)v = v | p.lanes | p.childLanes, p = p.sibling; s.childLanes = v & ~u } else s.childLanes = 0, s.child = null; return N0(t, s, u, i) } if ((i & 536870912) !== 0) s.memoizedState = { baseLanes: 0, cachePool: null }, t !== null && ec(s, v !== null ? v.cachePool : null), v !== null ? bx(s, v) : Af(), l0(s); else return s.lanes = s.childLanes = 536870912, N0(t, s, v !== null ? v.baseLanes | i : i, i) } else v !== null ? (ec(s, v.cachePool), bx(s, v), nr(), s.memoizedState = null) : (t !== null && ec(s, null), Af(), nr()); return Vt(t, s, p, i), s.child } function N0(t, s, i, u) { var p = Sf(); return p = p === null ? 
null : { parent: Ot._currentValue, pool: p }, s.memoizedState = { baseLanes: i, cachePool: p }, t !== null && ec(s, null), Af(), l0(s), t !== null && ai(t, s, u, !0), null } function xc(t, s) { var i = s.ref; if (i === null) t !== null && t.ref !== null && (s.flags |= 4194816); else { if (typeof i != "function" && typeof i != "object") throw Error(a(284)); (t === null || t.ref !== i) && (s.flags |= 4194816) } } function Wf(t, s, i, u, p) { return Fr(s), i = Rf(t, s, i, u, void 0, p), u = Df(), t !== null && !$t ? (Of(t, s, p), ks(t, s, p)) : (at && u && pf(s), s.flags |= 1, Vt(t, s, i, p), s.child) } function S0(t, s, i, u, p, v) { return Fr(s), s.updateQueue = null, i = Nx(s, u, i, p), wx(t), u = Df(), t !== null && !$t ? (Of(t, s, v), ks(t, s, v)) : (at && u && pf(s), s.flags |= 1, Vt(t, s, i, v), s.child) } function j0(t, s, i, u, p) { if (Fr(s), s.stateNode === null) { var v = Oo, A = i.contextType; typeof A == "object" && A !== null && (v = Zt(A)), v = new i(u, v), s.memoizedState = v.state !== null && v.state !== void 0 ? v.state : null, v.updater = Xf, s.stateNode = v, v._reactInternals = s, v = s.stateNode, v.props = u, v.state = s.memoizedState, v.refs = {}, _f(s), A = i.contextType, v.context = typeof A == "object" && A !== null ? 
Zt(A) : Oo, v.state = s.memoizedState, A = i.getDerivedStateFromProps, typeof A == "function" && (Gf(s, i, A, u), v.state = s.memoizedState), typeof i.getDerivedStateFromProps == "function" || typeof v.getSnapshotBeforeUpdate == "function" || typeof v.UNSAFE_componentWillMount != "function" && typeof v.componentWillMount != "function" || (A = v.state, typeof v.componentWillMount == "function" && v.componentWillMount(), typeof v.UNSAFE_componentWillMount == "function" && v.UNSAFE_componentWillMount(), A !== v.state && Xf.enqueueReplaceState(v, v.state, null), mi(s, u, v, p), fi(), v.state = s.memoizedState), typeof v.componentDidMount == "function" && (s.flags |= 4194308), u = !0 } else if (t === null) { v = s.stateNode; var z = s.memoizedProps, F = Xr(i, z); v.props = F; var re = v.context, le = i.contextType; A = Oo, typeof le == "object" && le !== null && (A = Zt(le)); var me = i.getDerivedStateFromProps; le = typeof me == "function" || typeof v.getSnapshotBeforeUpdate == "function", z = s.pendingProps !== z, le || typeof v.UNSAFE_componentWillReceiveProps != "function" && typeof v.componentWillReceiveProps != "function" || (z || re !== A) && u0(s, v, u, A), Ks = !1; var oe = s.memoizedState; v.state = oe, mi(s, u, v, p), fi(), re = s.memoizedState, z || oe !== re || Ks ? (typeof me == "function" && (Gf(s, i, me, u), re = s.memoizedState), (F = Ks || c0(s, i, F, u, oe, re, A)) ? 
(le || typeof v.UNSAFE_componentWillMount != "function" && typeof v.componentWillMount != "function" || (typeof v.componentWillMount == "function" && v.componentWillMount(), typeof v.UNSAFE_componentWillMount == "function" && v.UNSAFE_componentWillMount()), typeof v.componentDidMount == "function" && (s.flags |= 4194308)) : (typeof v.componentDidMount == "function" && (s.flags |= 4194308), s.memoizedProps = u, s.memoizedState = re), v.props = u, v.state = re, v.context = A, u = F) : (typeof v.componentDidMount == "function" && (s.flags |= 4194308), u = !1) } else { v = s.stateNode, Ef(t, s), A = s.memoizedProps, le = Xr(i, A), v.props = le, me = s.pendingProps, oe = v.context, re = i.contextType, F = Oo, typeof re == "object" && re !== null && (F = Zt(re)), z = i.getDerivedStateFromProps, (re = typeof z == "function" || typeof v.getSnapshotBeforeUpdate == "function") || typeof v.UNSAFE_componentWillReceiveProps != "function" && typeof v.componentWillReceiveProps != "function" || (A !== me || oe !== F) && u0(s, v, u, F), Ks = !1, oe = s.memoizedState, v.state = oe, mi(s, u, v, p), fi(); var ae = s.memoizedState; A !== me || oe !== ae || Ks || t !== null && t.dependencies !== null && Ql(t.dependencies) ? (typeof z == "function" && (Gf(s, i, z, u), ae = s.memoizedState), (le = Ks || c0(s, i, le, u, oe, ae, F) || t !== null && t.dependencies !== null && Ql(t.dependencies)) ? 
(re || typeof v.UNSAFE_componentWillUpdate != "function" && typeof v.componentWillUpdate != "function" || (typeof v.componentWillUpdate == "function" && v.componentWillUpdate(u, ae, F), typeof v.UNSAFE_componentWillUpdate == "function" && v.UNSAFE_componentWillUpdate(u, ae, F)), typeof v.componentDidUpdate == "function" && (s.flags |= 4), typeof v.getSnapshotBeforeUpdate == "function" && (s.flags |= 1024)) : (typeof v.componentDidUpdate != "function" || A === t.memoizedProps && oe === t.memoizedState || (s.flags |= 4), typeof v.getSnapshotBeforeUpdate != "function" || A === t.memoizedProps && oe === t.memoizedState || (s.flags |= 1024), s.memoizedProps = u, s.memoizedState = ae), v.props = u, v.state = ae, v.context = F, u = le) : (typeof v.componentDidUpdate != "function" || A === t.memoizedProps && oe === t.memoizedState || (s.flags |= 4), typeof v.getSnapshotBeforeUpdate != "function" || A === t.memoizedProps && oe === t.memoizedState || (s.flags |= 1024), u = !1) } return v = u, xc(t, s), u = (s.flags & 128) !== 0, v || u ? (v = s.stateNode, i = u && typeof i.getDerivedStateFromError != "function" ? null : v.render(), s.flags |= 1, t !== null && u ? (s.child = qo(s, t.child, null, p), s.child = qo(s, null, i, p)) : Vt(t, s, i, p), s.memoizedState = v.state, t = s.child) : t = ks(t, s, p), t } function _0(t, s, i, u) { return ri(), s.flags |= 256, Vt(t, s, i, u), s.child } var Kf = { dehydrated: null, treeContext: null, retryLane: 0, hydrationErrors: null }; function Qf(t) { return { baseLanes: t, cachePool: fx() } } function Jf(t, s, i) { return t = t !== null ? t.childLanes & ~i : 0, s && (t |= Cn), t } function E0(t, s, i) { var u = s.pendingProps, p = !1, v = (s.flags & 128) !== 0, A; if ((A = v) || (A = t !== null && t.memoizedState === null ? !1 : (zt.current & 2) !== 0), A && (p = !0, s.flags &= -129), A = (s.flags & 32) !== 0, s.flags &= -33, t === null) { if (at) { if (p ? 
tr(s) : nr(), at) { var z = jt, F; if (F = z) { e: { for (F = z, z = Zn; F.nodeType !== 8;) { if (!z) { z = null; break e } if (F = Hn(F.nextSibling), F === null) { z = null; break e } } z = F } z !== null ? (s.memoizedState = { dehydrated: z, treeContext: Br !== null ? { id: Ns, overflow: Ss } : null, retryLane: 536870912, hydrationErrors: null }, F = un(18, null, null, 0), F.stateNode = z, F.return = s, s.child = F, Kt = s, jt = null, F = !0) : F = !1 } F || Vr(s) } if (z = s.memoizedState, z !== null && (z = z.dehydrated, z !== null)) return Im(z) ? s.lanes = 32 : s.lanes = 536870912, null; Cs(s) } return z = u.children, u = u.fallback, p ? (nr(), p = s.mode, z = yc({ mode: "hidden", children: z }, p), u = $r(u, p, i, null), z.return = s, u.return = s, z.sibling = u, s.child = z, p = s.child, p.memoizedState = Qf(i), p.childLanes = Jf(t, A, i), s.memoizedState = Kf, u) : (tr(s), em(s, z)) } if (F = t.memoizedState, F !== null && (z = F.dehydrated, z !== null)) { if (v) s.flags & 256 ? (tr(s), s.flags &= -257, s = tm(t, s, i)) : s.memoizedState !== null ? (nr(), s.child = t.child, s.flags |= 128, s = null) : (nr(), p = u.fallback, z = s.mode, u = yc({ mode: "visible", children: u.children }, z), p = $r(p, z, i, null), p.flags |= 2, u.return = s, p.return = s, u.sibling = p, s.child = u, qo(s, t.child, null, i), u = s.child, u.memoizedState = Qf(i), u.childLanes = Jf(t, A, i), s.memoizedState = Kf, s = p); else if (tr(s), Im(z)) { if (A = z.nextSibling && z.nextSibling.dataset, A) var re = A.dgst; A = re, u = Error(a(419)), u.stack = "", u.digest = A, oi({ value: u, source: null, stack: null }), s = tm(t, s, i) } else if ($t || ai(t, s, i, !1), A = (i & t.childLanes) !== 0, $t || A) { if (A = gt, A !== null && (u = i & -i, u = (u & 42) !== 0 ? 1 : Ha(u), u = (u & (A.suspendedLanes | i)) !== 0 ? 0 : u, u !== 0 && u !== F.retryLane)) throw F.retryLane = u, Do(t, u), pn(A, t, u), x0; z.data === "$?" || vm(), s = tm(t, s, i) } else z.data === "$?" ? 
(s.flags |= 192, s.child = t.child, s = null) : (t = F.treeContext, jt = Hn(z.nextSibling), Kt = s, at = !0, Ur = null, Zn = !1, t !== null && (jn[_n++] = Ns, jn[_n++] = Ss, jn[_n++] = Br, Ns = t.id, Ss = t.overflow, Br = s), s = em(s, u.children), s.flags |= 4096); return s } return p ? (nr(), p = u.fallback, z = s.mode, F = t.child, re = F.sibling, u = ws(F, { mode: "hidden", children: u.children }), u.subtreeFlags = F.subtreeFlags & 65011712, re !== null ? p = ws(re, p) : (p = $r(p, z, i, null), p.flags |= 2), p.return = s, u.return = s, u.sibling = p, s.child = u, u = p, p = s.child, z = t.child.memoizedState, z === null ? z = Qf(i) : (F = z.cachePool, F !== null ? (re = Ot._currentValue, F = F.parent !== re ? { parent: re, pool: re } : F) : F = fx(), z = { baseLanes: z.baseLanes | i, cachePool: F }), p.memoizedState = z, p.childLanes = Jf(t, A, i), s.memoizedState = Kf, u) : (tr(s), i = t.child, t = i.sibling, i = ws(i, { mode: "visible", children: u.children }), i.return = s, i.sibling = null, t !== null && (A = s.deletions, A === null ? (s.deletions = [t], s.flags |= 16) : A.push(t)), s.child = i, s.memoizedState = null, i) } function em(t, s) { return s = yc({ mode: "visible", children: s }, t.mode), s.return = t, t.child = s } function yc(t, s) { return t = un(22, t, null, s), t.lanes = 0, t.stateNode = { _visibility: 1, _pendingMarkers: null, _retryCache: null, _transitions: null }, t } function tm(t, s, i) { return qo(s, t.child, null, i), t = em(s, s.pendingProps.children), t.flags |= 2, s.memoizedState = null, t } function C0(t, s, i) { t.lanes |= s; var u = t.alternate; u !== null && (u.lanes |= s), vf(t.return, s, i) } function nm(t, s, i, u, p) { var v = t.memoizedState; v === null ? 
t.memoizedState = { isBackwards: s, rendering: null, renderingStartTime: 0, last: u, tail: i, tailMode: p } : (v.isBackwards = s, v.rendering = null, v.renderingStartTime = 0, v.last = u, v.tail = i, v.tailMode = p) } function k0(t, s, i) { var u = s.pendingProps, p = u.revealOrder, v = u.tail; if (Vt(t, s, u.children, i), u = zt.current, (u & 2) !== 0) u = u & 1 | 2, s.flags |= 128; else { if (t !== null && (t.flags & 128) !== 0) e: for (t = s.child; t !== null;) { if (t.tag === 13) t.memoizedState !== null && C0(t, i, s); else if (t.tag === 19) C0(t, i, s); else if (t.child !== null) { t.child.return = t, t = t.child; continue } if (t === s) break e; for (; t.sibling === null;) { if (t.return === null || t.return === s) break e; t = t.return } t.sibling.return = t.return, t = t.sibling } u &= 1 } switch (V(zt, u), p) { case "forwards": for (i = s.child, p = null; i !== null;)t = i.alternate, t !== null && hc(t) === null && (p = i), i = i.sibling; i = p, i === null ? (p = s.child, s.child = null) : (p = i.sibling, i.sibling = null), nm(s, !1, p, i, v); break; case "backwards": for (i = null, p = s.child, s.child = null; p !== null;) { if (t = p.alternate, t !== null && hc(t) === null) { s.child = p; break } t = p.sibling, p.sibling = i, i = p, p = t } nm(s, !0, i, null, v); break; case "together": nm(s, !1, null, null, void 0); break; default: s.memoizedState = null }return s.child } function ks(t, s, i) { if (t !== null && (s.dependencies = t.dependencies), ir |= s.lanes, (i & s.childLanes) === 0) if (t !== null) { if (ai(t, s, i, !1), (i & s.childLanes) === 0) return null } else return null; if (t !== null && s.child !== t.child) throw Error(a(153)); if (s.child !== null) { for (t = s.child, i = ws(t, t.pendingProps), s.child = i, i.return = s; t.sibling !== null;)t = t.sibling, i = i.sibling = ws(t, t.pendingProps), i.return = s; i.sibling = null } return s.child } function sm(t, s) { return (t.lanes & s) !== 0 ? 
!0 : (t = t.dependencies, !!(t !== null && Ql(t))) } function y_(t, s, i) { switch (s.tag) { case 3: ie(s, s.stateNode.containerInfo), Ws(s, Ot, t.memoizedState.cache), ri(); break; case 27: case 5: be(s); break; case 4: ie(s, s.stateNode.containerInfo); break; case 10: Ws(s, s.type, s.memoizedProps.value); break; case 13: var u = s.memoizedState; if (u !== null) return u.dehydrated !== null ? (tr(s), s.flags |= 128, null) : (i & s.child.childLanes) !== 0 ? E0(t, s, i) : (tr(s), t = ks(t, s, i), t !== null ? t.sibling : null); tr(s); break; case 19: var p = (t.flags & 128) !== 0; if (u = (i & s.childLanes) !== 0, u || (ai(t, s, i, !1), u = (i & s.childLanes) !== 0), p) { if (u) return k0(t, s, i); s.flags |= 128 } if (p = s.memoizedState, p !== null && (p.rendering = null, p.tail = null, p.lastEffect = null), V(zt, zt.current), u) break; return null; case 22: case 23: return s.lanes = 0, w0(t, s, i); case 24: Ws(s, Ot, t.memoizedState.cache) }return ks(t, s, i) } function A0(t, s, i) { if (t !== null) if (t.memoizedProps !== s.pendingProps) $t = !0; else { if (!sm(t, i) && (s.flags & 128) === 0) return $t = !1, y_(t, s, i); $t = (t.flags & 131072) !== 0 } else $t = !1, at && (s.flags & 1048576) !== 0 && ox(s, Kl, s.index); switch (s.lanes = 0, s.tag) { case 16: e: { t = s.pendingProps; var u = s.elementType, p = u._init; if (u = p(u._payload), s.type = u, typeof u == "function") ff(u) ? 
(t = Xr(u, t), s.tag = 1, s = j0(null, s, u, t, i)) : (s.tag = 0, s = Wf(null, s, u, t, i)); else { if (u != null) { if (p = u.$$typeof, p === T) { s.tag = 11, s = y0(null, s, u, t, i); break e } else if (p === O) { s.tag = 14, s = v0(null, s, u, t, i); break e } } throw s = P(u) || u, Error(a(306, s, "")) } } return s; case 0: return Wf(t, s, s.type, s.pendingProps, i); case 1: return u = s.type, p = Xr(u, s.pendingProps), j0(t, s, u, p, i); case 3: e: { if (ie(s, s.stateNode.containerInfo), t === null) throw Error(a(387)); u = s.pendingProps; var v = s.memoizedState; p = v.element, Ef(t, s), mi(s, u, null, i); var A = s.memoizedState; if (u = A.cache, Ws(s, Ot, u), u !== v.cache && bf(s, [Ot], i, !0), fi(), u = A.element, v.isDehydrated) if (v = { element: u, isDehydrated: !1, cache: A.cache }, s.updateQueue.baseState = v, s.memoizedState = v, s.flags & 256) { s = _0(t, s, u, i); break e } else if (u !== p) { p = Nn(Error(a(424)), s), oi(p), s = _0(t, s, u, i); break e } else { switch (t = s.stateNode.containerInfo, t.nodeType) { case 9: t = t.body; break; default: t = t.nodeName === "HTML" ? t.ownerDocument.body : t }for (jt = Hn(t.firstChild), Kt = s, at = !0, Ur = null, Zn = !0, i = i0(s, null, u, i), s.child = i; i;)i.flags = i.flags & -3 | 4096, i = i.sibling } else { if (ri(), u === p) { s = ks(t, s, i); break e } Vt(t, s, u, i) } s = s.child } return s; case 26: return xc(t, s), t === null ? (i = Dy(s.type, null, s.pendingProps, null)) ? s.memoizedState = i : at || (i = s.type, t = s.pendingProps, u = Rc(ue.current).createElement(i), u[Ht] = s, u[Xt] = t, Ft(u, i, t), Mt(u), s.stateNode = u) : s.memoizedState = Dy(s.type, t.memoizedProps, s.pendingProps, t.memoizedState), null; case 27: return be(s), t === null && at && (u = s.stateNode = My(s.type, s.pendingProps, ue.current), Kt = s, Zn = !0, p = jt, dr(s.type) ? 
(Lm = p, jt = Hn(u.firstChild)) : jt = p), Vt(t, s, s.pendingProps.children, i), xc(t, s), t === null && (s.flags |= 4194304), s.child; case 5: return t === null && at && ((p = u = jt) && (u = Y_(u, s.type, s.pendingProps, Zn), u !== null ? (s.stateNode = u, Kt = s, jt = Hn(u.firstChild), Zn = !1, p = !0) : p = !1), p || Vr(s)), be(s), p = s.type, v = s.pendingProps, A = t !== null ? t.memoizedProps : null, u = v.children, Dm(p, v) ? u = null : A !== null && Dm(p, A) && (s.flags |= 32), s.memoizedState !== null && (p = Rf(t, s, u_, null, null, i), zi._currentValue = p), xc(t, s), Vt(t, s, u, i), s.child; case 6: return t === null && at && ((t = i = jt) && (i = G_(i, s.pendingProps, Zn), i !== null ? (s.stateNode = i, Kt = s, jt = null, t = !0) : t = !1), t || Vr(s)), null; case 13: return E0(t, s, i); case 4: return ie(s, s.stateNode.containerInfo), u = s.pendingProps, t === null ? s.child = qo(s, null, u, i) : Vt(t, s, u, i), s.child; case 11: return y0(t, s, s.type, s.pendingProps, i); case 7: return Vt(t, s, s.pendingProps, i), s.child; case 8: return Vt(t, s, s.pendingProps.children, i), s.child; case 12: return Vt(t, s, s.pendingProps.children, i), s.child; case 10: return u = s.pendingProps, Ws(s, s.type, u.value), Vt(t, s, u.children, i), s.child; case 9: return p = s.type._context, u = s.pendingProps.children, Fr(s), p = Zt(p), u = u(p), s.flags |= 1, Vt(t, s, u, i), s.child; case 14: return v0(t, s, s.type, s.pendingProps, i); case 15: return b0(t, s, s.type, s.pendingProps, i); case 19: return k0(t, s, i); case 31: return u = s.pendingProps, i = s.mode, u = { mode: u.mode, children: u.children }, t === null ? (i = yc(u, i), i.ref = s.ref, s.child = i, i.return = s, s = i) : (i = ws(t.child, u), i.ref = s.ref, s.child = i, i.return = s, s = i), s; case 22: return w0(t, s, i); case 24: return Fr(s), u = Zt(Ot), t === null ? 
(p = Sf(), p === null && (p = gt, v = wf(), p.pooledCache = v, v.refCount++, v !== null && (p.pooledCacheLanes |= i), p = v), s.memoizedState = { parent: u, cache: p }, _f(s), Ws(s, Ot, p)) : ((t.lanes & i) !== 0 && (Ef(t, s), mi(s, null, null, i), fi()), p = t.memoizedState, v = s.memoizedState, p.parent !== u ? (p = { parent: u, cache: u }, s.memoizedState = p, s.lanes === 0 && (s.memoizedState = s.updateQueue.baseState = p), Ws(s, Ot, u)) : (u = v.cache, Ws(s, Ot, u), u !== p.cache && bf(s, [Ot], i, !0))), Vt(t, s, s.pendingProps.children, i), s.child; case 29: throw s.pendingProps }throw Error(a(156, s.tag)) } function As(t) { t.flags |= 4 } function M0(t, s) { if (s.type !== "stylesheet" || (s.state.loading & 4) !== 0) t.flags &= -16777217; else if (t.flags |= 16777216, !Hy(s)) { if (s = En.current, s !== null && ((nt & 4194048) === nt ? Wn !== null : (nt & 62914560) !== nt && (nt & 536870912) === 0 || s !== Wn)) throw ui = jf, mx; t.flags |= 8192 } } function vc(t, s) { s !== null && (t.flags |= 4), t.flags & 16384 && (s = t.tag !== 22 ? ot() : 536870912, t.lanes |= s, Xo |= s) } function bi(t, s) { if (!at) switch (t.tailMode) { case "hidden": s = t.tail; for (var i = null; s !== null;)s.alternate !== null && (i = s), s = s.sibling; i === null ? t.tail = null : i.sibling = null; break; case "collapsed": i = t.tail; for (var u = null; i !== null;)i.alternate !== null && (u = i), i = i.sibling; u === null ? s || t.tail === null ? 
t.tail = null : t.tail.sibling = null : u.sibling = null } } function St(t) { var s = t.alternate !== null && t.alternate.child === t.child, i = 0, u = 0; if (s) for (var p = t.child; p !== null;)i |= p.lanes | p.childLanes, u |= p.subtreeFlags & 65011712, u |= p.flags & 65011712, p.return = t, p = p.sibling; else for (p = t.child; p !== null;)i |= p.lanes | p.childLanes, u |= p.subtreeFlags, u |= p.flags, p.return = t, p = p.sibling; return t.subtreeFlags |= u, t.childLanes = i, s } function v_(t, s, i) { var u = s.pendingProps; switch (gf(s), s.tag) { case 31: case 16: case 15: case 0: case 11: case 7: case 8: case 12: case 9: case 14: return St(s), null; case 1: return St(s), null; case 3: return i = s.stateNode, u = null, t !== null && (u = t.memoizedState.cache), s.memoizedState.cache !== u && (s.flags |= 2048), _s(Ot), ge(), i.pendingContext && (i.context = i.pendingContext, i.pendingContext = null), (t === null || t.child === null) && (si(s) ? As(s) : t === null || t.memoizedState.isDehydrated && (s.flags & 256) === 0 || (s.flags |= 1024, lx())), St(s), null; case 26: return i = s.memoizedState, t === null ? (As(s), i !== null ? (St(s), M0(s, i)) : (St(s), s.flags &= -16777217)) : i ? i !== t.memoizedState ? (As(s), St(s), M0(s, i)) : (St(s), s.flags &= -16777217) : (t.memoizedProps !== u && As(s), St(s), s.flags &= -16777217), null; case 27: we(s), i = ue.current; var p = s.type; if (t !== null && s.stateNode != null) t.memoizedProps !== u && As(s); else { if (!u) { if (s.stateNode === null) throw Error(a(166)); return St(s), null } t = W.current, si(s) ? 
ax(s) : (t = My(p, u, i), s.stateNode = t, As(s)) } return St(s), null; case 5: if (we(s), i = s.type, t !== null && s.stateNode != null) t.memoizedProps !== u && As(s); else { if (!u) { if (s.stateNode === null) throw Error(a(166)); return St(s), null } if (t = W.current, si(s)) ax(s); else { switch (p = Rc(ue.current), t) { case 1: t = p.createElementNS("http://www.w3.org/2000/svg", i); break; case 2: t = p.createElementNS("http://www.w3.org/1998/Math/MathML", i); break; default: switch (i) { case "svg": t = p.createElementNS("http://www.w3.org/2000/svg", i); break; case "math": t = p.createElementNS("http://www.w3.org/1998/Math/MathML", i); break; case "script": t = p.createElement("div"), t.innerHTML = "