diff --git a/python/packages/core/agent_framework/_workflows/__init__.py b/python/packages/core/agent_framework/_workflows/__init__.py index 7c8b43dcc6..e0f9a1cbc7 100644 --- a/python/packages/core/agent_framework/_workflows/__init__.py +++ b/python/packages/core/agent_framework/_workflows/__init__.py @@ -52,29 +52,29 @@ handler, ) from ._function_executor import FunctionExecutor, executor +from ._group_chat import ( + DEFAULT_MANAGER_INSTRUCTIONS, + DEFAULT_MANAGER_STRUCTURED_OUTPUT_PROMPT, + GroupChatBuilder, + GroupChatDirective, + GroupChatStateSnapshot, + ManagerDirectiveModel, +) from ._handoff import HandoffBuilder, HandoffUserInputRequest from ._magentic import ( MagenticAgentDeltaEvent, - MagenticAgentExecutor, MagenticAgentMessageEvent, MagenticBuilder, - MagenticCallbackEvent, - MagenticCallbackMode, MagenticContext, MagenticFinalResultEvent, MagenticManagerBase, - MagenticOrchestratorExecutor, MagenticOrchestratorMessageEvent, MagenticPlanReviewDecision, MagenticPlanReviewReply, MagenticPlanReviewRequest, - MagenticProgressLedger, - MagenticProgressLedgerItem, - MagenticRequestMessage, - MagenticResponseMessage, - MagenticStartMessage, StandardMagenticManager, ) +from ._orchestration_state import OrchestrationState from ._request_info_executor import ( PendingRequestDetails, RequestInfoExecutor, @@ -105,6 +105,8 @@ from ._workflow_executor import WorkflowExecutor __all__ = [ + "DEFAULT_MANAGER_INSTRUCTIONS", + "DEFAULT_MANAGER_STRUCTURED_OUTPUT_PROMPT", "DEFAULT_MAX_ITERATIONS", "AgentExecutor", "AgentExecutorRequest", @@ -128,30 +130,26 @@ "FileCheckpointStorage", "FunctionExecutor", "GraphConnectivityError", + "GroupChatBuilder", + "GroupChatDirective", + "GroupChatStateSnapshot", "HandoffBuilder", "HandoffUserInputRequest", "InMemoryCheckpointStorage", "InProcRunnerContext", "MagenticAgentDeltaEvent", - "MagenticAgentExecutor", "MagenticAgentMessageEvent", "MagenticBuilder", - "MagenticCallbackEvent", - "MagenticCallbackMode", "MagenticContext", "MagenticFinalResultEvent", "MagenticManagerBase", - "MagenticOrchestratorExecutor", "MagenticOrchestratorMessageEvent", "MagenticPlanReviewDecision", "MagenticPlanReviewReply", "MagenticPlanReviewRequest", - "MagenticProgressLedger", - "MagenticProgressLedgerItem", - "MagenticRequestMessage", - "MagenticResponseMessage", - "MagenticStartMessage", + "ManagerDirectiveModel", "Message", + "OrchestrationState", "PendingRequestDetails", "RequestInfoEvent", "RequestInfoExecutor", diff --git a/python/packages/core/agent_framework/_workflows/__init__.pyi b/python/packages/core/agent_framework/_workflows/__init__.pyi index 8055e625a2..fbce568ec7 100644 --- a/python/packages/core/agent_framework/_workflows/__init__.pyi +++ b/python/packages/core/agent_framework/_workflows/__init__.pyi @@ -50,29 +50,28 @@ from ._executor import ( handler, ) from ._function_executor import FunctionExecutor, executor +from ._group_chat import ( + DEFAULT_MANAGER_INSTRUCTIONS, + DEFAULT_MANAGER_STRUCTURED_OUTPUT_PROMPT, + GroupChatBuilder, + GroupChatDirective, + GroupChatStateSnapshot, +) from ._handoff import HandoffBuilder, HandoffUserInputRequest from ._magentic import ( MagenticAgentDeltaEvent, - MagenticAgentExecutor, MagenticAgentMessageEvent, MagenticBuilder, - MagenticCallbackEvent, - MagenticCallbackMode, MagenticContext, MagenticFinalResultEvent, MagenticManagerBase, - MagenticOrchestratorExecutor, MagenticOrchestratorMessageEvent, MagenticPlanReviewDecision, MagenticPlanReviewReply, MagenticPlanReviewRequest, - MagenticProgressLedger, - 
MagenticProgressLedgerItem, - MagenticRequestMessage, - MagenticResponseMessage, - MagenticStartMessage, StandardMagenticManager, ) +from ._orchestration_state import OrchestrationState from ._request_info_executor import ( PendingRequestDetails, RequestInfoExecutor, @@ -103,6 +102,8 @@ from ._workflow_context import WorkflowContext from ._workflow_executor import WorkflowExecutor __all__ = [ + "DEFAULT_MANAGER_INSTRUCTIONS", + "DEFAULT_MANAGER_STRUCTURED_OUTPUT_PROMPT", "DEFAULT_MAX_ITERATIONS", "AgentExecutor", "AgentExecutorRequest", @@ -126,30 +127,25 @@ __all__ = [ "FileCheckpointStorage", "FunctionExecutor", "GraphConnectivityError", + "GroupChatBuilder", + "GroupChatDirective", + "GroupChatStateSnapshot", "HandoffBuilder", "HandoffUserInputRequest", "InMemoryCheckpointStorage", "InProcRunnerContext", "MagenticAgentDeltaEvent", - "MagenticAgentExecutor", "MagenticAgentMessageEvent", "MagenticBuilder", - "MagenticCallbackEvent", - "MagenticCallbackMode", "MagenticContext", "MagenticFinalResultEvent", "MagenticManagerBase", - "MagenticOrchestratorExecutor", "MagenticOrchestratorMessageEvent", "MagenticPlanReviewDecision", "MagenticPlanReviewReply", "MagenticPlanReviewRequest", - "MagenticProgressLedger", - "MagenticProgressLedgerItem", - "MagenticRequestMessage", - "MagenticResponseMessage", - "MagenticStartMessage", "Message", + "OrchestrationState", "PendingRequestDetails", "RequestInfoEvent", "RequestInfoExecutor", diff --git a/python/packages/core/agent_framework/_workflows/_agent.py b/python/packages/core/agent_framework/_workflows/_agent.py index cf50f192c5..e1ae5302d1 100644 --- a/python/packages/core/agent_framework/_workflows/_agent.py +++ b/python/packages/core/agent_framework/_workflows/_agent.py @@ -3,7 +3,7 @@ import json import logging import uuid -from collections.abc import AsyncIterable, Sequence +from collections.abc import AsyncIterable from dataclasses import dataclass from datetime import datetime from typing import TYPE_CHECKING, Any, ClassVar, TypedDict, cast @@ -19,7 +19,6 @@ FunctionCallContent, FunctionResultContent, Role, - TextContent, UsageDetails, ) @@ -29,6 +28,7 @@ RequestInfoEvent, WorkflowEvent, ) +from ._message_utils import normalize_messages_input if TYPE_CHECKING: from ._workflow import Workflow @@ -131,7 +131,7 @@ async def run( """ # Collect all streaming updates response_updates: list[AgentRunResponseUpdate] = [] - input_messages = self._normalize_messages(messages) + input_messages = normalize_messages_input(messages) thread = thread or self.get_new_thread() response_id = str(uuid.uuid4()) @@ -165,7 +165,7 @@ async def run_stream( Yields: AgentRunResponseUpdate objects representing the workflow execution progress. 
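+
+        A minimal consumption sketch (hypothetical names; ``workflow_agent`` is
+        assumed to be an instance of this agent class):
+
+        .. code-block:: python
+
+            async for update in workflow_agent.run_stream("Summarize the findings"):
+                print(update)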
""" - input_messages = self._normalize_messages(messages) + input_messages = normalize_messages_input(messages) thread = thread or self.get_new_thread() response_updates: list[AgentRunResponseUpdate] = [] response_id = str(uuid.uuid4()) @@ -225,28 +225,6 @@ async def _run_stream_impl( if update: yield update - def _normalize_messages( - self, - messages: str | ChatMessage | Sequence[str] | Sequence[ChatMessage] | None = None, - ) -> list[ChatMessage]: - """Normalize input messages to a list of ChatMessage objects.""" - if messages is None: - return [] - - if isinstance(messages, str): - return [ChatMessage(role=Role.USER, contents=[TextContent(text=messages)])] - - if isinstance(messages, ChatMessage): - return [messages] - - normalized: list[ChatMessage] = [] - for msg in messages: - if isinstance(msg, str): - normalized.append(ChatMessage(role=Role.USER, contents=[TextContent(text=msg)])) - elif isinstance(msg, ChatMessage): - normalized.append(msg) - return normalized - def _convert_workflow_event_to_agent_update( self, response_id: str, diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index b92c845a4d..07f737db77 100644 --- a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ b/python/packages/core/agent_framework/_workflows/_agent_executor.py @@ -12,6 +12,7 @@ AgentRunUpdateEvent, # type: ignore[reportPrivateUsage] ) from ._executor import Executor, handler +from ._message_utils import normalize_messages_input from ._workflow_context import WorkflowContext logger = logging.getLogger(__name__) @@ -167,7 +168,7 @@ async def from_response( @handler async def from_str(self, text: str, ctx: WorkflowContext[AgentExecutorResponse, AgentRunResponse]) -> None: """Accept a raw user prompt string and run the agent (one-shot).""" - self._cache = [ChatMessage(role="user", text=text)] # type: ignore[arg-type] + self._cache = normalize_messages_input(text) await self._run_agent_and_emit(ctx) @handler @@ -177,15 +178,50 @@ async def from_message( ctx: WorkflowContext[AgentExecutorResponse, AgentRunResponse], ) -> None: """Accept a single ChatMessage as input.""" - self._cache = [message] + self._cache = normalize_messages_input(message) await self._run_agent_and_emit(ctx) @handler async def from_messages( self, - messages: list[ChatMessage], + messages: list[str | ChatMessage], ctx: WorkflowContext[AgentExecutorResponse, AgentRunResponse], ) -> None: - """Accept a list of ChatMessage objects as conversation context.""" - self._cache = list(messages) + """Accept a list of chat inputs (strings or ChatMessage) as conversation context.""" + self._cache = normalize_messages_input(messages) await self._run_agent_and_emit(ctx) + + def snapshot_state(self) -> dict[str, Any]: + """Capture current executor state for checkpointing. + + Returns: + Dict containing serialized cache state + """ + from ._conversation_state import encode_chat_messages + + return { + "cache": encode_chat_messages(self._cache), + } + + def restore_state(self, state: dict[str, Any]) -> None: + """Restore executor state from checkpoint. 
+ + Args: + state: Checkpoint data dict + """ + from ._conversation_state import decode_chat_messages + + cache_payload = state.get("cache") + if cache_payload: + try: + self._cache = decode_chat_messages(cache_payload) + except Exception as exc: + logger.warning("Failed to restore cache: %s", exc) + self._cache = [] + else: + self._cache = [] + + def reset(self) -> None: + """Reset the internal cache of the executor.""" + logger.debug("AgentExecutor %s: Resetting cache", self.id) + self._cache.clear() diff --git a/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py b/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py new file mode 100644 index 0000000000..5752febab5 --- /dev/null +++ b/python/packages/core/agent_framework/_workflows/_base_group_chat_orchestrator.py @@ -0,0 +1,265 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Base class for group chat orchestrators that manages conversation flow and participant selection.""" + +import inspect +import logging +from abc import ABC, abstractmethod +from collections.abc import Awaitable, Callable, Sequence +from typing import Any + +from .._types import ChatMessage +from ._executor import Executor +from ._orchestrator_helpers import ParticipantRegistry +from ._workflow_context import WorkflowContext + +logger = logging.getLogger(__name__) + + +class BaseGroupChatOrchestrator(Executor, ABC): + """Abstract base class for group chat orchestrators. + + Provides shared functionality for participant registration, routing, + and round limit checking that is common across all group chat patterns. + + Subclasses must implement pattern-specific orchestration logic while + inheriting the common participant management infrastructure. + """ + + def __init__(self, executor_id: str) -> None: + """Initialize base orchestrator. + + Args: + executor_id: Unique identifier for this orchestrator executor + """ + super().__init__(executor_id) + self._registry = ParticipantRegistry() + # Shared conversation state management + self._conversation: list[ChatMessage] = [] + self._round_index: int = 0 + self._max_rounds: int | None = None + self._termination_condition: Callable[[list[ChatMessage]], bool | Awaitable[bool]] | None = None + + def register_participant_entry(self, name: str, *, entry_id: str, is_agent: bool) -> None: + """Record routing details for a participant's entry executor. + + This method provides a unified interface for registering participants + across all orchestrator patterns, whether they are agents or custom executors. + + Args: + name: Participant name (used for selection and tracking) + entry_id: Executor ID for this participant's entry point + is_agent: Whether this is an AgentExecutor (True) or custom Executor (False) + """ + self._registry.register(name, entry_id=entry_id, is_agent=is_agent) + + # Conversation state management (shared across all patterns) + + def _append_messages(self, messages: Sequence[ChatMessage]) -> None: + """Append messages to the conversation history. + + Args: + messages: Messages to append + """ + self._conversation.extend(messages) + + def _get_conversation(self) -> list[ChatMessage]: + """Get a copy of the current conversation. 
+ + Returns: + Cloned conversation list + """ + return list(self._conversation) + + def _clear_conversation(self) -> None: + """Clear the conversation history.""" + self._conversation.clear() + + def _increment_round(self) -> None: + """Increment the round counter.""" + self._round_index += 1 + + async def _check_termination(self) -> bool: + """Check if conversation should terminate based on termination condition. + + Supports both synchronous and asynchronous termination conditions. + + Returns: + True if termination condition met, False otherwise + """ + if self._termination_condition is None: + return False + + result = self._termination_condition(self._get_conversation()) + if inspect.iscoroutine(result) or inspect.isawaitable(result): + result = await result + return bool(result) + + @abstractmethod + def _get_author_name(self) -> str: + """Get the author name for orchestrator-generated messages. + + Subclasses must implement this to provide a stable author name + for completion messages and other orchestrator-generated content. + + Returns: + Author name to use for messages generated by this orchestrator + """ + ... + + def _create_completion_message( + self, + text: str | None = None, + reason: str = "completed", + ) -> ChatMessage: + """Create a standardized completion message. + + Args: + text: Optional message text (auto-generated if None) + reason: Completion reason for default text + + Returns: + ChatMessage with completion content + """ + from .._types import Role + + message_text = text or f"Conversation {reason}." + return ChatMessage( + role=Role.ASSISTANT, + text=message_text, + author_name=self._get_author_name(), + ) + + # Participant routing (shared across all patterns) + + async def _route_to_participant( + self, + participant_name: str, + conversation: list[ChatMessage], + ctx: WorkflowContext[Any, Any], + *, + instruction: str | None = None, + task: ChatMessage | None = None, + metadata: dict[str, Any] | None = None, + ) -> None: + """Route a conversation to a participant. + + This method handles the dual envelope pattern: + - AgentExecutors receive AgentExecutorRequest (messages only) + - Custom executors receive GroupChatRequestMessage (full context) + + Args: + participant_name: Name of the participant to route to + conversation: Conversation history to send + ctx: Workflow context for message routing + instruction: Optional instruction from manager/orchestrator + task: Optional task context + metadata: Optional metadata dict + + Raises: + ValueError: If participant is not registered + """ + from ._agent_executor import AgentExecutorRequest + from ._orchestrator_helpers import prepare_participant_request + + entry_id = self._registry.get_entry_id(participant_name) + if entry_id is None: + raise ValueError(f"No registered entry executor for participant '{participant_name}'.") + + if self._registry.is_agent(participant_name): + # AgentExecutors receive simple message list + await ctx.send_message( + AgentExecutorRequest(messages=conversation, should_respond=True), + target_id=entry_id, + ) + else: + # Custom executors receive full context envelope + request = prepare_participant_request( + participant_name=participant_name, + conversation=conversation, + instruction=instruction or "", + task=task, + metadata=metadata, + ) + await ctx.send_message(request, target_id=entry_id) + + # Round limit enforcement (shared across all patterns) + + def _check_round_limit(self) -> bool: + """Check if round limit has been reached. 
+ + Uses instance variables _round_index and _max_rounds. + + Returns: + True if limit reached, False otherwise + """ + if self._max_rounds is None: + return False + + if self._round_index >= self._max_rounds: + logger.warning( + "%s reached max_rounds=%s; forcing completion.", + self.__class__.__name__, + self._max_rounds, + ) + return True + + return False + + # State persistence (shared across all patterns) + + def snapshot_state(self) -> dict[str, Any]: + """Capture current orchestrator state for checkpointing. + + Default implementation uses OrchestrationState to serialize common state. + Subclasses should override _snapshot_pattern_metadata() to add pattern-specific data. + + Returns: + Serialized state dict + """ + from ._orchestration_state import OrchestrationState + + state = OrchestrationState( + conversation=list(self._conversation), + round_index=self._round_index, + metadata=self._snapshot_pattern_metadata(), + ) + return state.to_dict() + + def _snapshot_pattern_metadata(self) -> dict[str, Any]: + """Serialize pattern-specific state. + + Override this method to add pattern-specific checkpoint data. + + Returns: + Dict with pattern-specific state (empty by default) + """ + return {} + + def restore_state(self, state: dict[str, Any]) -> None: + """Restore orchestrator state from checkpoint. + + Default implementation uses OrchestrationState to deserialize common state. + Subclasses should override _restore_pattern_metadata() to restore pattern-specific data. + + Args: + state: Serialized state dict + """ + from ._orchestration_state import OrchestrationState + + orch_state = OrchestrationState.from_dict(state) + self._conversation = list(orch_state.conversation) + self._round_index = orch_state.round_index + self._restore_pattern_metadata(orch_state.metadata) + + def _restore_pattern_metadata(self, metadata: dict[str, Any]) -> None: + """Restore pattern-specific state. + + Override this method to restore pattern-specific checkpoint data.
+ + Args: + metadata: Pattern-specific state dict + """ + pass diff --git a/python/packages/core/agent_framework/_workflows/_concurrent.py b/python/packages/core/agent_framework/_workflows/_concurrent.py index 2d78126553..6b3e1ac05e 100644 --- a/python/packages/core/agent_framework/_workflows/_concurrent.py +++ b/python/packages/core/agent_framework/_workflows/_concurrent.py @@ -13,6 +13,7 @@ from ._agent_executor import AgentExecutorRequest, AgentExecutorResponse from ._checkpoint import CheckpointStorage from ._executor import Executor, handler +from ._message_utils import normalize_messages_input from ._workflow import Workflow from ._workflow_builder import WorkflowBuilder from ._workflow_context import WorkflowContext @@ -50,17 +51,21 @@ async def from_request(self, request: AgentExecutorRequest, ctx: WorkflowContext @handler async def from_str(self, prompt: str, ctx: WorkflowContext[AgentExecutorRequest]) -> None: - request = AgentExecutorRequest(messages=[ChatMessage(Role.USER, text=prompt)], should_respond=True) + request = AgentExecutorRequest(messages=normalize_messages_input(prompt), should_respond=True) await ctx.send_message(request) @handler - async def from_message(self, message: ChatMessage, ctx: WorkflowContext[AgentExecutorRequest]) -> None: # type: ignore[name-defined] - request = AgentExecutorRequest(messages=[message], should_respond=True) + async def from_message(self, message: ChatMessage, ctx: WorkflowContext[AgentExecutorRequest]) -> None: + request = AgentExecutorRequest(messages=normalize_messages_input(message), should_respond=True) await ctx.send_message(request) @handler - async def from_messages(self, messages: list[ChatMessage], ctx: WorkflowContext[AgentExecutorRequest]) -> None: # type: ignore[name-defined] - request = AgentExecutorRequest(messages=list(messages), should_respond=True) + async def from_messages( + self, + messages: list[str | ChatMessage], + ctx: WorkflowContext[AgentExecutorRequest], + ) -> None: + request = AgentExecutorRequest(messages=normalize_messages_input(messages), should_respond=True) await ctx.send_message(request) diff --git a/python/packages/core/agent_framework/_workflows/_conversation_history.py b/python/packages/core/agent_framework/_workflows/_conversation_history.py new file mode 100644 index 0000000000..7e19671b27 --- /dev/null +++ b/python/packages/core/agent_framework/_workflows/_conversation_history.py @@ -0,0 +1,50 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Helpers for managing chat conversation history. + +These utilities operate on standard `list[ChatMessage]` collections and simple +dictionary snapshots so orchestrators can share logic without new mixins. 
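+
+A small usage sketch (illustrative only; assumes ``ChatMessage`` and ``Role``
+are imported from the public ``agent_framework`` package):
+
+    history = [
+        ChatMessage(role=Role.SYSTEM, text="You are terse."),
+        ChatMessage(role=Role.USER, text="Summarize the report."),
+    ]
+    task = latest_user_message(history)  # returns the USER message above
+    task = ensure_author(task, "user")  # fills author_name only if missing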
+""" + +import json +from collections.abc import Mapping, Sequence +from typing import Any + +from .._types import ChatMessage + + +def latest_user_message(conversation: Sequence[ChatMessage]) -> ChatMessage: + """Return the most recent user-authored message from `conversation`.""" + for message in reversed(conversation): + role_value = getattr(message.role, "value", message.role) + if str(role_value).lower() == "user": + return message + raise ValueError("No user message in conversation") + + +def ensure_author(message: ChatMessage, fallback: str) -> ChatMessage: + """Attach `fallback` author if message is missing `author_name`.""" + message.author_name = message.author_name or fallback + return message + + +def snapshot_state(conversation: Sequence[ChatMessage]) -> dict[str, Any]: + """Build an immutable snapshot for checkpoint storage.""" + if hasattr(conversation, "to_dict"): + result = conversation.to_dict() # type: ignore[attr-defined] + if isinstance(result, dict): + return result # type: ignore[return-value] + if isinstance(result, Mapping): + return dict(result) # type: ignore[arg-type] + serialisable: list[dict[str, Any]] = [] + for message in conversation: + if hasattr(message, "to_dict") and callable(message.to_dict): # type: ignore[attr-defined] + msg_dict = message.to_dict() # type: ignore[attr-defined] + serialisable.append(dict(msg_dict) if isinstance(msg_dict, Mapping) else msg_dict) # type: ignore[arg-type] + elif hasattr(message, "to_json") and callable(message.to_json): # type: ignore[attr-defined] + json_payload = message.to_json() # type: ignore[attr-defined] + parsed = json.loads(json_payload) if isinstance(json_payload, str) else json_payload + serialisable.append(dict(parsed) if isinstance(parsed, Mapping) else parsed) # type: ignore[arg-type] + else: + serialisable.append(dict(getattr(message, "__dict__", {}))) # type: ignore[arg-type] + return {"messages": serialisable} diff --git a/python/packages/core/agent_framework/_workflows/_executor.py b/python/packages/core/agent_framework/_workflows/_executor.py index 1f822e870a..852a6eef8e 100644 --- a/python/packages/core/agent_framework/_workflows/_executor.py +++ b/python/packages/core/agent_framework/_workflows/_executor.py @@ -450,13 +450,7 @@ def to_dict(self) -> dict[str, Any]: def handler( func: Callable[[ExecutorT, Any, ContextT], Awaitable[Any]], -) -> ( - Callable[[ExecutorT, Any, ContextT], Awaitable[Any]] - | Callable[ - [Callable[[ExecutorT, Any, ContextT], Awaitable[Any]]], - Callable[[ExecutorT, Any, ContextT], Awaitable[Any]], - ] -): +) -> Callable[[ExecutorT, Any, ContextT], Awaitable[Any]]: """Decorator to register a handler for an executor. Args: diff --git a/python/packages/core/agent_framework/_workflows/_group_chat.py b/python/packages/core/agent_framework/_workflows/_group_chat.py new file mode 100644 index 0000000000..84859a4f0c --- /dev/null +++ b/python/packages/core/agent_framework/_workflows/_group_chat.py @@ -0,0 +1,1442 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Group chat orchestration primitives. + +This module introduces a reusable orchestration surface for manager-directed +multi-agent conversations. The key components are: + +- GroupChatRequestMessage / GroupChatResponseMessage: canonical envelopes used + between the orchestrator and participants. +- Group chat managers: minimal asynchronous callables for pluggable coordination logic. 
+- GroupChatOrchestratorExecutor: runtime state machine that delegates to a + manager to select the next participant or complete the task. +- GroupChatBuilder: high-level builder that wires managers and participants + into a workflow graph. It mirrors the ergonomics of SequentialBuilder and + ConcurrentBuilder while allowing Magentic to reuse the same infrastructure. + +The default wiring uses AgentExecutor under the hood for agent participants so +existing observability and streaming semantics continue to apply. +""" + +import inspect +import itertools +import logging +from collections.abc import Awaitable, Callable, Mapping, Sequence +from dataclasses import dataclass, field +from types import MappingProxyType +from typing import Any, TypeAlias +from uuid import uuid4 + +from pydantic import BaseModel, Field + +from .._agents import AgentProtocol +from .._clients import ChatClientProtocol +from .._types import ChatMessage, Role +from ._agent_executor import AgentExecutorRequest, AgentExecutorResponse +from ._base_group_chat_orchestrator import BaseGroupChatOrchestrator +from ._checkpoint import CheckpointStorage +from ._conversation_history import ensure_author, latest_user_message +from ._executor import Executor, handler +from ._participant_utils import GroupChatParticipantSpec, prepare_participant_metadata, wrap_participant +from ._workflow import Workflow +from ._workflow_builder import WorkflowBuilder +from ._workflow_context import WorkflowContext + +logger = logging.getLogger(__name__) + + +# region Message primitives + + +@dataclass +class _GroupChatRequestMessage: + """Internal: Request envelope sent from the orchestrator to a participant.""" + + agent_name: str + conversation: list[ChatMessage] = field(default_factory=list) # type: ignore + instruction: str = "" + task: ChatMessage | None = None + metadata: dict[str, Any] | None = None + + +@dataclass +class _GroupChatResponseMessage: + """Internal: Response envelope emitted by participants back to the orchestrator.""" + + agent_name: str + message: ChatMessage + + +@dataclass +class _GroupChatTurn: + """Internal: Represents a single turn in the manager-participant conversation.""" + + speaker: str + role: str + message: ChatMessage + + +@dataclass +class GroupChatDirective: + """Instruction emitted by a group chat manager implementation.""" + + agent_name: str | None = None + instruction: str | None = None + metadata: dict[str, Any] | None = None + finish: bool = False + final_message: ChatMessage | None = None + + +# endregion + + +# region Manager callable + + +GroupChatStateSnapshot = Mapping[str, Any] +_GroupChatManagerFn = Callable[[GroupChatStateSnapshot], Awaitable[GroupChatDirective]] + + +async def _maybe_await(value: Any) -> Any: + """Await value if it is awaitable; otherwise return as-is.""" + if inspect.isawaitable(value): + return await value + return value + + +_GroupChatParticipantPipeline: TypeAlias = Sequence[Executor] + + +@dataclass +class _GroupChatConfig: + """Internal: Configuration passed to factories during workflow assembly. 
+ + Attributes: + manager: Manager instance responsible for orchestration decisions (None when custom factory handles it) + manager_name: Display name for the manager in conversation history + participants: Mapping of participant names to their specifications + max_rounds: Optional limit on manager selection rounds to prevent infinite loops + orchestrator: Orchestrator executor instance (populated during build) + """ + + manager: _GroupChatManagerFn | None + manager_name: str + participants: Mapping[str, GroupChatParticipantSpec] + max_rounds: int | None = None + orchestrator: Executor | None = None + participant_aliases: dict[str, str] = field(default_factory=dict) # type: ignore[type-arg] + participant_executors: dict[str, Executor] = field(default_factory=dict) # type: ignore[type-arg] + + +# endregion + + +# region Default participant factory + +_GroupChatOrchestratorFactory: TypeAlias = Callable[[_GroupChatConfig], Executor] +_InterceptorSpec: TypeAlias = tuple[Callable[[_GroupChatConfig], Executor], Callable[[Any], bool]] + + +def _default_participant_factory( + spec: GroupChatParticipantSpec, + wiring: _GroupChatConfig, +) -> _GroupChatParticipantPipeline: + """Default factory for constructing participant pipeline nodes in the workflow graph. + + Creates a single AgentExecutor node for AgentProtocol participants or a passthrough executor + for custom participants. Translation between group-chat envelopes and the agent runtime is now + handled inside the orchestrator, removing the need for dedicated ingress/egress adapters. + + Args: + spec: Participant specification containing name, instance, and description + wiring: _GroupChatConfig configuration for accessing cached executors + + Returns: + Sequence of executors representing the participant pipeline in execution order + + Behavior: + - AgentProtocol participants are wrapped in AgentExecutor with deterministic IDs + - Executor participants are wired directly without additional adapters + """ + participant = spec.participant + if isinstance(participant, Executor): + return (participant,) + + cached = wiring.participant_executors.get(spec.name) + if cached is not None: + return (cached,) + + agent_executor = wrap_participant(participant, executor_id=f"groupchat_agent:{spec.name}") + return (agent_executor,) + + +# endregion + + +# region Default orchestrator + + +class GroupChatOrchestratorExecutor(BaseGroupChatOrchestrator): + """Executor that orchestrates a group chat between multiple participants using a manager. + + This is the central runtime state machine that drives multi-agent conversations. It + maintains conversation state, delegates speaker selection to a manager, routes messages + to participants, and collects responses in a loop until the manager signals completion.
+ + Core responsibilities: + - Accept initial input as str, ChatMessage, or list[ChatMessage] + - Maintain conversation history and turn tracking + - Query manager for next action (select participant or finish) + - Route requests to selected participants using AgentExecutorRequest or GroupChatRequestMessage + - Collect participant responses and append to conversation + - Enforce optional round limits to prevent infinite loops + - Yield final completion message and transition to idle state + + State management: + - _conversation: Growing list of all messages (user, manager, agents) + - _history: Turn-by-turn record with speaker attribution and roles + - _task_message: Original user task extracted from input + - _pending_agent: Name of agent currently processing a request + - _round_index: Count of manager selection rounds for limit enforcement + + Manager interaction: + The orchestrator builds immutable state snapshots and passes them to the manager + callable. The manager returns a GroupChatDirective indicating either: + - Next participant to speak (with optional instruction) + - Finish signal (with optional final message) + + Message flow topology: + User input -> orchestrator -> manager -> orchestrator -> participant -> orchestrator + (loops until manager returns finish directive) + + Why this design: + - Separates orchestration logic (this class) from selection logic (manager) + - Manager is stateless and testable in isolation + - Orchestrator handles all state mutations and message routing + - Targeted routing to registered participant entries keeps the graph structure simple + + Args: + manager: Callable that selects the next participant or finishes based on state snapshot + participants: Mapping of participant names to descriptions (for manager context) + manager_name: Display name for manager in conversation history + max_rounds: Optional limit on manager selection rounds (None = unlimited) + executor_id: Optional custom ID for observability (auto-generated if not provided) + """ + + def __init__( + self, + manager: _GroupChatManagerFn, + *, + participants: Mapping[str, str], + manager_name: str, + max_rounds: int | None = None, + executor_id: str | None = None, + ) -> None: + super().__init__(executor_id or f"groupchat_orchestrator_{uuid4().hex[:8]}") + self._manager = manager + self._participants = dict(participants) + self._manager_name = manager_name + self._max_rounds = max_rounds + self._history: list[_GroupChatTurn] = [] + self._task_message: ChatMessage | None = None + self._pending_agent: str | None = None + # Stashes the initial conversation list until _handle_task_message normalizes it into _conversation. + self._pending_initial_conversation: list[ChatMessage] | None = None + + def _get_author_name(self) -> str: + """Get the manager name for orchestrator-generated messages.""" + return self._manager_name + + def _build_state(self) -> GroupChatStateSnapshot: + """Build a snapshot of current orchestration state for the manager. + + Packages conversation history, participant metadata, and round tracking into + an immutable mapping that the manager uses to make speaker selection decisions.
+ + Returns: + Mapping containing all context needed for manager decision-making + + Raises: + RuntimeError: If called before task message initialization (defensive check) + + When this is called: + - After initial input is processed (first manager query) + - After each participant response (subsequent manager queries) + """ + if self._task_message is None: + raise RuntimeError("GroupChatOrchestratorExecutor state not initialized with task message.") + snapshot: dict[str, Any] = { + "task": self._task_message, + "participants": dict(self._participants), + "conversation": tuple(self._conversation), + "history": tuple(self._history), + "pending_agent": self._pending_agent, + "round_index": self._round_index, + } + return MappingProxyType(snapshot) + + def _snapshot_pattern_metadata(self) -> dict[str, Any]: + """Serialize GroupChat-specific state for checkpointing. + + Returns: + Dict with participants, manager name, history, and pending agent + """ + return { + "participants": dict(self._participants), + "manager_name": self._manager_name, + "pending_agent": self._pending_agent, + "task_message": self._task_message.to_dict() if self._task_message else None, + "history": [ + {"speaker": turn.speaker, "role": turn.role, "message": turn.message.to_dict()} + for turn in self._history + ], + } + + def _restore_pattern_metadata(self, metadata: dict[str, Any]) -> None: + """Restore GroupChat-specific state from checkpoint. + + Args: + metadata: Pattern-specific state dict + """ + if "participants" in metadata: + self._participants = dict(metadata["participants"]) + if "manager_name" in metadata: + self._manager_name = metadata["manager_name"] + if "pending_agent" in metadata: + self._pending_agent = metadata["pending_agent"] + task_msg = metadata.get("task_message") + if task_msg: + self._task_message = ChatMessage.from_dict(task_msg) + if "history" in metadata: + self._history = [ + _GroupChatTurn( + speaker=turn["speaker"], + role=turn["role"], + message=ChatMessage.from_dict(turn["message"]), + ) + for turn in metadata["history"] + ] + + async def _apply_directive( + self, + directive: GroupChatDirective, + ctx: WorkflowContext[AgentExecutorRequest | _GroupChatRequestMessage, ChatMessage], + ) -> None: + """Execute a manager directive by either finishing the workflow or routing to a participant. + + This is the core routing logic that interprets manager decisions. It handles two cases: + 1. Finish directive: append final message, update state, yield output, become idle + 2. Agent selection: build request envelope, route to participant, increment round counter + + Args: + directive: Manager's decision (finish or select next participant) + ctx: Workflow context for sending messages and yielding output + + Behavior for finish directive: + - Uses provided final_message or creates default completion message + - Ensures author_name is set to manager for attribution + - Appends to conversation and history for complete record + - Yields message as workflow output + - Orchestrator becomes idle (no further processing) + + Behavior for agent selection: + - Validates agent_name exists in participants + - Optionally appends the manager instruction as an ASSISTANT message attributed to the manager + - Prepares full conversation context for the participant + - Routes request directly to the participant entry executor + - Increments round counter and enforces max_rounds if configured + + Round limit enforcement: + If max_rounds is reached, recursively calls _apply_directive with a finish + directive to gracefully terminate the conversation.
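+
+        Directive shapes handled here (illustrative only):
+
+        .. code-block:: python
+
+            # Continue: hand the floor to "writer" with an optional instruction.
+            GroupChatDirective(agent_name="writer", instruction="Draft the summary.")
+
+            # Finish: yield a final message authored by the manager.
+            GroupChatDirective(
+                finish=True,
+                final_message=ChatMessage(role=Role.ASSISTANT, text="All done."),
+            )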
+ + Raises: + ValueError: If directive lacks agent_name when finish=False, or if + agent_name doesn't match any participant + """ + if directive.finish: + final_message = directive.final_message + if final_message is None: + final_message = self._create_completion_message( + text="Completed without final summary.", + reason="no summary provided", + ) + final_message = ensure_author(final_message, self._manager_name) + + self._conversation.extend((final_message,)) + self._history.append(_GroupChatTurn(self._manager_name, "manager", final_message)) + self._pending_agent = None + await ctx.yield_output(final_message) + return + + agent_name = directive.agent_name + if not agent_name: + raise ValueError("Directive must include agent_name when finish is False.") + if agent_name not in self._participants: + raise ValueError(f"Manager selected unknown participant '{agent_name}'.") + + instruction = directive.instruction or "" + conversation = list(self._conversation) + if instruction: + manager_message = ensure_author( + self._create_completion_message(text=instruction, reason="instruction"), + self._manager_name, + ) + conversation.extend((manager_message,)) + self._conversation.extend((manager_message,)) + self._history.append(_GroupChatTurn(self._manager_name, "manager", manager_message)) + + self._pending_agent = agent_name + self._increment_round() + + # Use inherited routing method from BaseGroupChatOrchestrator + await self._route_to_participant( + participant_name=agent_name, + conversation=conversation, + ctx=ctx, + instruction=instruction, + task=self._task_message, + metadata=directive.metadata, + ) + + if self._check_round_limit(): + await self._apply_directive( + GroupChatDirective( + finish=True, + final_message=self._create_completion_message( + text="Conversation halted after reaching manager round limit.", + reason="max_rounds reached", + ), + ), + ctx, + ) + + async def _ingest_participant_message( + self, + participant_name: str, + message: ChatMessage, + ctx: WorkflowContext[AgentExecutorRequest | _GroupChatRequestMessage, ChatMessage], + ) -> None: + """Common response ingestion logic shared by agent and custom participants.""" + if participant_name not in self._participants: + raise ValueError(f"Received response from unknown participant '{participant_name}'.") + + message = ensure_author(message, participant_name) + self._conversation.extend((message,)) + self._history.append(_GroupChatTurn(participant_name, "agent", message)) + self._pending_agent = None + + if self._check_round_limit(): + await ctx.yield_output( + self._create_completion_message( + text="Conversation halted after reaching manager round limit.", + reason="max_rounds reached after response", + ) + ) + return + + directive = await self._manager(self._build_state()) + await self._apply_directive(directive, ctx) + + @staticmethod + def _extract_agent_message(response: AgentExecutorResponse, participant_name: str) -> ChatMessage: + """Select the final assistant message from an AgentExecutor response.""" + from ._orchestrator_helpers import create_completion_message + + final_message: ChatMessage | None = None + candidate_sequences: tuple[Sequence[ChatMessage] | None, ...] 
= ( + response.agent_run_response.messages, + response.full_conversation, + ) + for sequence in candidate_sequences: + if not sequence: + continue + for candidate in reversed(sequence): + if candidate.role == Role.ASSISTANT: + final_message = candidate + break + if final_message is not None: + break + + if final_message is None: + final_message = create_completion_message( + text="", + author_name=participant_name, + reason="empty response", + ) + return ensure_author(final_message, participant_name) + + async def _handle_task_message( + self, + task_message: ChatMessage, + ctx: WorkflowContext[AgentExecutorRequest | _GroupChatRequestMessage, ChatMessage], + ) -> None: + """Initialize orchestrator state and start the manager-directed conversation loop. + + This internal method is called by all public handlers (str, ChatMessage, list[ChatMessage]) + after normalizing their input. It initializes conversation state, queries the manager + for the first action, and applies the resulting directive. + + Args: + task_message: The primary user task message (extracted or provided directly) + ctx: Workflow context for sending messages and yielding output + + Behavior: + - Sets task_message for manager context + - Initializes conversation from pending_initial_conversation if present + - Otherwise starts fresh with just the task message + - Builds turn history with speaker attribution + - Resets pending_agent and round_index + - Queries manager for first action + - Applies directive to start the conversation loop + + State initialization: + - _conversation: Full message list for context + - _history: Turn-by-turn record with speaker names and roles + - _pending_agent: None (no active request) + - _round_index: 0 (first manager query) + + Why pending_initial_conversation exists: + The handle_conversation handler supplies an explicit task (the most recent user + message in the list) but still forwards the entire conversation for context. The full list is + stashed in _pending_initial_conversation to preserve all context when initializing state. + """ + self._task_message = task_message + if self._pending_initial_conversation: + initial_conversation = list(self._pending_initial_conversation) + self._pending_initial_conversation = None + self._conversation = initial_conversation + self._history = [ + _GroupChatTurn( + msg.author_name or msg.role.value, + msg.role.value, + msg, + ) + for msg in initial_conversation + ] + else: + self._conversation = [task_message] + self._history = [_GroupChatTurn("user", "user", task_message)] + self._pending_agent = None + self._round_index = 0 + directive = await self._manager(self._build_state()) + await self._apply_directive(directive, ctx) + + @handler + async def handle_str( + self, + task: str, + ctx: WorkflowContext[AgentExecutorRequest | _GroupChatRequestMessage, ChatMessage], + ) -> None: + """Handler for string input as workflow entry point. + + Wraps the string in a USER role ChatMessage and delegates to _handle_task_message. + + Args: + task: Plain text task description from user + ctx: Workflow context + + Usage: + workflow.run("Write a blog post about AI agents") + """ + await self._handle_task_message(ChatMessage(role=Role.USER, text=task), ctx) + + @handler + async def handle_chat_message( + self, + task_message: ChatMessage, + ctx: WorkflowContext[AgentExecutorRequest | _GroupChatRequestMessage, ChatMessage], + ) -> None: + """Handler for ChatMessage input as workflow entry point. + + Directly delegates to _handle_task_message for state initialization.
+ + Args: + task_message: Structured chat message from user (may include metadata, role, etc.) + ctx: Workflow context + + Usage: + workflow.run(ChatMessage(role=Role.USER, text="Analyze this data")) + """ + await self._handle_task_message(task_message, ctx) + + @handler + async def handle_conversation( + self, + conversation: list[ChatMessage], + ctx: WorkflowContext[AgentExecutorRequest | _GroupChatRequestMessage, ChatMessage], + ) -> None: + """Handler for conversation history as workflow entry point. + + Accepts a pre-existing conversation and uses the most recent user message in the list as the task. + Preserves the full conversation for state initialization. + + Args: + conversation: List of chat messages (system, user, assistant) + ctx: Workflow context + + Raises: + ValueError: If conversation list is empty + + Behavior: + - Validates conversation is non-empty + - Clones conversation to avoid mutation + - Extracts task message (most recent USER message) + - Stashes full conversation in _pending_initial_conversation + - Delegates to _handle_task_message for initialization + + Usage: + existing_messages = [ + ChatMessage(role=Role.SYSTEM, text="You are an expert"), + ChatMessage(role=Role.USER, text="Help me with this task") + ] + workflow.run(existing_messages) + """ + if not conversation: + raise ValueError("GroupChat workflow requires at least one chat message.") + self._pending_initial_conversation = list(conversation) + task_message = latest_user_message(conversation) + await self._handle_task_message(task_message, ctx) + + @handler + async def handle_agent_response( + self, + response: _GroupChatResponseMessage, + ctx: WorkflowContext[AgentExecutorRequest | _GroupChatRequestMessage, ChatMessage], + ) -> None: + """Handle responses from custom participant executors.""" + await self._ingest_participant_message(response.agent_name, response.message, ctx) + + @handler + async def handle_agent_executor_response( + self, + response: AgentExecutorResponse, + ctx: WorkflowContext[AgentExecutorRequest | _GroupChatRequestMessage, ChatMessage], + ) -> None: + """Handle direct AgentExecutor responses.""" + participant_name = self._registry.get_participant_name(response.executor_id) + if participant_name is None: + logger.debug( + "Ignoring response from unregistered agent executor '%s'.", + response.executor_id, + ) + return + message = self._extract_agent_message(response, participant_name) + await self._ingest_participant_message(participant_name, message, ctx) + + +def _default_orchestrator_factory(wiring: _GroupChatConfig) -> Executor: + """Default factory for creating the GroupChatOrchestratorExecutor instance. + + This is the internal implementation used by GroupChatBuilder to instantiate the + orchestrator. It extracts participant descriptions from the wiring configuration + and passes them to the orchestrator for manager context. + + Args: + wiring: Complete workflow configuration assembled by the builder + + Returns: + Initialized GroupChatOrchestratorExecutor ready to coordinate the conversation + + Behavior: + - Extracts participant names and descriptions for manager context + - Forwards manager instance, manager name, and max_rounds settings + - Allows orchestrator to auto-generate its executor ID + + Why descriptions are extracted: + The manager needs participant descriptions (not full specs) to make informed + selection decisions. The orchestrator doesn't need participant instances directly + since routing is handled by the workflow graph.
+ + Raises: + RuntimeError: If manager is None (should not happen when using default factory) + """ + if wiring.manager is None: + raise RuntimeError("Default orchestrator factory requires a manager to be set") + + return GroupChatOrchestratorExecutor( + manager=wiring.manager, + participants={name: spec.description for name, spec in wiring.participants.items()}, + manager_name=wiring.manager_name, + max_rounds=wiring.max_rounds, + ) + + +def group_chat_orchestrator(factory: _GroupChatOrchestratorFactory | None = None) -> _GroupChatOrchestratorFactory: + """Return a callable orchestrator factory, defaulting to the built-in implementation.""" + return factory or _default_orchestrator_factory + + +def assemble_group_chat_workflow( + *, + wiring: _GroupChatConfig, + participant_factory: Callable[[GroupChatParticipantSpec, _GroupChatConfig], _GroupChatParticipantPipeline], + orchestrator_factory: _GroupChatOrchestratorFactory = _default_orchestrator_factory, + interceptors: Sequence[_InterceptorSpec] | None = None, + checkpoint_storage: CheckpointStorage | None = None, + builder: WorkflowBuilder | None = None, + return_builder: bool = False, +) -> Workflow | tuple[WorkflowBuilder, Executor]: + """Build the workflow graph shared by group-chat style orchestrators.""" + interceptor_specs = interceptors or () + + orchestrator = wiring.orchestrator or orchestrator_factory(wiring) + wiring.orchestrator = orchestrator + + workflow_builder = builder or WorkflowBuilder() + workflow_builder = workflow_builder.set_start_executor(orchestrator) + + for name, spec in wiring.participants.items(): + pipeline = list(participant_factory(spec, wiring)) + if not pipeline: + raise ValueError( + f"Participant factory returned an empty pipeline for '{name}'. " + "Provide at least one executor per participant." + ) + entry_executor = pipeline[0] + exit_executor = pipeline[-1] + + register_entry = getattr(orchestrator, "register_participant_entry", None) + if callable(register_entry): + register_entry( + name, + entry_id=entry_executor.id, + is_agent=not isinstance(spec.participant, Executor), + ) + + workflow_builder = workflow_builder.add_edge(orchestrator, entry_executor) + for upstream, downstream in itertools.pairwise(pipeline): + workflow_builder = workflow_builder.add_edge(upstream, downstream) + if exit_executor is not orchestrator: + workflow_builder = workflow_builder.add_edge(exit_executor, orchestrator) + + for factory, condition in interceptor_specs: + interceptor_executor = factory(wiring) + workflow_builder = workflow_builder.add_edge(orchestrator, interceptor_executor, condition=condition) + workflow_builder = workflow_builder.add_edge(interceptor_executor, orchestrator) + + if checkpoint_storage is not None: + workflow_builder = workflow_builder.with_checkpointing(checkpoint_storage) + + if return_builder: + return workflow_builder, orchestrator + return workflow_builder.build() + + +# endregion + + +# region Builder + + +class GroupChatBuilder: + r"""High-level builder for manager-directed group chat workflows with dynamic orchestration. + + GroupChat coordinates multi-agent conversations using a manager that selects which participant + speaks next. The manager can be a simple Python function (select_speakers) or an LLM-based + selector (set_prompt_based_manager). These two approaches are mutually exclusive. + + **Core Workflow:** + 1. Define participants: list of agents (uses their .name) or dict mapping names to agents + 2. 
Configure speaker selection: select_speakers() OR set_prompt_based_manager() (not both) + 3. Optional: set round limits, checkpointing, termination conditions + 4. Build and run the workflow + + **Speaker Selection Patterns:** + + *Pattern 1: Simple function-based selection (recommended)* + + .. code-block:: python + + def select_next_speaker(state: GroupChatStateSnapshot) -> str | None: + # state contains: task, participants, conversation, history, round_index + if state["round_index"] >= 5: + return None # Finish + last_speaker = state["history"][-1].speaker if state["history"] else None + if last_speaker == "researcher": + return "writer" + return "researcher" + + + workflow = ( + GroupChatBuilder() + .select_speakers(select_next_speaker) + .participants([researcher_agent, writer_agent]) # Uses agent.name + .build() + ) + + *Pattern 2: LLM-based selection* + + .. code-block:: python + + from agent_framework.azure import AzureOpenAIChatClient + + workflow = ( + GroupChatBuilder() + .set_prompt_based_manager(chat_client=AzureOpenAIChatClient(), display_name="Coordinator") + .participants([researcher, writer]) # Or use dict: researcher=r, writer=w + .with_max_rounds(10) + .build() + ) + + **Participant Specification:** + + Three ways to specify participants: + - List form: ``[agent1, agent2]`` - uses ``agent.name`` attribute for participant names + - Dict form: ``{name1: agent1, name2: agent2}`` - explicit name control + - Keyword form: ``participants(name1=agent1, name2=agent2)`` - explicit name control + + **State Snapshot Structure:** + + The GroupChatStateSnapshot passed to select_speakers contains: + - ``task``: ChatMessage - Original user task + - ``participants``: dict[str, str] - Mapping of participant names to descriptions + - ``conversation``: tuple[ChatMessage, ...] - Full conversation history + - ``history``: tuple[GroupChatTurn, ...] - Turn-by-turn record with speaker attribution + - ``round_index``: int - Number of manager selection rounds so far + - ``pending_agent``: str | None - Name of agent currently processing (if any) + + **Important Constraints:** + - Cannot combine select_speakers() and set_prompt_based_manager() - choose one + - Participant names must be unique + - When using list form, agents must have a non-empty ``name`` attribute + """ + + def __init__( + self, + *, + _orchestrator_factory: _GroupChatOrchestratorFactory | None = None, + _participant_factory: Callable[[GroupChatParticipantSpec, _GroupChatConfig], _GroupChatParticipantPipeline] + | None = None, + ) -> None: + """Initialize the GroupChatBuilder. + + Args: + _orchestrator_factory: Internal extension point for custom orchestrator implementations. + Used by Magentic. Not part of public API - subject to change. + _participant_factory: Internal extension point for custom participant pipelines. + Used by Magentic. Not part of public API - subject to change.
+ """ + self._participants: dict[str, AgentProtocol | Executor] = {} + self._participant_metadata: dict[str, Any] | None = None + self._manager: _GroupChatManagerFn | None = None + self._manager_name: str = "manager" + self._checkpoint_storage: CheckpointStorage | None = None + self._max_rounds: int | None = None + self._interceptors: list[_InterceptorSpec] = [] + self._orchestrator_factory = group_chat_orchestrator(_orchestrator_factory) + self._participant_factory = _participant_factory or _default_participant_factory + + def _set_manager_function( + self, + manager: _GroupChatManagerFn, + display_name: str | None, + ) -> "GroupChatBuilder": + if self._manager is not None: + raise ValueError( + "GroupChatBuilder already has a manager configured. " + "Call select_speakers(...) or set_prompt_based_manager(...) at most once." + ) + resolved_name = display_name or getattr(manager, "name", None) or "manager" + self._manager = manager + self._manager_name = resolved_name + return self + + def set_prompt_based_manager( + self, + chat_client: ChatClientProtocol, + *, + instructions: str | None = None, + display_name: str | None = None, + ) -> "GroupChatBuilder": + r"""Configure the default prompt-based manager driven by an LLM chat client. + + The manager coordinates participants by making selection decisions based on the conversation + state, task, and participant descriptions. It uses structured output (ManagerDirectiveModel) + to ensure reliable parsing of decisions. + + Args: + chat_client: Chat completion client used to run the coordinator LLM. + instructions: System instructions to steer the coordinator's decision-making. + If not provided, uses DEFAULT_MANAGER_INSTRUCTIONS. These instructions are combined + with the task description, participant list, and structured output format to guide + the LLM in selecting the next speaker or completing the conversation. + display_name: Optional conversational display name for manager messages. + + Returns: + Self for fluent chaining. + + Note: + Calling this method and :meth:`set_speaker_selector` together is not allowed; choose one. + + Example: + + .. code-block:: python + + from agent_framework import GroupChatBuilder, DEFAULT_MANAGER_INSTRUCTIONS + + custom_instructions = ( + DEFAULT_MANAGER_INSTRUCTIONS + "\\n\\nPrioritize the researcher for data analysis tasks." + ) + + workflow = ( + GroupChatBuilder() + .set_prompt_based_manager(chat_client, instructions=custom_instructions, display_name="Coordinator") + .participants(researcher=researcher, writer=writer) + .build() + ) + """ + manager = _PromptBasedGroupChatManager( + chat_client, + instructions=instructions, + name=display_name, + ) + return self._set_manager_function(manager, display_name) + + def select_speakers( + self, + selector: ( + Callable[[GroupChatStateSnapshot], Awaitable[str | None]] | Callable[[GroupChatStateSnapshot], str | None] + ), + *, + display_name: str | None = None, + final_message: ChatMessage | str | Callable[[GroupChatStateSnapshot], Any] | None = None, + ) -> "GroupChatBuilder": + """Configure speaker selection using a pure function that examines group chat state. + + This is the primary way to control orchestration flow in a GroupChat. Your selector + function receives an immutable snapshot of the current conversation state and returns + the name of the next participant to speak, or None to finish the conversation. 
+ + The selector function signature:: + def select_next_speaker(state: GroupChatStateSnapshot) -> str | None: + # state contains: task, participants, conversation, history, round_index + # Return participant name to continue, or None to finish + ... + + Args: + selector: Function that takes GroupChatStateSnapshot and returns the next speaker's + name (str) to continue the conversation, or None to finish. May be sync or async. + display_name: Optional name shown in conversation history for orchestrator messages + (defaults to "manager"). + final_message: Optional final message (or factory) emitted when selector returns None + (defaults to "Conversation completed." authored by the manager). + + Returns: + Self for fluent chaining. + + Example: + + .. code-block:: python + + def select_next_speaker(state: GroupChatStateSnapshot) -> str | None: + if state["round_index"] >= 3: + return None # Finish after 3 rounds + last_speaker = state["history"][-1].speaker if state["history"] else None + if last_speaker == "researcher": + return "writer" + return "researcher" + + + workflow = ( + GroupChatBuilder() + .select_speakers(select_next_speaker) + .participants(researcher=researcher_agent, writer=writer_agent) + .build() + ) + + Note: + Cannot be combined with set_prompt_based_manager(). Choose one orchestration strategy. + """ + manager_name = display_name or "manager" + adapter = _SpeakerSelectorAdapter( + selector, + manager_name=manager_name, + final_message=final_message, + ) + return self._set_manager_function(adapter, display_name) + + def participants( + self, + participants: Mapping[str, AgentProtocol | Executor] | Sequence[AgentProtocol | Executor] | None = None, + /, + **named_participants: AgentProtocol | Executor, + ) -> "GroupChatBuilder": + """Define participants for this group chat workflow. + + Accepts AgentProtocol instances (auto-wrapped as AgentExecutor) or Executor instances. + Provide a mapping of name → participant for explicit control, or pass a sequence and + names will be inferred from the agent's name attribute (or executor id). + + Args: + participants: Optional mapping or sequence of participant definitions + **named_participants: Keyword arguments mapping names to agent/executor instances + + Returns: + Self for fluent chaining + + Raises: + ValueError: If participants are empty, names are duplicated, or names are empty strings + + Usage: + + .. code-block:: python + + from agent_framework import GroupChatBuilder + + workflow = ( + GroupChatBuilder() + .set_prompt_based_manager(chat_client) + .participants([writer_agent, reviewer_agent]) + .build() + ) + """ + combined: dict[str, AgentProtocol | Executor] = {} + + def _add(name: str, participant: AgentProtocol | Executor) -> None: + if not name: + raise ValueError("participant names must be non-empty strings") + if name in combined or name in self._participants: + raise ValueError(f"Duplicate participant name '{name}' supplied.") + combined[name] = participant + + if participants: + if isinstance(participants, Mapping): + for name, participant in participants.items(): + _add(name, participant) + else: + for participant in participants: + inferred_name: str + if isinstance(participant, Executor): + inferred_name = participant.id + else: + name_attr = getattr(participant, "name", None) + if not name_attr: + raise ValueError( + "Agent participants supplied via sequence must define a non-empty 'name' attribute."
) + inferred_name = str(name_attr) + _add(inferred_name, participant) + + for name, participant in named_participants.items(): + _add(name, participant) + + if not combined: + raise ValueError("participants cannot be empty") + + for name, participant in combined.items(): + self._participants[name] = participant + self._participant_metadata = None + return self + + def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "GroupChatBuilder": + """Enable checkpointing for the built workflow using the provided storage. + + Checkpointing allows the workflow to persist state and resume from interruption + points, enabling long-running conversations and failure recovery. + + Args: + checkpoint_storage: Storage implementation for persisting workflow state + + Returns: + Self for fluent chaining + + Usage: + + .. code-block:: python + + from agent_framework import GroupChatBuilder, InMemoryCheckpointStorage + + storage = InMemoryCheckpointStorage() + workflow = ( + GroupChatBuilder() + .set_prompt_based_manager(chat_client) + .participants(agent1=agent1, agent2=agent2) + .with_checkpointing(storage) + .build() + ) + """ + self._checkpoint_storage = checkpoint_storage + return self + + def with_request_handler( + self, + handler: Callable[[_GroupChatConfig], Executor] | Executor, + *, + condition: Callable[[Any], bool], + ) -> "GroupChatBuilder": + """Register an interceptor factory that creates executors for special requests. + + Args: + handler: Callable that receives the wiring and returns an executor, or a pre-built executor + condition: Filter determining which orchestrator messages the interceptor should process + + Returns: + Self for fluent chaining + """ + factory: Callable[[_GroupChatConfig], Executor] + if isinstance(handler, Executor): + executor = handler + + def _factory(_: _GroupChatConfig) -> Executor: + return executor + + factory = _factory + else: + factory = handler + + self._interceptors.append((factory, condition)) + return self + + def with_max_rounds(self, max_rounds: int | None) -> "GroupChatBuilder": + """Set a maximum number of manager rounds to prevent infinite conversations. + + When the round limit is reached, the workflow automatically completes with + a default completion message. Setting to None allows unlimited rounds. + + Args: + max_rounds: Maximum number of manager selection rounds, or None for unlimited + + Returns: + Self for fluent chaining + + Usage: + + ..
code-block:: python + + from agent_framework import GroupChatBuilder + + # Limit to 15 rounds + workflow = ( + GroupChatBuilder() + .set_prompt_based_manager(chat_client) + .participants(agent1=agent1, agent2=agent2) + .with_max_rounds(15) + .build() + ) + + # Unlimited rounds + workflow = ( + GroupChatBuilder() + .set_prompt_based_manager(chat_client) + .participants(agent1=agent1) + .with_max_rounds(None) + .build() + ) + """ + self._max_rounds = max_rounds + return self + + def _get_participant_metadata(self) -> dict[str, Any]: + if self._participant_metadata is None: + self._participant_metadata = prepare_participant_metadata( + self._participants, + executor_id_factory=lambda name, participant: ( + participant.id if isinstance(participant, Executor) else f"groupchat_agent:{name}" + ), + description_factory=lambda name, participant: ( + participant.id if isinstance(participant, Executor) else participant.__class__.__name__ + ), + ) + return self._participant_metadata + + def _build_participant_specs(self) -> dict[str, GroupChatParticipantSpec]: + metadata = self._get_participant_metadata() + descriptions: Mapping[str, str] = metadata["descriptions"] + specs: dict[str, GroupChatParticipantSpec] = {} + for name, participant in self._participants.items(): + specs[name] = GroupChatParticipantSpec( + name=name, + participant=participant, + description=descriptions[name], + ) + return specs + + def build(self) -> Workflow: + """Build and validate the group chat workflow. + + Assembles the orchestrator, participants, and their interconnections into + a complete workflow graph. The orchestrator delegates speaker selection to + the manager, routes requests to the appropriate participants, and collects + their responses to continue or complete the conversation. + + Returns: + Validated Workflow instance ready for execution + + Raises: + ValueError: If manager or participants are not configured (when using default factory) + + Wiring pattern: + - Orchestrator receives initial input (str, ChatMessage, or list[ChatMessage]) + - Orchestrator queries manager for next action (participant selection or finish) + - If participant selected: request routed directly to participant entry node + - Participant pipeline: AgentExecutor for agents or custom executor chains + - Participant response flows back to orchestrator + - Orchestrator updates state and queries manager again + - When manager returns finish directive: orchestrator yields final message and becomes idle + + Usage: + + .. 
code-block:: python + + from agent_framework import GroupChatBuilder + + # Execute the workflow + workflow = ( + GroupChatBuilder() + .set_prompt_based_manager(chat_client) + .participants(agent1=agent1, agent2=agent2) + .build() + ) + async for event in workflow.run_stream("Solve this problem collaboratively"): + print(event) + """ + # Manager is only required when using the default orchestrator factory + # Custom factories (e.g., MagenticBuilder) provide their own orchestrator with embedded manager + if self._manager is None and self._orchestrator_factory == _default_orchestrator_factory: + raise ValueError("manager must be configured before build() when using default orchestrator") + if not self._participants: + raise ValueError("participants must be configured before build()") + + metadata = self._get_participant_metadata() + participant_specs = self._build_participant_specs() + wiring = _GroupChatConfig( + manager=self._manager, + manager_name=self._manager_name, + participants=participant_specs, + max_rounds=self._max_rounds, + participant_aliases=metadata["aliases"], + participant_executors=metadata["executors"], + ) + + result = assemble_group_chat_workflow( + wiring=wiring, + participant_factory=self._participant_factory, + orchestrator_factory=self._orchestrator_factory, + interceptors=self._interceptors, + checkpoint_storage=self._checkpoint_storage, + ) + if not isinstance(result, Workflow): + raise TypeError("Expected Workflow from assemble_group_chat_workflow") + return result + + +# endregion + + +# region Default manager implementation + + +DEFAULT_MANAGER_INSTRUCTIONS = """You are coordinating a team conversation to solve the user's task. +Your role is to orchestrate collaboration between multiple participants by selecting who speaks next. +Leverage each participant's unique expertise as described in their descriptions. +Have participants build on each other's contributions - earlier participants gather information, +later ones refine and synthesize. +Only finish the task after multiple relevant participants have contributed their expertise.""" + +DEFAULT_MANAGER_STRUCTURED_OUTPUT_PROMPT = """Return your decision using the following structure: +- next_agent: name of the participant who should act next (use null when finish is true) +- message: instruction for that participant (empty string if not needed) +- finish: boolean indicating if the task is complete +- final_response: when finish is true, provide the final answer to the user""" + + +class ManagerDirectiveModel(BaseModel): + """Pydantic model for structured manager directive output.""" + + next_agent: str | None = Field( + default=None, + description="Name of the participant who should act next (null when finish is true)", + ) + message: str = Field( + default="", + description="Instruction for the selected participant", + ) + finish: bool = Field( + default=False, + description="Whether the task is complete", + ) + final_response: str | None = Field( + default=None, + description="Final answer to the user when finish is true", + ) + + +class _PromptBasedGroupChatManager: + """LLM-backed manager that produces directives via structured output. + + This is the default manager implementation for group chat workflows. It uses an LLM + to make speaker selection decisions based on conversation state, participant + descriptions, and custom instructions.
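+ + A minimal sketch of the directive round trip (payload values are illustrative, not + produced by a real chat client): + + .. code-block:: python + + from agent_framework import ManagerDirectiveModel + + directive = ManagerDirectiveModel.model_validate( + {"next_agent": "writer", "message": "Draft the summary.", "finish": False} + ) + assert directive.next_agent == "writer" and not directive.finish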
+ + Coordination strategy: + - Receives immutable state snapshot with full conversation history + - Formats system prompt with instructions, task, and participant descriptions + - Appends conversation context and uses structured output (Pydantic model) for reliable parsing + - Converts LLM response to GroupChatDirective + + Flexibility: + - Custom instructions allow domain-specific coordination strategies + - Participant descriptions guide the LLM's selection logic + - Structured output ensures reliable parsing (no regex or brittle prompts) + + Example coordination patterns: + - Round-robin: "Rotate between participants in order" + - Task-based: "Select the participant best suited for the current sub-task" + - Dependency-aware: "Only call analyst after researcher provides data" + + Args: + chat_client: ChatClientProtocol implementation for LLM inference + instructions: Custom system instructions (defaults to DEFAULT_MANAGER_INSTRUCTIONS). + These instructions are combined with the task, participant list, and + structured output format (ManagerDirectiveModel) to coordinate the conversation. + name: Display name for the manager in conversation history + + Raises: + RuntimeError: If the LLM response cannot be parsed into the directive payload, + if the directive is missing next_agent when finish is False, + or if the selected agent is not in participants. + """ + + def __init__( + self, + chat_client: ChatClientProtocol, + *, + instructions: str | None = None, + name: str | None = None, + ) -> None: + self._chat_client = chat_client + self._instructions = instructions or DEFAULT_MANAGER_INSTRUCTIONS + self._name = name or "GroupChatManager" + + @property + def name(self) -> str: + return self._name + + async def __call__(self, state: GroupChatStateSnapshot) -> GroupChatDirective: + participants = state["participants"] + task_message = state["task"] + conversation = state["conversation"] + + participants_section = "\n".join(f"- {agent}: {description}" for agent, description in participants.items()) + + system_message = ChatMessage( + role=Role.SYSTEM, + text=( + f"{self._instructions}\n\n" + f"Task:\n{task_message.text}\n\n" + f"Participants:\n{participants_section}\n\n" + f"{DEFAULT_MANAGER_STRUCTURED_OUTPUT_PROMPT}" + ), + ) + + messages: list[ChatMessage] = [system_message, *conversation] + + response = await self._chat_client.get_response(messages, response_format=ManagerDirectiveModel) + + directive_model: ManagerDirectiveModel + if response.value is not None: + if isinstance(response.value, ManagerDirectiveModel): + directive_model = response.value + elif isinstance(response.value, str): + directive_model = ManagerDirectiveModel.model_validate_json(response.value) + elif isinstance(response.value, dict): + directive_model = ManagerDirectiveModel.model_validate(response.value) # type: ignore[arg-type] + else: + raise RuntimeError(f"Unexpected response.value type: {type(response.value)}") + elif response.messages: + text = response.messages[-1].text or "{}" + directive_model = ManagerDirectiveModel.model_validate_json(text) + else: + raise RuntimeError("LLM response did not contain structured output.") + + if directive_model.finish: + final_text = directive_model.final_response or "" + return GroupChatDirective( + finish=True, + final_message=ChatMessage( + role=Role.ASSISTANT, + text=final_text, + author_name=self._name, + ), + ) + + next_agent = directive_model.next_agent + if not next_agent: + raise RuntimeError("Manager directive missing next_agent while finish is False.") + if next_agent not in
participants: + raise RuntimeError(f"Manager selected unknown participant '{next_agent}'.") + + return GroupChatDirective( + agent_name=next_agent, + instruction=directive_model.message or "", + ) + + +class _SpeakerSelectorAdapter: + """Adapter that turns a simple speaker selector into a full manager directive.""" + + def __init__( + self, + selector: Callable[[GroupChatStateSnapshot], Awaitable[Any]] | Callable[[GroupChatStateSnapshot], Any], + *, + manager_name: str, + final_message: ChatMessage | str | Callable[[GroupChatStateSnapshot], Any] | None = None, + ) -> None: + self._selector = selector + self._manager_name = manager_name + self._final_message = final_message + self.name = manager_name + + async def __call__(self, state: GroupChatStateSnapshot) -> GroupChatDirective: + result = await _maybe_await(self._selector(state)) + if result is None: + message = await self._resolve_final_message(state) + return GroupChatDirective(finish=True, final_message=message) + + if isinstance(result, Sequence) and not isinstance(result, (str, bytes, bytearray)): + if not result: + message = await self._resolve_final_message(state) + return GroupChatDirective(finish=True, final_message=message) + if len(result) != 1: # type: ignore[arg-type] + raise ValueError("Speaker selector must return a single participant name or None.") + first_item = result[0] # type: ignore[index] + if not isinstance(first_item, str): + raise TypeError("Speaker selector must return a participant name (str) or None.") + result = first_item + + if not isinstance(result, str): + raise TypeError("Speaker selector must return a participant name (str) or None.") + + return GroupChatDirective(agent_name=result) + + async def _resolve_final_message(self, state: GroupChatStateSnapshot) -> ChatMessage: + final_message = self._final_message + if callable(final_message): + value = await _maybe_await(final_message(state)) + else: + value = final_message + + if value is None: + message = ChatMessage( + role=Role.ASSISTANT, + text="Conversation completed.", + author_name=self._manager_name, + ) + elif isinstance(value, ChatMessage): + message = value + else: + message = ChatMessage( + role=Role.ASSISTANT, + text=str(value), + author_name=self._manager_name, + ) + + if not message.author_name: + patch = message.to_dict() + patch["author_name"] = self._manager_name + message = ChatMessage.from_dict(patch) + return message + + +# endregion diff --git a/python/packages/core/agent_framework/_workflows/_handoff.py b/python/packages/core/agent_framework/_workflows/_handoff.py index 725e50cb25..11076e3d14 100644 --- a/python/packages/core/agent_framework/_workflows/_handoff.py +++ b/python/packages/core/agent_framework/_workflows/_handoff.py @@ -35,9 +35,16 @@ from .._agents import ChatAgent from .._middleware import FunctionInvocationContext, FunctionMiddleware from ._agent_executor import AgentExecutor, AgentExecutorRequest, AgentExecutorResponse +from ._base_group_chat_orchestrator import BaseGroupChatOrchestrator from ._checkpoint import CheckpointStorage -from ._conversation_state import decode_chat_messages, encode_chat_messages from ._executor import Executor, handler +from ._group_chat import ( + _default_participant_factory, # type: ignore[reportPrivateUsage] + _GroupChatConfig, # type: ignore[reportPrivateUsage] + assemble_group_chat_workflow, +) +from ._orchestrator_helpers import clean_conversation_for_handoff +from ._participant_utils import GroupChatParticipantSpec, prepare_participant_metadata, sanitize_identifier from 
._request_info_executor import RequestInfoExecutor, RequestInfoMessage, RequestResponse from ._workflow import Workflow from ._workflow_builder import WorkflowBuilder @@ -49,19 +56,9 @@ _HANDOFF_TOOL_PATTERN = re.compile(r"(?:handoff|transfer)[_\s-]*to[_\s-]*(?P[\w-]+)", re.IGNORECASE) -def _sanitize_alias(value: str) -> str: - """Normalise an agent alias into a lowercase identifier-safe string.""" - cleaned = re.sub(r"[^0-9a-zA-Z]+", "_", value).strip("_") - if not cleaned: - cleaned = "agent" - if cleaned[0].isdigit(): - cleaned = f"agent_{cleaned}" - return cleaned.lower() - - def _create_handoff_tool(alias: str, description: str | None = None) -> AIFunction[Any, Any]: """Construct the synthetic handoff tool that signals routing to `alias`.""" - sanitized = _sanitize_alias(alias) + sanitized = sanitize_identifier(alias) tool_name = f"handoff_to_{sanitized}" doc = description or f"Handoff to the {alias} agent." @@ -257,7 +254,7 @@ def _target_from_tool_name(name: str | None) -> str | None: return None -class _HandoffCoordinator(Executor): +class _HandoffCoordinator(BaseGroupChatOrchestrator): """Coordinates agent-to-agent transfers and user turn requests.""" def __init__( @@ -266,7 +263,7 @@ def __init__( starting_agent_id: str, specialist_ids: Mapping[str, str], input_gateway_id: str, - termination_condition: Callable[[list[ChatMessage]], bool], + termination_condition: Callable[[list[ChatMessage]], bool | Awaitable[bool]], id: str, handoff_tool_targets: Mapping[str, str] | None = None, ) -> None: @@ -277,9 +274,12 @@ def __init__( self._specialist_ids = set(specialist_ids.values()) self._input_gateway_id = input_gateway_id self._termination_condition = termination_condition - self._full_conversation: list[ChatMessage] = [] self._handoff_tool_targets = {k.lower(): v for k, v in (handoff_tool_targets or {}).items()} + def _get_author_name(self) -> str: + """Get the coordinator name for orchestrator-generated messages.""" + return "handoff_coordinator" + @handler async def handle_agent_response( self, @@ -290,38 +290,39 @@ async def handle_agent_response( # Hydrate coordinator state (and detect new run) using checkpointable executor state state = await ctx.get_executor_state() if not state: - self._full_conversation = [] - elif not self._full_conversation: + self._clear_conversation() + elif not self._get_conversation(): restored = self._restore_conversation_from_state(state) if restored: - self._full_conversation = restored + self._conversation = list(restored) source = ctx.get_source_executor_id() is_starting_agent = source == self._starting_agent_id - # On first turn of a run, full_conversation is empty + # On first turn of a run, conversation is empty # Track new messages only, build authoritative history incrementally - if not self._full_conversation: + conversation_msgs = self._get_conversation() + if not conversation_msgs: # First response from starting agent - initialize with authoritative conversation snapshot # Keep the FULL conversation including tool calls (OpenAI SDK default behavior) full_conv = self._conversation_from_response(response) - self._full_conversation = list(full_conv) + self._conversation = list(full_conv) else: # Subsequent responses - append only new messages from this agent # Keep ALL messages including tool calls to maintain complete history - new_messages = list(response.agent_run_response.messages) - self._full_conversation.extend(new_messages) + new_messages = response.agent_run_response.messages or [] + self._conversation.extend(new_messages) - 
self._apply_response_metadata(self._full_conversation, response.agent_run_response) + self._apply_response_metadata(self._conversation, response.agent_run_response) - conversation = list(self._full_conversation) + conversation = list(self._conversation) # Check for handoff from ANY agent (starting agent or specialist) target = self._resolve_specialist(response.agent_run_response, conversation) if target is not None: await self._persist_state(ctx) # Clean tool-related content before sending to next agent - cleaned = self._get_cleaned_conversation(conversation) + cleaned = clean_conversation_for_handoff(conversation) request = AgentExecutorRequest(messages=cleaned, should_respond=True) await ctx.send_message(request, target_id=target) return @@ -332,7 +333,7 @@ async def handle_agent_response( await self._persist_state(ctx) - if self._termination_condition(conversation): + if await self._check_termination(): logger.info("Handoff workflow termination condition met. Ending conversation.") await ctx.yield_output(list(conversation)) return @@ -346,18 +347,18 @@ async def handle_user_input( ctx: WorkflowContext[AgentExecutorRequest, list[ChatMessage]], ) -> None: """Receive full conversation with new user input from gateway, update history, trim for agent.""" - # Update authoritative full conversation - self._full_conversation = list(message.full_conversation) + # Update authoritative conversation + self._conversation = list(message.full_conversation) await self._persist_state(ctx) # Check termination before sending to agent - if self._termination_condition(self._full_conversation): + if await self._check_termination(): logger.info("Handoff workflow termination condition met. Ending conversation.") - await ctx.yield_output(list(self._full_conversation)) + await ctx.yield_output(list(self._conversation)) return # Clean before sending to starting agent - cleaned = self._get_cleaned_conversation(self._full_conversation) + cleaned = clean_conversation_for_handoff(self._conversation) request = AgentExecutorRequest(messages=cleaned, should_respond=True) await ctx.send_message(request, target_id=self._starting_agent_id) @@ -409,8 +410,8 @@ def _append_tool_acknowledgement( author_name=function_call.name, ) # Add tool acknowledgement to both the conversation being sent and the full history - conversation.append(tool_message) - self._full_conversation.append(tool_message) + conversation.extend((tool_message,)) + self._append_messages((tool_message,)) def _conversation_from_response(self, response: AgentExecutorResponse) -> list[ChatMessage]: """Return the authoritative conversation snapshot from an executor response.""" @@ -421,78 +422,41 @@ def _conversation_from_response(self, response: AgentExecutorResponse) -> list[C ) return list(conversation) - def _get_cleaned_conversation(self, conversation: list[ChatMessage]) -> list[ChatMessage]: - """Create a cleaned copy of conversation with tool-related content removed. + async def _persist_state(self, ctx: WorkflowContext[Any, Any]) -> None: + """Store authoritative conversation snapshot without losing rich metadata.""" + state_payload = self.snapshot_state() + await ctx.set_executor_state(state_payload) - This method creates a copy of the conversation and removes tool-related content - before passing it to agents. The original conversation is preserved for handoff - detection and state management. + def _snapshot_pattern_metadata(self) -> dict[str, Any]: + """Serialize pattern-specific state. 
- During handoffs, tool calls (including handoff tools) cause OpenAI API errors. The OpenAI - API requires that: - 1. Assistant messages with tool_calls must be followed by corresponding tool responses - 2. Tool response messages must follow an assistant message with tool_calls + Handoff has no additional metadata beyond base conversation state. - To avoid these errors, we remove ALL tool-related content from the conversation: - - FunctionApprovalRequestContent and FunctionCallContent from assistant messages - - Tool response messages (Role.TOOL) + Returns: + Empty dict (no pattern-specific state) + """ + return {} - This follows the pattern from OpenAI Agents SDK's `remove_all_tools` filter, which strips - all tool-related content from conversation history during handoffs. + def _restore_pattern_metadata(self, metadata: dict[str, Any]) -> None: + """Restore pattern-specific state. - Removes: - - FunctionApprovalRequestContent: Approval requests for tools - - FunctionCallContent: Tool calls made by the agent - - Tool response messages (Role.TOOL with FunctionResultContent) - - Messages with only tool calls and no text content + Handoff has no additional metadata beyond base conversation state. - Preserves: - - User messages - - Assistant messages with text content (tool calls are stripped out) + Args: + metadata: Pattern-specific state dict (ignored) """ - # Create a copy to avoid modifying the original - cleaned: list[ChatMessage] = [] - for msg in conversation: - # Skip tool response messages - they must be paired with tool calls which we're removing - if msg.role == Role.TOOL: - continue - - # Check if message has tool-related content - has_tool_content = False - if msg.contents: - has_tool_content = any( - isinstance(content, (FunctionApprovalRequestContent, FunctionCallContent)) - for content in msg.contents - ) + pass - # If no tool content, keep the original message - if not has_tool_content: - cleaned.append(msg) - continue - - # Message has tool content - only keep if it also has text - if msg.text and msg.text.strip(): - # Create fresh text-only message to avoid tool_calls being regenerated - msg_copy = ChatMessage( - role=msg.role, - text=msg.text, - author_name=msg.author_name, - ) - cleaned.append(msg_copy) - - return cleaned + def _restore_conversation_from_state(self, state: Mapping[str, Any]) -> list[ChatMessage]: + """Rehydrate the coordinator's conversation history from checkpointed state. - async def _persist_state(self, ctx: WorkflowContext[Any, Any]) -> None: - """Store authoritative conversation snapshot without losing rich metadata.""" - state_payload = {"full_conversation": encode_chat_messages(self._full_conversation)} - await ctx.set_executor_state(state_payload) + DEPRECATED: Use restore_state() instead. Kept for backward compatibility. 
+ """ + from ._orchestration_state import OrchestrationState - def _restore_conversation_from_state(self, state: Mapping[str, Any]) -> list[ChatMessage]: - """Rehydrate the coordinator's conversation history from checkpointed state.""" - raw_conv = state.get("full_conversation") - if not isinstance(raw_conv, list): - return [] - return decode_chat_messages(raw_conv) # type: ignore[arg-type] + orch_state_dict = {"conversation": state.get("full_conversation", state.get("conversation", []))} + temp_state = OrchestrationState.from_dict(orch_state_dict) + return list(temp_state.conversation) def _apply_response_metadata(self, conversation: list[ChatMessage], agent_response: AgentRunResponse) -> None: """Merge top-level response metadata into the latest assistant message.""" @@ -766,7 +730,10 @@ def __init__( self._starting_agent_id: str | None = None self._checkpoint_storage: CheckpointStorage | None = None self._request_prompt: str | None = None - self._termination_condition: Callable[[list[ChatMessage]], bool] = _default_termination_condition + # Termination condition + self._termination_condition: Callable[[list[ChatMessage]], bool | Awaitable[bool]] = ( + _default_termination_condition + ) self._auto_register_handoff_tools: bool = True self._handoff_config: dict[str, list[str]] = {} # Maps agent_id -> [target_agent_ids] @@ -814,36 +781,41 @@ def participants(self, participants: Sequence[AgentProtocol | Executor]) -> "Han if not participants: raise ValueError("participants cannot be empty") - wrapped: list[Executor] = [] + named: dict[str, AgentProtocol | Executor] = {} + for participant in participants: + identifier: str + if isinstance(participant, Executor): + identifier = participant.id + elif isinstance(participant, AgentProtocol): + name_attr = getattr(participant, "name", None) + if not name_attr: + raise ValueError( + "Agents used in handoff workflows must have a stable name " + "so they can be addressed during routing." + ) + identifier = str(name_attr) + else: + raise TypeError( + f"Participants must be AgentProtocol or Executor instances. Got {type(participant).__name__}." 
+ ) + if identifier in named: + raise ValueError(f"Duplicate participant name '{identifier}' detected") + named[identifier] = participant + + metadata = prepare_participant_metadata( + named, + description_factory=lambda name, participant: getattr(participant, "description", None) or name, + ) + + wrapped = metadata["executors"] seen_ids: set[str] = set() - alias_map: dict[str, str] = {} - - def _register_alias(alias: str | None, exec_id: str) -> None: - """Record canonical and sanitised aliases that resolve to the executor id.""" - if not alias: - return - alias_map[alias] = exec_id - sanitized = _sanitize_alias(alias) - if sanitized and sanitized not in alias_map: - alias_map[sanitized] = exec_id - - for p in participants: - executor = self._wrap_participant(p) + for executor in wrapped.values(): if executor.id in seen_ids: raise ValueError(f"Duplicate participant with id '{executor.id}' detected") seen_ids.add(executor.id) - wrapped.append(executor) - - _register_alias(executor.id, executor.id) - if isinstance(p, AgentProtocol): - name = getattr(p, "name", None) - _register_alias(name, executor.id) - display = getattr(p, "display_name", None) - if isinstance(display, str) and display: - _register_alias(display, executor.id) - - self._executors = {executor.id: executor for executor in wrapped} - self._aliases = alias_map + + self._executors = {executor.id: executor for executor in wrapped.values()} + self._aliases = metadata["aliases"] self._starting_agent_id = None return self @@ -1023,7 +995,7 @@ def _apply_auto_tools(self, agent: ChatAgent, specialists: Mapping[str, Executor new_tools: list[Any] = [] for exec_id in specialists: alias = exec_id - sanitized = _sanitize_alias(alias) + sanitized = sanitize_identifier(alias) tool = _create_handoff_tool(alias) if tool.name not in existing_names: new_tools.append(tool) @@ -1184,12 +1156,16 @@ def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "HandoffB self._checkpoint_storage = checkpoint_storage return self - def with_termination_condition(self, condition: Callable[[list[ChatMessage]], bool]) -> "HandoffBuilder": + def with_termination_condition( + self, condition: Callable[[list[ChatMessage]], bool | Awaitable[bool]] + ) -> "HandoffBuilder": """Set a custom termination condition for the handoff workflow. + The condition can be either synchronous or asynchronous. + Args: condition: Function that receives the full conversation and returns True - if the workflow should terminate (not request further user input). + (or awaitable True) if the workflow should terminate (not request further user input). Returns: Self for chaining. @@ -1198,9 +1174,19 @@ def with_termination_condition(self, condition: Callable[[list[ChatMessage]], bo .. 
code-block:: python + # Synchronous condition builder.with_termination_condition( lambda conv: len(conv) > 20 or any("goodbye" in msg.text.lower() for msg in conv[-2:]) ) + + + # Asynchronous condition + async def check_termination(conv: list[ChatMessage]) -> bool: + # Can perform async operations + return len(conv) > 20 + + + builder.with_termination_condition(check_termination) """ self._termination_condition = condition return self @@ -1308,6 +1294,14 @@ def build(self) -> Workflow: if not specialists: logger.warning("Handoff workflow has no specialist agents; the coordinator will loop with the user.") + descriptions = { + exec_id: getattr(executor, "description", None) or exec_id for exec_id, executor in self._executors.items() + } + participant_specs = { + exec_id: GroupChatParticipantSpec(name=exec_id, participant=executor, description=descriptions[exec_id]) + for exec_id, executor in self._executors.items() + } + input_node = _InputToConversation(id="input-conversation") request_info = RequestInfoExecutor(id=f"{starting_executor.id}_handoff_requests") user_gateway = _UserInputGateway( @@ -1316,48 +1310,50 @@ def build(self) -> Workflow: prompt=self._request_prompt, id="handoff-user-input", ) - coordinator = _HandoffCoordinator( - starting_agent_id=starting_executor.id, - specialist_ids={alias: exec_id for alias, exec_id in self._aliases.items() if exec_id in specialists}, - input_gateway_id=user_gateway.id, - termination_condition=self._termination_condition, - id="handoff-coordinator", - handoff_tool_targets=handoff_tool_targets, - ) - builder = WorkflowBuilder(name=self._name, description=self._description) - builder.set_start_executor(input_node) - builder.add_edge(input_node, starting_executor) - builder.add_edge(starting_executor, coordinator) + specialist_aliases = {alias: exec_id for alias, exec_id in self._aliases.items() if exec_id in specialists} + + def _handoff_orchestrator_factory(_: _GroupChatConfig) -> Executor: + return _HandoffCoordinator( + starting_agent_id=starting_executor.id, + specialist_ids=specialist_aliases, + input_gateway_id=user_gateway.id, + termination_condition=self._termination_condition, + id="handoff-coordinator", + handoff_tool_targets=handoff_tool_targets, + ) - for specialist in specialists.values(): - builder.add_edge(coordinator, specialist) - builder.add_edge(specialist, coordinator) + wiring = _GroupChatConfig( + manager=None, + manager_name=self._starting_agent_id, + participants=participant_specs, + max_rounds=None, + participant_aliases=self._aliases, + participant_executors=self._executors, + ) - builder.add_edge(coordinator, user_gateway) - builder.add_edge(user_gateway, request_info) - builder.add_edge(request_info, user_gateway) - builder.add_edge(user_gateway, coordinator) # Route back to coordinator, not directly to agent - builder.add_edge(coordinator, starting_executor) # Coordinator sends trimmed request to agent + result = assemble_group_chat_workflow( + wiring=wiring, + participant_factory=_default_participant_factory, + orchestrator_factory=_handoff_orchestrator_factory, + interceptors=(), + checkpoint_storage=self._checkpoint_storage, + builder=WorkflowBuilder(name=self._name, description=self._description), + return_builder=True, + ) + if not isinstance(result, tuple): + raise TypeError("Expected tuple from assemble_group_chat_workflow with return_builder=True") + builder, coordinator = result - if self._checkpoint_storage is not None: - builder = builder.with_checkpointing(self._checkpoint_storage) + builder = 
builder.set_start_executor(input_node) + builder = builder.add_edge(input_node, starting_executor) + builder = builder.add_edge(coordinator, user_gateway) + builder = builder.add_edge(user_gateway, request_info) + builder = builder.add_edge(request_info, user_gateway) + builder = builder.add_edge(user_gateway, coordinator) return builder.build() - def _wrap_participant(self, participant: AgentProtocol | Executor) -> Executor: - """Ensure every participant is represented as an Executor instance.""" - if isinstance(participant, Executor): - return participant - if isinstance(participant, AgentProtocol): - name = getattr(participant, "name", None) - if not name: - raise ValueError( - "Agents used in handoff workflows must have a stable name so they can be addressed during routing." - ) - return AgentExecutor(participant, id=name) - raise TypeError(f"Participants must be AgentProtocol or Executor instances. Got {type(participant).__name__}.") - def _resolve_to_id(self, candidate: str | AgentProtocol | Executor) -> str: """Resolve a participant reference into a concrete executor identifier.""" if isinstance(candidate, Executor): diff --git a/python/packages/core/agent_framework/_workflows/_magentic.py b/python/packages/core/agent_framework/_workflows/_magentic.py index 2b132633aa..8368b23845 100644 --- a/python/packages/core/agent_framework/_workflows/_magentic.py +++ b/python/packages/core/agent_framework/_workflows/_magentic.py @@ -7,10 +7,10 @@ import re import sys from abc import ABC, abstractmethod -from collections.abc import AsyncIterable, Awaitable, Callable +from collections.abc import AsyncIterable, Sequence from dataclasses import dataclass, field from enum import Enum -from typing import Any, Literal, Protocol, TypeVar, Union, cast +from typing import Any, Protocol, TypeVar, Union, cast from uuid import uuid4 from agent_framework import ( @@ -23,16 +23,25 @@ FunctionResultContent, Role, ) -from agent_framework._agents import BaseAgent +from ._base_group_chat_orchestrator import BaseGroupChatOrchestrator from ._checkpoint import CheckpointStorage, WorkflowCheckpoint from ._const import EXECUTOR_STATE_KEY from ._events import WorkflowEvent from ._executor import Executor, handler +from ._group_chat import ( + GroupChatBuilder, + _GroupChatConfig, # type: ignore[reportPrivateUsage] + _GroupChatParticipantPipeline, # type: ignore[reportPrivateUsage] + _GroupChatRequestMessage, # type: ignore[reportPrivateUsage] + _GroupChatResponseMessage, # type: ignore[reportPrivateUsage] + group_chat_orchestrator, +) +from ._message_utils import normalize_messages_input from ._model_utils import DictConvertible, encode_value -from ._request_info_executor import RequestInfoMessage, RequestResponse +from ._participant_utils import GroupChatParticipantSpec, participant_description +from ._request_info_executor import RequestInfoExecutor, RequestInfoMessage, RequestResponse from ._workflow import Workflow, WorkflowRunResult -from ._workflow_builder import WorkflowBuilder from ._workflow_context import WorkflowContext if sys.version_info >= (3, 11): @@ -92,32 +101,20 @@ def _message_from_payload(payload: Any) -> ChatMessage: # region Unified callback API (developer-facing) -class MagenticCallbackMode(str, Enum): - """Controls whether agent deltas are surfaced via on_event. - - STREAMING: emit AgentDeltaEvent chunks and a final AgentMessageEvent. - NON_STREAMING: suppress deltas and only emit AgentMessageEvent. 
- """ - - STREAMING = "streaming" - NON_STREAMING = "non_streaming" - - @dataclass -class MagenticOrchestratorMessageEvent: - source: Literal["orchestrator"] = "orchestrator" +class MagenticOrchestratorMessageEvent(WorkflowEvent): orchestrator_id: str = "" message: ChatMessage | None = None - # Kind values include: user_task, task_ledger, instruction, notice kind: str = "" + def __post_init__(self) -> None: + super().__init__(data=self.message) + @dataclass -class MagenticAgentDeltaEvent: - source: Literal["agent"] = "agent" +class MagenticAgentDeltaEvent(WorkflowEvent): agent_id: str | None = None text: str | None = None - # Optional: function/tool streaming payloads function_call_id: str | None = None function_call_name: str | None = None function_call_arguments: Any | None = None @@ -125,19 +122,26 @@ class MagenticAgentDeltaEvent: function_result: Any | None = None role: Role | None = None + def __post_init__(self) -> None: + super().__init__(data=self.text) + @dataclass -class MagenticAgentMessageEvent: - source: Literal["agent"] = "agent" +class MagenticAgentMessageEvent(WorkflowEvent): agent_id: str = "" message: ChatMessage | None = None + def __post_init__(self) -> None: + super().__init__(data=self.message) + @dataclass -class MagenticFinalResultEvent: - source: Literal["workflow"] = "workflow" +class MagenticFinalResultEvent(WorkflowEvent): message: ChatMessage | None = None + def __post_init__(self) -> None: + super().__init__(data=self.message) + MagenticCallbackEvent = Union[ MagenticOrchestratorMessageEvent, @@ -318,48 +322,71 @@ def _new_participant_descriptions() -> dict[str, str]: return {} +def _new_chat_message_list() -> list[ChatMessage]: + """Typed default factory for ChatMessage list to satisfy type checkers.""" + return [] + + @dataclass -class MagenticStartMessage: - """A message to start a magentic workflow.""" +class _MagenticStartMessage(DictConvertible): + """Internal: A message to start a magentic workflow.""" - def __init__(self, task: ChatMessage) -> None: - """Create the start message.""" - self.task = task + messages: list[ChatMessage] = field(default_factory=_new_chat_message_list) - @classmethod - def from_string(cls, task_text: str) -> "MagenticStartMessage": - """Create a MagenticStartMessage from a simple string. + def __init__( + self, + messages: str | ChatMessage | Sequence[str] | Sequence[ChatMessage] | None = None, + *, + task: ChatMessage | None = None, + ) -> None: + normalized = normalize_messages_input(messages) + if task is not None: + normalized += normalize_messages_input(task) + if not normalized: + raise ValueError("MagenticStartMessage requires at least one message input.") + self.messages: list[ChatMessage] = normalized - Args: - task_text: The task description as a string. + @property + def task(self) -> ChatMessage: + """Final user message for the task.""" + return self.messages[-1] - Returns: - A MagenticStartMessage with the string converted to a ChatMessage. 
- """ - return cls(task=ChatMessage(role=Role.USER, text=task_text)) + @classmethod + def from_string(cls, task_text: str) -> "_MagenticStartMessage": + """Create a MagenticStartMessage from a simple string.""" + return cls(task_text) def to_dict(self) -> dict[str, Any]: """Create a dict representation of the message.""" - return {"task": self.task.to_dict()} + return { + "messages": [message.to_dict() for message in self.messages], + "task": self.task.to_dict(), + } @classmethod - def from_dict(cls, value: dict[str, Any]) -> "MagenticStartMessage": + def from_dict(cls, data: dict[str, Any]) -> "_MagenticStartMessage": """Create from a dict.""" - task = ChatMessage.from_dict(value["task"]) - return cls(task=task) + if "messages" in data: + raw_messages = data["messages"] + if not isinstance(raw_messages, Sequence) or isinstance(raw_messages, (str, bytes)): + raise TypeError("MagenticStartMessage 'messages' must be a sequence.") + messages: list[ChatMessage] = [ChatMessage.from_dict(raw) for raw in raw_messages] # type: ignore[arg-type] + return cls(messages) + if "task" in data: + task = ChatMessage.from_dict(data["task"]) + return cls(task) + raise KeyError("Expected 'messages' or 'task' in MagenticStartMessage payload.") @dataclass -class MagenticRequestMessage: - """A request message type for agents in a magentic workflow.""" +class _MagenticRequestMessage(_GroupChatRequestMessage): + """Internal: A request message type for agents in a magentic workflow.""" - agent_name: str - instruction: str = "" task_context: str = "" -class MagenticResponseMessage: - """A response message type. +class _MagenticResponseMessage(_GroupChatResponseMessage): + """Internal: A response message type. When emitted by the orchestrator you can mark it as a broadcast to all agents, or target a specific agent by name. @@ -371,6 +398,11 @@ def __init__( target_agent: str | None = None, # deliver only to this agent if set broadcast: bool = False, # deliver to all agents if True ) -> None: + agent_name = body.author_name or "" + super().__init__( + agent_name=agent_name, + message=body, + ) self.body = body self.target_agent = target_agent self.broadcast = broadcast @@ -380,7 +412,7 @@ def to_dict(self) -> dict[str, Any]: return {"body": self.body.to_dict(), "target_agent": self.target_agent, "broadcast": self.broadcast} @classmethod - def from_dict(cls, value: dict[str, Any]) -> "MagenticResponseMessage": + def from_dict(cls, value: dict[str, Any]) -> "_MagenticResponseMessage": """Create from a dict.""" body = ChatMessage.from_dict(value["body"]) target_agent = value.get("target_agent") @@ -389,8 +421,8 @@ def from_dict(cls, value: dict[str, Any]) -> "MagenticResponseMessage": @dataclass -class MagenticPlanReviewRequest(RequestInfoMessage): - """Human-in-the-loop request to review and optionally edit the plan before execution.""" +class _MagenticPlanReviewRequest(RequestInfoMessage): + """Internal: Human-in-the-loop request to review and optionally edit the plan before execution.""" # Because RequestInfoMessage defines a default field (request_id), # subclass fields must also have defaults to satisfy dataclass rules. 
@@ -406,8 +438,8 @@ class MagenticPlanReviewDecision(str, Enum): @dataclass -class MagenticPlanReviewReply: - """Human reply to a plan review request.""" +class _MagenticPlanReviewReply: + """Internal: Human reply to a plan review request.""" decision: MagenticPlanReviewDecision edited_plan_text: str | None = None # if supplied, becomes the new plan text verbatim @@ -415,8 +447,8 @@ class MagenticPlanReviewReply: @dataclass -class MagenticTaskLedger(DictConvertible): - """Task ledger for the Standard Magentic manager.""" +class _MagenticTaskLedger(DictConvertible): + """Internal: Task ledger for the Standard Magentic manager.""" facts: ChatMessage plan: ChatMessage @@ -425,7 +457,7 @@ def to_dict(self) -> dict[str, Any]: return {"facts": _message_to_payload(self.facts), "plan": _message_to_payload(self.plan)} @classmethod - def from_dict(cls, data: dict[str, Any]) -> "MagenticTaskLedger": + def from_dict(cls, data: dict[str, Any]) -> "_MagenticTaskLedger": return cls( facts=_message_from_payload(data.get("facts")), plan=_message_from_payload(data.get("plan")), @@ -433,8 +465,8 @@ def from_dict(cls, data: dict[str, Any]) -> "MagenticTaskLedger": @dataclass -class MagenticProgressLedgerItem(DictConvertible): - """A progress ledger item.""" +class _MagenticProgressLedgerItem(DictConvertible): + """Internal: A progress ledger item.""" reason: str answer: str | bool @@ -443,7 +475,7 @@ def to_dict(self) -> dict[str, Any]: return {"reason": self.reason, "answer": self.answer} @classmethod - def from_dict(cls, data: dict[str, Any]) -> "MagenticProgressLedgerItem": + def from_dict(cls, data: dict[str, Any]) -> "_MagenticProgressLedgerItem": answer_value = data.get("answer") if not isinstance(answer_value, (str, bool)): answer_value = "" # Default to empty string if not str or bool @@ -451,14 +483,14 @@ def from_dict(cls, data: dict[str, Any]) -> "MagenticProgressLedgerItem": @dataclass -class MagenticProgressLedger(DictConvertible): - """A progress ledger for tracking workflow progress.""" +class _MagenticProgressLedger(DictConvertible): + """Internal: A progress ledger for tracking workflow progress.""" - is_request_satisfied: MagenticProgressLedgerItem - is_in_loop: MagenticProgressLedgerItem - is_progress_being_made: MagenticProgressLedgerItem - next_speaker: MagenticProgressLedgerItem - instruction_or_question: MagenticProgressLedgerItem + is_request_satisfied: _MagenticProgressLedgerItem + is_in_loop: _MagenticProgressLedgerItem + is_progress_being_made: _MagenticProgressLedgerItem + next_speaker: _MagenticProgressLedgerItem + instruction_or_question: _MagenticProgressLedgerItem def to_dict(self) -> dict[str, Any]: return { @@ -470,13 +502,13 @@ def to_dict(self) -> dict[str, Any]: } @classmethod - def from_dict(cls, data: dict[str, Any]) -> "MagenticProgressLedger": + def from_dict(cls, data: dict[str, Any]) -> "_MagenticProgressLedger": return cls( - is_request_satisfied=MagenticProgressLedgerItem.from_dict(data.get("is_request_satisfied", {})), - is_in_loop=MagenticProgressLedgerItem.from_dict(data.get("is_in_loop", {})), - is_progress_being_made=MagenticProgressLedgerItem.from_dict(data.get("is_progress_being_made", {})), - next_speaker=MagenticProgressLedgerItem.from_dict(data.get("next_speaker", {})), - instruction_or_question=MagenticProgressLedgerItem.from_dict(data.get("instruction_or_question", {})), + is_request_satisfied=_MagenticProgressLedgerItem.from_dict(data.get("is_request_satisfied", {})), + is_in_loop=_MagenticProgressLedgerItem.from_dict(data.get("is_in_loop", {})), + 
is_progress_being_made=_MagenticProgressLedgerItem.from_dict(data.get("is_progress_being_made", {})), + next_speaker=_MagenticProgressLedgerItem.from_dict(data.get("next_speaker", {})), + instruction_or_question=_MagenticProgressLedgerItem.from_dict(data.get("instruction_or_question", {})), ) @@ -633,7 +665,7 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: ... @abstractmethod - async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: + async def create_progress_ledger(self, magentic_context: MagenticContext) -> _MagenticProgressLedger: """Create a progress ledger.""" ... @@ -662,7 +694,7 @@ class StandardMagenticManager(MagenticManagerBase): - Final answer synthesis """ - task_ledger: MagenticTaskLedger | None + task_ledger: _MagenticTaskLedger | None def snapshot_state(self) -> dict[str, Any]: state = super().snapshot_state() @@ -676,14 +708,14 @@ def restore_state(self, state: dict[str, Any]) -> None: ledger = state.get("task_ledger") if ledger is not None: try: - self.task_ledger = MagenticTaskLedger.from_dict(ledger) + self.task_ledger = _MagenticTaskLedger.from_dict(ledger) except Exception: # pragma: no cover - defensive logger.warning("Failed to restore manager task ledger from checkpoint state") def __init__( self, chat_client: ChatClientProtocol, - task_ledger: MagenticTaskLedger | None = None, + task_ledger: _MagenticTaskLedger | None = None, *, instructions: str | None = None, task_ledger_facts_prompt: str | None = None, @@ -726,7 +758,7 @@ def __init__( self.chat_client: ChatClientProtocol = chat_client self.instructions: str | None = instructions - self.task_ledger: MagenticTaskLedger | None = task_ledger + self.task_ledger: _MagenticTaskLedger | None = task_ledger # Prompts may be overridden if needed self.task_ledger_facts_prompt: str = task_ledger_facts_prompt or ORCHESTRATOR_TASK_LEDGER_FACTS_PROMPT @@ -799,7 +831,7 @@ async def plan(self, magentic_context: MagenticContext) -> ChatMessage: plan_msg = await self._complete([*magentic_context.chat_history, facts_user, facts_msg, plan_user]) # Store ledger and render full combined view - self.task_ledger = MagenticTaskLedger(facts=facts_msg, plan=plan_msg) + self.task_ledger = _MagenticTaskLedger(facts=facts_msg, plan=plan_msg) # Also store individual messages in chat_history for better grounding # This gives the progress ledger model access to the detailed reasoning @@ -841,7 +873,7 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: ]) # Store and render - self.task_ledger = MagenticTaskLedger(facts=updated_facts, plan=updated_plan) + self.task_ledger = _MagenticTaskLedger(facts=updated_facts, plan=updated_plan) # Also store individual messages in chat_history for better grounding # This gives the progress ledger model access to the detailed reasoning @@ -855,7 +887,7 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: ) return ChatMessage(role=Role.ASSISTANT, text=combined, author_name=MAGENTIC_MANAGER_NAME) - async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: + async def create_progress_ledger(self, magentic_context: MagenticContext) -> _MagenticProgressLedger: """Use the model to produce a JSON progress ledger based on the conversation so far. 
Adds lightweight retries with backoff for transient parse issues and avoids selecting a @@ -882,7 +914,7 @@ async def create_progress_ledger(self, magentic_context: MagenticContext) -> Mag raw = await self._complete([*magentic_context.chat_history, user_message]) try: ledger_dict = _extract_json(raw.text) - return _coerce_model(MagenticProgressLedger, ledger_dict) + return _coerce_model(_MagenticProgressLedger, ledger_dict) except Exception as ex: last_error = ex attempts += 1 @@ -915,7 +947,7 @@ async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatM # region Magentic Executors -class MagenticOrchestratorExecutor(Executor): +class MagenticOrchestratorExecutor(BaseGroupChatOrchestrator): """Magentic orchestrator executor that handles all orchestration logic. This executor manages the entire Magentic One workflow including: @@ -939,11 +971,7 @@ def __init__( self, manager: MagenticManagerBase, participants: dict[str, str], - result_callback: Callable[[ChatMessage], Awaitable[None]] | None = None, - agent_response_callback: Callable[[str, ChatMessage], Awaitable[None]] | None = None, - streaming_agent_response_callback: Callable[[str, AgentRunResponseUpdate, bool], Awaitable[None]] | None = None, *, - message_callback: Callable[[str, ChatMessage, str], Awaitable[None]] | None = None, require_plan_signoff: bool = False, max_plan_review_rounds: int = 10, executor_id: str | None = None, @@ -953,11 +981,6 @@ def __init__( Args: manager: The Magentic manager instance. participants: A dictionary of participant IDs to their names. - result_callback: An optional callback for handling final results. - message_callback: An optional generic callback for orchestrator-emitted messages. The third - argument is a kind string, e.g., ORCH_MSG_KIND_USER_TASK or ORCH_MSG_KIND_TASK_LEDGER. - agent_response_callback: An optional callback for handling agent responses. - streaming_agent_response_callback: An optional callback for handling streaming agent responses. require_plan_signoff: Whether to require plan sign-off from a human. max_plan_review_rounds: The maximum number of plan review rounds. executor_id: An optional executor ID. @@ -965,10 +988,6 @@ def __init__( super().__init__(executor_id or f"magentic_orchestrator_{uuid4().hex[:8]}") self._manager = manager self._participants = participants - self._result_callback = result_callback - self._message_callback = message_callback - self._agent_response_callback = agent_response_callback - self._streaming_agent_response_callback = streaming_agent_response_callback self._context = None self._task_ledger = None self._require_plan_signoff = require_plan_signoff @@ -981,11 +1000,52 @@ def __init__( # Tracks whether checkpoint state has been applied for this run self._state_restored = False + def _get_author_name(self) -> str: + """Get the magentic manager name for orchestrator-generated messages.""" + return MAGENTIC_MANAGER_NAME + def register_agent_executor(self, name: str, executor: "MagenticAgentExecutor") -> None: """Register an agent executor for internal control (no messages).""" self._agent_executors[name] = executor + async def _emit_orchestrator_message( + self, + ctx: WorkflowContext[Any, ChatMessage], + message: ChatMessage, + kind: str, + ) -> None: + """Emit orchestrator message to the workflow event stream. + + Orchestrator messages flow through the unified workflow event stream as + MagenticOrchestratorMessageEvent instances. Consumers should subscribe to + these events via workflow.run_stream(). 
+ + Args: + ctx: Workflow context for adding events to the stream + message: Orchestrator message to emit (task, plan, instruction, notice) + kind: Message classification (user_task, task_ledger, instruction, notice) + + Example: + async for event in workflow.run_stream("task"): + if isinstance(event, MagenticOrchestratorMessageEvent): + print(f"Orchestrator {event.kind}: {event.message.text}") + """ + event = MagenticOrchestratorMessageEvent( + orchestrator_id=self.id, + message=message, + kind=kind, + ) + await ctx.add_event(event) + def snapshot_state(self) -> dict[str, Any]: + """Capture current orchestrator state for checkpointing. + + Uses OrchestrationState for structure but maintains Magentic's complex metadata + at the top level for backward compatibility with existing checkpoints. + + Returns: + Dict ready for checkpoint persistence + """ state: dict[str, Any] = { "plan_review_round": self._plan_review_round, "max_plan_review_rounds": self._max_plan_review_rounds, @@ -1004,6 +1064,22 @@ def snapshot_state(self) -> dict[str, Any]: return state def restore_state(self, state: dict[str, Any]) -> None: + """Restore orchestrator state from checkpoint. + + Maintains backward compatibility with existing Magentic checkpoints + while supporting OrchestrationState structure. + + Args: + state: Checkpoint data dict + """ + # Support both old format (direct keys) and new format (wrapped in OrchestrationState) + if "metadata" in state and isinstance(state.get("metadata"), dict): + # New OrchestrationState format - extract metadata + from ._orchestration_state import OrchestrationState + + orch_state = OrchestrationState.from_dict(state) + state = orch_state.metadata + ctx_payload = state.get("magentic_context") if ctx_payload is not None: try: @@ -1067,6 +1143,28 @@ def _reconcile_restored_participants(self) -> None: for name, description in expected.items(): restored[name] = description + def _snapshot_pattern_metadata(self) -> dict[str, Any]: + """Serialize pattern-specific state. + + Magentic uses custom snapshot_state() instead of base class hooks. + This method exists to satisfy the base class contract. + + Returns: + Empty dict (Magentic manages its own state) + """ + return {} + + def _restore_pattern_metadata(self, metadata: dict[str, Any]) -> None: + """Restore pattern-specific state. + + Magentic uses custom restore_state() instead of base class hooks. + This method exists to satisfy the base class contract. 
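+
+        A pattern that does rely on these hooks would typically mirror the pair; a
+        hypothetical sketch (``next_speaker_index`` is illustrative, not a Magentic field):
+
+        .. code-block:: python
+
+            def _snapshot_pattern_metadata(self) -> dict[str, Any]:
+                # Persist pattern-specific counters alongside the shared state.
+                return {"next_speaker_index": self._index}
+
+            def _restore_pattern_metadata(self, metadata: dict[str, Any]) -> None:
+                self._index = metadata.get("next_speaker_index", 0)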
+ + Args: + metadata: Pattern-specific state dict (ignored) + """ + pass + async def _ensure_state_restored( self, context: WorkflowContext[Any, Any], @@ -1091,9 +1189,9 @@ async def _ensure_state_restored( @handler async def handle_start_message( self, - message: MagenticStartMessage, + message: _MagenticStartMessage, context: WorkflowContext[ - MagenticResponseMessage | MagenticRequestMessage | MagenticPlanReviewRequest, ChatMessage + _MagenticResponseMessage | _MagenticRequestMessage | _MagenticPlanReviewRequest, ChatMessage ], ) -> None: """Handle the initial start message to begin orchestration.""" @@ -1105,11 +1203,11 @@ async def handle_start_message( task=message.task, participant_descriptions=self._participants, ) + if message.messages: + self._context.chat_history.extend(message.messages) self._state_restored = True # Non-streaming callback for the orchestrator receipt of the task - if self._message_callback: - with contextlib.suppress(Exception): - await self._message_callback(self.id, message.task, ORCH_MSG_KIND_USER_TASK) + await self._emit_orchestrator_message(context, message.task, ORCH_MSG_KIND_USER_TASK) # Initial planning using the manager with real model calls self._task_ledger = await self._manager.plan(self._context.clone(deep=True)) @@ -1124,22 +1222,50 @@ async def handle_start_message( logger.debug("Task ledger created.") - if self._message_callback: - with contextlib.suppress(Exception): - await self._message_callback(self.id, self._task_ledger, ORCH_MSG_KIND_TASK_LEDGER) + await self._emit_orchestrator_message(context, self._task_ledger, ORCH_MSG_KIND_TASK_LEDGER) # Start the inner loop ctx2 = cast( - WorkflowContext[MagenticResponseMessage | MagenticRequestMessage, ChatMessage], + WorkflowContext[_MagenticResponseMessage | _MagenticRequestMessage, ChatMessage], context, ) await self._run_inner_loop(ctx2) + @handler + async def handle_task_text( + self, + task_text: str, + context: WorkflowContext[ + _MagenticResponseMessage | _MagenticRequestMessage | _MagenticPlanReviewRequest, ChatMessage + ], + ) -> None: + await self.handle_start_message(_MagenticStartMessage.from_string(task_text), context) + + @handler + async def handle_task_message( + self, + task_message: ChatMessage, + context: WorkflowContext[ + _MagenticResponseMessage | _MagenticRequestMessage | _MagenticPlanReviewRequest, ChatMessage + ], + ) -> None: + await self.handle_start_message(_MagenticStartMessage(task_message), context) + + @handler + async def handle_task_messages( + self, + conversation: list[ChatMessage], + context: WorkflowContext[ + _MagenticResponseMessage | _MagenticRequestMessage | _MagenticPlanReviewRequest, ChatMessage + ], + ) -> None: + await self.handle_start_message(_MagenticStartMessage(conversation), context) + @handler async def handle_response_message( self, - message: MagenticResponseMessage, - context: WorkflowContext[MagenticResponseMessage | MagenticRequestMessage, ChatMessage], + message: _MagenticResponseMessage, + context: WorkflowContext[_MagenticResponseMessage | _MagenticRequestMessage, ChatMessage], ) -> None: """Handle responses from agents.""" if getattr(self, "_terminated", False): @@ -1167,10 +1293,10 @@ async def handle_response_message( @handler async def handle_plan_review_response( self, - response: RequestResponse[MagenticPlanReviewRequest, MagenticPlanReviewReply], + response: RequestResponse[_MagenticPlanReviewRequest, _MagenticPlanReviewReply], context: WorkflowContext[ # may broadcast ledger next, or ask for another round of review - 
MagenticResponseMessage | MagenticRequestMessage | MagenticPlanReviewRequest, ChatMessage + _MagenticResponseMessage | _MagenticRequestMessage | _MagenticPlanReviewRequest, ChatMessage ], ) -> None: if getattr(self, "_terminated", False): @@ -1182,7 +1308,7 @@ async def handle_plan_review_response( human = response.data if human is None: # type: ignore[unreachable] # Defensive fallback: treat as revise with empty comments - human = MagenticPlanReviewReply(decision=MagenticPlanReviewDecision.REVISE, comments="") + human = _MagenticPlanReviewReply(decision=MagenticPlanReviewDecision.REVISE, comments="") if human.decision == MagenticPlanReviewDecision.APPROVE: # Close the review loop on approval (no further plan review requests this run) @@ -1217,13 +1343,11 @@ async def handle_plan_review_response( # Record the signed-off plan (no broadcast) if self._task_ledger: self._context.chat_history.append(self._task_ledger) - if self._message_callback: - with contextlib.suppress(Exception): - await self._message_callback(self.id, self._task_ledger, ORCH_MSG_KIND_TASK_LEDGER) + await self._emit_orchestrator_message(context, self._task_ledger, ORCH_MSG_KIND_TASK_LEDGER) # Enter the normal coordination loop ctx2 = cast( - WorkflowContext[MagenticResponseMessage | MagenticRequestMessage, ChatMessage], + WorkflowContext[_MagenticResponseMessage | _MagenticRequestMessage, ChatMessage], context, ) await self._run_inner_loop(ctx2) @@ -1245,14 +1369,12 @@ async def handle_plan_review_response( author_name=MAGENTIC_MANAGER_NAME, ) self._context.chat_history.append(notice) - if self._message_callback: - with contextlib.suppress(Exception): - await self._message_callback(self.id, notice, ORCH_MSG_KIND_NOTICE) + await self._emit_orchestrator_message(context, notice, ORCH_MSG_KIND_NOTICE) if self._task_ledger: self._context.chat_history.append(self._task_ledger) # No further review requests; proceed directly into coordination ctx2 = cast( - WorkflowContext[MagenticResponseMessage | MagenticRequestMessage, ChatMessage], + WorkflowContext[_MagenticResponseMessage | _MagenticRequestMessage, ChatMessage], context, ) await self._run_inner_loop(ctx2) @@ -1287,7 +1409,7 @@ async def handle_plan_review_response( async def _run_outer_loop( self, - context: WorkflowContext[MagenticResponseMessage | MagenticRequestMessage, ChatMessage], + context: WorkflowContext[_MagenticResponseMessage | _MagenticRequestMessage, ChatMessage], ) -> None: """Run the outer orchestration loop - planning phase.""" if self._context is None: @@ -1302,16 +1424,15 @@ async def _run_outer_loop( self._context.chat_history.append(self._task_ledger) # Optionally surface the updated task ledger via message callback (no broadcast) - if self._task_ledger and self._message_callback: - with contextlib.suppress(Exception): - await self._message_callback(self.id, self._task_ledger, ORCH_MSG_KIND_TASK_LEDGER) + if self._task_ledger is not None: + await self._emit_orchestrator_message(context, self._task_ledger, ORCH_MSG_KIND_TASK_LEDGER) # Start inner loop await self._run_inner_loop(context) async def _run_inner_loop( self, - context: WorkflowContext[MagenticResponseMessage | MagenticRequestMessage, ChatMessage], + context: WorkflowContext[_MagenticResponseMessage | _MagenticRequestMessage, ChatMessage], ) -> None: """Run the inner orchestration loop. Coordination phase. 
Serialized with a lock.""" if self._context is None or self._task_ledger is None: @@ -1321,7 +1442,7 @@ async def _run_inner_loop( async def _run_inner_loop_helper( self, - context: WorkflowContext[MagenticResponseMessage | MagenticRequestMessage, ChatMessage], + context: WorkflowContext[_MagenticResponseMessage | _MagenticRequestMessage, ChatMessage], ) -> None: """Run inner loop with exclusive access.""" # Narrow optional context for the remainder of this method @@ -1388,10 +1509,7 @@ async def _run_inner_loop_helper( author_name=MAGENTIC_MANAGER_NAME, ) ctx.chat_history.append(instruction_msg) - # Surface instruction message to observers - if self._message_callback: - with contextlib.suppress(Exception): - await self._message_callback(self.id, instruction_msg, ORCH_MSG_KIND_INSTRUCTION) + await self._emit_orchestrator_message(context, instruction_msg, ORCH_MSG_KIND_INSTRUCTION) # Determine the selected agent's executor id target_executor_id = f"agent_{next_speaker_value}" @@ -1399,7 +1517,7 @@ async def _run_inner_loop_helper( # Request specific agent to respond logger.debug("Magentic Orchestrator: Requesting %s to respond", next_speaker_value) await context.send_message( - MagenticRequestMessage( + _MagenticRequestMessage( agent_name=next_speaker_value, instruction=str(instruction), task_context=ctx.task.text, @@ -1409,7 +1527,7 @@ async def _run_inner_loop_helper( async def _reset_and_replan( self, - context: WorkflowContext[MagenticResponseMessage | MagenticRequestMessage, ChatMessage], + context: WorkflowContext[_MagenticResponseMessage | _MagenticRequestMessage, ChatMessage], ) -> None: """Reset context and replan.""" if self._context is None: @@ -1422,6 +1540,8 @@ async def _reset_and_replan( # Replan self._task_ledger = await self._manager.replan(self._context.clone(deep=True)) + self._context.chat_history.append(self._task_ledger) + await self._emit_orchestrator_message(context, self._task_ledger, ORCH_MSG_KIND_TASK_LEDGER) # Internally reset all registered agent executors (no handler/messages involved) for agent in self._agent_executors.values(): @@ -1433,7 +1553,7 @@ async def _reset_and_replan( async def _prepare_final_answer( self, - context: WorkflowContext[MagenticResponseMessage | MagenticRequestMessage, ChatMessage], + context: WorkflowContext[_MagenticResponseMessage | _MagenticRequestMessage, ChatMessage], ) -> None: """Prepare the final answer using the manager.""" if self._context is None: @@ -1444,13 +1564,11 @@ async def _prepare_final_answer( # Emit a completed event for the workflow await context.yield_output(final_answer) - - if self._result_callback: - await self._result_callback(final_answer) + await context.add_event(MagenticFinalResultEvent(message=final_answer)) async def _check_within_limits_or_complete( self, - context: WorkflowContext[MagenticResponseMessage | MagenticRequestMessage, ChatMessage], + context: WorkflowContext[_MagenticResponseMessage | _MagenticRequestMessage, ChatMessage], ) -> bool: """Check if orchestrator is within operational limits.""" if self._context is None: @@ -1478,9 +1596,7 @@ async def _check_within_limits_or_complete( # Yield the partial result and signal completion await context.yield_output(partial_result) - - if self._result_callback: - await self._result_callback(partial_result) + await context.add_event(MagenticFinalResultEvent(message=partial_result)) return False return True @@ -1488,7 +1604,7 @@ async def _check_within_limits_or_complete( async def _send_plan_review_request( self, context: WorkflowContext[ - 
MagenticResponseMessage | MagenticRequestMessage | MagenticPlanReviewRequest, ChatMessage
+            _MagenticResponseMessage | _MagenticRequestMessage | _MagenticPlanReviewRequest, ChatMessage
        ],
    ) -> None:
        """Emit a PlanReviewRequest via RequestInfoExecutor."""
@@ -1500,7 +1616,7 @@
        plan_text = ledger.plan.text if ledger else ""
        task_text = self._context.task.text if self._context else ""

-        req = MagenticPlanReviewRequest(
+        req = _MagenticPlanReviewRequest(
            task_text=task_text,
            facts_text=facts_text,
            plan_text=plan_text,
@@ -1509,10 +1625,13 @@
        await context.send_message(req)


+# region Magentic Executors
+
+
class MagenticAgentExecutor(Executor):
    """Magentic agent executor that wraps an agent for participation in workflows.

-    This executor handles:
+    Uses AgentExecutor's conversation-injection hooks to handle:
    - Receiving task ledger broadcasts
    - Responding to specific agent requests
    - Resetting agent state when needed
@@ -1522,34 +1641,42 @@ def __init__(
        self,
        agent: AgentProtocol | Executor,
        agent_id: str,
-        agent_response_callback: Callable[[str, ChatMessage], Awaitable[None]] | None = None,
-        streaming_agent_response_callback: Callable[[str, AgentRunResponseUpdate, bool], Awaitable[None]] | None = None,
    ) -> None:
        super().__init__(f"agent_{agent_id}")
        self._agent = agent
        self._agent_id = agent_id
        self._chat_history: list[ChatMessage] = []
-        self._agent_response_callback = agent_response_callback
-        self._streaming_agent_response_callback = streaming_agent_response_callback
        self._state_restored = False

    def snapshot_state(self) -> dict[str, Any]:
+        """Capture current executor state for checkpointing.
+
+        Returns:
+            Dict containing serialized chat history
+        """
+        from ._conversation_state import encode_chat_messages
+
        return {
-            "chat_history": [_message_to_payload(msg) for msg in self._chat_history],
+            "chat_history": encode_chat_messages(self._chat_history),
        }

    def restore_state(self, state: dict[str, Any]) -> None:
+        """Restore executor state from checkpoint.
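+
+        A round-trip sketch, assuming ``encode_chat_messages`` and ``decode_chat_messages``
+        from ``_conversation_state`` are inverse serializers:
+
+        .. code-block:: python
+
+            history = [ChatMessage(role=Role.USER, text="hello")]
+            payload = encode_chat_messages(history)  # JSON-safe list of dicts
+            assert decode_chat_messages(payload)[0].text == "hello"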
+ + Args: + state: Checkpoint data dict + """ + from ._conversation_state import decode_chat_messages + history_payload = state.get("chat_history") - if not history_payload: - self._chat_history = [] - return - restored: list[ChatMessage] = [] - for item in history_payload: + if history_payload: try: - restored.append(_message_from_payload(item)) + self._chat_history = decode_chat_messages(history_payload) except Exception as exc: # pragma: no cover - logger.debug("Agent %s: Skipping invalid chat history item during restore: %s", self._agent_id, exc) - self._chat_history = restored + logger.warning("Agent %s: Failed to restore chat history: %s", self._agent_id, exc) + self._chat_history = [] + else: + self._chat_history = [] async def _ensure_state_restored(self, context: WorkflowContext[Any, Any]) -> None: if self._state_restored and self._chat_history: @@ -1571,7 +1698,7 @@ async def _ensure_state_restored(self, context: WorkflowContext[Any, Any]) -> No @handler async def handle_response_message( - self, message: MagenticResponseMessage, context: WorkflowContext[MagenticResponseMessage] + self, message: _MagenticResponseMessage, context: WorkflowContext[_MagenticResponseMessage] ) -> None: """Handle response message (task ledger broadcast).""" logger.debug("Agent %s: Received response message", self._agent_id) @@ -1610,7 +1737,7 @@ def _get_persona_adoption_role(self) -> Role: @handler async def handle_request_message( - self, message: MagenticRequestMessage, context: WorkflowContext[MagenticResponseMessage] + self, message: _MagenticRequestMessage, context: WorkflowContext[_MagenticResponseMessage, AgentRunResponse] ) -> None: """Handle request to respond.""" if message.agent_name != self._agent_id: @@ -1641,13 +1768,15 @@ async def handle_request_message( text=f"{self._agent_id} is a workflow executor and cannot be invoked directly.", author_name=self._agent_id, ) + self._chat_history.append(response) + await self._emit_agent_message_event(context, response) else: # Invoke the agent - response = await self._invoke_agent() - self._chat_history.append(response) + response = await self._invoke_agent(context) + self._chat_history.append(response) # Send response back to orchestrator - await context.send_message(MagenticResponseMessage(body=response)) + await context.send_message(_MagenticResponseMessage(body=response)) except Exception as e: logger.warning("Agent %s invoke failed: %s", self._agent_id, e) @@ -1657,7 +1786,8 @@ async def handle_request_message( text=f"Agent {self._agent_id}: Error processing request - {str(e)[:100]}", ) self._chat_history.append(response) - await context.send_message(MagenticResponseMessage(body=response)) + await self._emit_agent_message_event(context, response) + await context.send_message(_MagenticResponseMessage(body=response)) def reset(self) -> None: """Reset the internal chat history of the agent (internal operation).""" @@ -1665,7 +1795,55 @@ def reset(self) -> None: self._chat_history.clear() self._state_restored = True - async def _invoke_agent(self) -> ChatMessage: + async def _emit_agent_delta_event( + self, + ctx: WorkflowContext[Any, Any], + update: AgentRunResponseUpdate, + ) -> None: + contents = list(getattr(update, "contents", []) or []) + chunk = getattr(update, "text", None) + if not chunk: + chunk = "".join(getattr(item, "text", "") for item in contents if hasattr(item, "text")) + if chunk: + await ctx.add_event( + MagenticAgentDeltaEvent( + agent_id=self._agent_id, + text=chunk or None, + role=getattr(update, "role", None), + ) + ) + 
for item in contents: + if isinstance(item, FunctionCallContent): + await ctx.add_event( + MagenticAgentDeltaEvent( + agent_id=self._agent_id, + function_call_id=getattr(item, "call_id", None), + function_call_name=getattr(item, "name", None), + function_call_arguments=getattr(item, "arguments", None), + role=getattr(update, "role", None), + ) + ) + elif isinstance(item, FunctionResultContent): + await ctx.add_event( + MagenticAgentDeltaEvent( + agent_id=self._agent_id, + function_result_id=getattr(item, "call_id", None), + function_result=getattr(item, "result", None), + role=getattr(update, "role", None), + ) + ) + + async def _emit_agent_message_event( + self, + ctx: WorkflowContext[Any, Any], + message: ChatMessage, + ) -> None: + await ctx.add_event(MagenticAgentMessageEvent(agent_id=self._agent_id, message=message)) + + async def _invoke_agent( + self, + ctx: WorkflowContext[_MagenticResponseMessage, AgentRunResponse], + ) -> ChatMessage: """Invoke the wrapped agent and return a response.""" logger.debug(f"Agent {self._agent_id}: Running with {len(self._chat_history)} messages") @@ -1674,20 +1852,10 @@ async def _invoke_agent(self) -> ChatMessage: agent = cast("AgentProtocol", self._agent) async for update in agent.run_stream(messages=self._chat_history): # type: ignore[attr-defined] updates.append(update) - if self._streaming_agent_response_callback is not None: - with contextlib.suppress(Exception): - await self._streaming_agent_response_callback( - self._agent_id, - update, - False, - ) + await self._emit_agent_delta_event(ctx, update) run_result: AgentRunResponse = AgentRunResponse.from_agent_run_response_updates(updates) - # mark final using last update if available - if updates and self._streaming_agent_response_callback is not None: - with contextlib.suppress(Exception): - await self._streaming_agent_response_callback(self._agent_id, updates[-1], True) messages: list[ChatMessage] | None = None with contextlib.suppress(Exception): messages = list(run_result.messages) # type: ignore[assignment] @@ -1697,9 +1865,7 @@ async def _invoke_agent(self) -> ChatMessage: role: Role = last.role if last.role else Role.ASSISTANT text = last.text or str(last) msg = ChatMessage(role=role, text=text, author_name=author) - if self._agent_response_callback is not None: - with contextlib.suppress(Exception): - await self._agent_response_callback(self._agent_id, msg) + await self._emit_agent_message_event(ctx, msg) return msg msg = ChatMessage( @@ -1707,9 +1873,7 @@ async def _invoke_agent(self) -> ChatMessage: text=f"Agent {self._agent_id}: No output produced", author_name=self._agent_id, ) - if self._agent_response_callback is not None: - with contextlib.suppress(Exception): - await self._agent_response_callback(self._agent_id, msg) + await self._emit_agent_message_event(ctx, msg) return msg @@ -1719,35 +1883,194 @@ async def _invoke_agent(self) -> ChatMessage: class MagenticBuilder: - """High-level builder for creating Magentic One workflows.""" + """Fluent builder for creating Magentic One multi-agent orchestration workflows. + + Magentic One workflows use an LLM-powered manager to coordinate multiple agents through + dynamic task planning, progress tracking, and adaptive replanning. The manager creates + plans, selects agents, monitors progress, and determines when to replan or complete. + + The builder provides a fluent API for configuring participants, the manager, optional + plan review, checkpointing, and event callbacks. + + Usage: + + .. 
code-block:: python

+            from agent_framework import MagenticBuilder, StandardMagenticManager
+            from azure.ai.projects.aio import AIProjectClient
+
+            # Create manager with LLM client
+            project_client = AIProjectClient.from_connection_string(...)
+            chat_client = project_client.inference.get_chat_completions_client()
+
+            # Build Magentic workflow with agents
+            workflow = (
+                MagenticBuilder()
+                .participants(researcher=research_agent, writer=writing_agent, coder=coding_agent)
+                .with_standard_manager(chat_client=chat_client, max_round_count=20, max_stall_count=3)
+                .with_plan_review(enable=True)
+                .with_checkpointing(checkpoint_storage)
+                .build()
+            )
+
+            # Execute workflow and print the final synthesized answer
+            async for event in workflow.run_stream("Research and write an article about AI agents"):
+                if isinstance(event, MagenticFinalResultEvent):
+                    print(event.message.text)
+
+    With custom manager:
+
+    .. code-block:: python
+
+        # Create custom manager subclass
+        class MyCustomManager(MagenticManagerBase):
+            async def plan(self, context: MagenticContext) -> ChatMessage:
+                # Custom planning logic
+                ...
+
+
+        manager = MyCustomManager()
+        workflow = MagenticBuilder().participants(agent1=agent1, agent2=agent2).with_standard_manager(manager).build()
+
+    See Also:
+        - :class:`MagenticManagerBase`: Base class for custom managers
+        - :class:`StandardMagenticManager`: Default LLM-powered manager
+        - :class:`MagenticContext`: Context object passed to manager methods
+        - :class:`MagenticFinalResultEvent`: Event carrying the final answer
+    """

    def __init__(self) -> None:
        self._participants: dict[str, AgentProtocol | Executor] = {}
        self._manager: MagenticManagerBase | None = None
-        self._exception_callback: Callable[[Exception], None] | None = None
-        self._result_callback: Callable[[ChatMessage], Awaitable[None]] | None = None
-        # Orchestrator-emitted message callback: (orchestrator_id, message, kind)
-        self._message_callback: Callable[[str, ChatMessage, str], Awaitable[None]] | None = None
-        self._agent_response_callback: Callable[[str, ChatMessage], Awaitable[None]] | None = None
-        self._agent_streaming_callback: Callable[[str, AgentRunResponseUpdate, bool], Awaitable[None]] | None = None
        self._enable_plan_review: bool = False
-        # Unified callback wiring
-        self._unified_callback: CallbackSink | None = None
-        self._callback_mode: MagenticCallbackMode | None = None
        self._checkpoint_storage: CheckpointStorage | None = None

    def participants(self, **participants: AgentProtocol | Executor) -> Self:
-        """Add participants (agents) to the workflow."""
+        """Add participant agents or executors to the Magentic workflow.
+
+        Participants are the agents that will execute tasks under the manager's direction.
+        Each participant should have distinct capabilities that complement the team. The
+        manager will select which participant to invoke based on the current plan and
+        progress state.
+
+        Args:
+            **participants: Named agents or executors to add to the workflow. Names should
+                be descriptive of the agent's role (e.g., researcher=research_agent).
+                Accepts BaseAgent instances or custom Executor implementations.
+
+        Returns:
+            Self for method chaining
+
+        Usage:
+
+        ..
code-block:: python

+            workflow = (
+                MagenticBuilder()
+                .participants(
+                    researcher=research_agent, writer=writing_agent, coder=coding_agent, reviewer=review_agent
+                )
+                .with_standard_manager(chat_client=client)
+                .build()
+            )
+
+        Notes:
+            - Participant names become part of the manager's context for selection
+            - Agent descriptions (if available) are extracted and provided to the manager
+            - Can be called multiple times to add participants incrementally
+        """
        self._participants.update(participants)
        return self

    def with_plan_review(self, enable: bool = True) -> "MagenticBuilder":
-        """Require human sign-off on the plan before coordination begins."""
+        """Enable or disable human-in-the-loop plan review before task execution.
+
+        When enabled, the workflow will pause after the manager generates the initial
+        plan and emit a MagenticPlanReviewRequest event. A human reviewer can then
+        approve, request revisions, or reject the plan. The workflow continues only
+        after approval.
+
+        This is useful for:
+            - High-stakes tasks requiring human oversight
+            - Validating the manager's understanding of requirements
+            - Catching hallucinations or unrealistic plans early
+            - Educational scenarios where learners review AI planning
+
+        Args:
+            enable: Whether to require plan review (default True)
+
+        Returns:
+            Self for method chaining
+
+        Usage:
+
+        .. code-block:: python
+
+            workflow = (
+                MagenticBuilder()
+                .participants(agent1=agent1)
+                .with_standard_manager(chat_client=client)
+                .with_plan_review(enable=True)
+                .build()
+            )
+
+            # During execution, handle plan review
+            async for event in workflow.run_stream("task"):
+                if isinstance(event, MagenticPlanReviewRequest):
+                    # Review plan and respond
+                    reply = MagenticPlanReviewReply(decision=MagenticPlanReviewDecision.APPROVE)
+                    await workflow.send(reply)
+
+        See Also:
+            - :class:`MagenticPlanReviewRequest`: Event emitted for review
+            - :class:`MagenticPlanReviewReply`: Response to send back
+            - :class:`MagenticPlanReviewDecision`: Approve/Revise/Reject options
+        """
        self._enable_plan_review = enable
        return self

    def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "MagenticBuilder":
-        """Persist workflow state using the provided checkpoint storage."""
+        """Enable workflow state persistence using the provided checkpoint storage.
+
+        Checkpointing allows workflows to be paused, resumed across process restarts,
+        or recovered after failures. The entire workflow state including conversation
+        history, task ledgers, and progress is persisted at key points.
+
+        Args:
+            checkpoint_storage: Storage backend for checkpoints (e.g., InMemoryCheckpointStorage,
+                FileCheckpointStorage, or custom implementations)
+
+        Returns:
+            Self for method chaining
+
+        Usage:
+
+        ..
code-block:: python + + from agent_framework import InMemoryCheckpointStorage + + storage = InMemoryCheckpointStorage() + workflow = ( + MagenticBuilder() + .participants(agent1=agent1) + .with_standard_manager(chat_client=client) + .with_checkpointing(storage) + .build() + ) + + # First run + thread_id = "task-123" + async for msg in workflow.run("task", thread_id=thread_id): + print(msg.text) + + # Resume from checkpoint + async for msg in workflow.run("continue", thread_id=thread_id): + print(msg.text) + + Notes: + - Checkpoints are created after each significant state transition + - Thread ID must be consistent across runs to resume properly + - Storage implementations may have different persistence guarantees + """ self._checkpoint_storage = checkpoint_storage return self @@ -1757,7 +2080,7 @@ def with_standard_manager( *, # Constructor args for StandardMagenticManager when manager is not provided chat_client: ChatClientProtocol | None = None, - task_ledger: MagenticTaskLedger | None = None, + task_ledger: _MagenticTaskLedger | None = None, instructions: str | None = None, # Prompt overrides task_ledger_facts_prompt: str | None = None, @@ -1772,18 +2095,106 @@ def with_standard_manager( max_reset_count: int | None = None, max_round_count: int | None = None, ) -> Self: - """Configure the Magentic manager. + """Configure the workflow manager for task planning and agent coordination. - Usage patterns: - - Provide an existing manager instance (recommended for custom or preconfigured managers): - builder.with_standard_manager(my_manager) - - Or pass explicit kwargs to construct a StandardMagenticManager for you: - builder.with_standard_manager(chat_client=my_client, max_round_count=10, max_stall_count=3) + The manager is responsible for creating plans, selecting agents, tracking progress, + and deciding when to replan or complete. This method supports two usage patterns: + + 1. **Provide existing manager**: Pass a pre-configured manager instance (custom + or standard) for full control over behavior + 2. **Auto-create standard manager**: Pass chat_client and options to automatically + create a StandardMagenticManager with specified configuration + + Args: + manager: Pre-configured manager instance (StandardMagenticManager or custom + MagenticManagerBase subclass). If provided, all other arguments are ignored. + chat_client: LLM chat client for generating plans and decisions. Required if + manager is not provided. + task_ledger: Optional custom task ledger implementation for specialized + prompting or structured output requirements + instructions: System instructions prepended to all manager prompts to guide + behavior and set expectations + task_ledger_facts_prompt: Custom prompt template for extracting facts from + task description + task_ledger_plan_prompt: Custom prompt template for generating initial plan + task_ledger_full_prompt: Custom prompt template for complete task ledger + (facts + plan combined) + task_ledger_facts_update_prompt: Custom prompt template for updating facts + based on agent progress + task_ledger_plan_update_prompt: Custom prompt template for replanning when + needed + progress_ledger_prompt: Custom prompt template for assessing progress and + determining next actions + final_answer_prompt: Custom prompt template for synthesizing final response + when task is complete + max_stall_count: Maximum consecutive rounds without progress before triggering + replan (default 3). Set to 0 to disable stall detection. 
+ max_reset_count: Maximum number of complete resets allowed before failing. + None means unlimited resets. + max_round_count: Maximum total coordination rounds before stopping with + partial result. None means unlimited rounds. + + Returns: + Self for method chaining + + Raises: + ValueError: If manager is None and chat_client is also None + + Usage with auto-created manager: + + .. code-block:: python + + from azure.ai.projects.aio import AIProjectClient + + project_client = AIProjectClient.from_connection_string(...) + chat_client = project_client.inference.get_chat_completions_client() + + workflow = ( + MagenticBuilder() + .participants(agent1=agent1, agent2=agent2) + .with_standard_manager( + chat_client=chat_client, + max_round_count=20, + max_stall_count=3, + instructions="Be concise and focus on accuracy", + ) + .build() + ) + + Usage with custom manager: + + .. code-block:: python + + class MyManager(MagenticManagerBase): + async def plan(self, context: MagenticContext) -> ChatMessage: + # Custom planning logic + return ChatMessage(role=Role.ASSISTANT, text="...") + + + manager = MyManager() + workflow = MagenticBuilder().participants(agent1=agent1).with_standard_manager(manager).build() + + Usage with prompt customization: + + .. code-block:: python + + workflow = ( + MagenticBuilder() + .participants(coder=coder_agent, reviewer=reviewer_agent) + .with_standard_manager( + chat_client=chat_client, + task_ledger_plan_prompt="Create a detailed step-by-step plan...", + progress_ledger_prompt="Assess progress and decide next action...", + max_stall_count=2, + ) + .build() + ) Notes: - - If ``manager`` is provided, it is used as-is (can be a StandardMagenticManager or any MagenticManagerBase). - - If not provided, ``chat_client`` is required and a new StandardMagenticManager will be created - with the provided options. + - StandardMagenticManager uses structured LLM calls for all decisions + - Custom managers can implement alternative selection strategies + - Prompt templates support Jinja2-style variable substitution + - Stall detection helps prevent infinite loops in stuck scenarios """ if manager is not None: self._manager = manager @@ -1811,29 +2222,7 @@ def with_standard_manager( ) return self - def on_exception(self, callback: Callable[[Exception], None]) -> Self: - """Set the exception callback.""" - self._exception_callback = callback - return self - - def on_result(self, callback: Callable[[ChatMessage], Awaitable[None]]) -> Self: - """Set the result callback.""" - self._result_callback = callback - return self - - def on_event( - self, callback: CallbackSink, *, mode: MagenticCallbackMode = MagenticCallbackMode.NON_STREAMING - ) -> Self: - """Register a single sink for all workflow, orchestrator, and agent events. - - mode=STREAMING yields AgentDeltaEvent plus AgentMessageEvent at the end. - mode=NON_STREAMING only yields AgentMessageEvent at the end (no deltas). 
- """ - self._unified_callback = callback - self._callback_mode = mode - return self - - def build(self) -> "MagenticWorkflow": + def build(self) -> Workflow: """Build a Magentic workflow with the orchestrator and all agent executors.""" if not self._participants: raise ValueError("No participants added to Magentic workflow") @@ -1846,150 +2235,49 @@ def build(self) -> "MagenticWorkflow": # Create participant descriptions participant_descriptions: dict[str, str] = {} for name, participant in self._participants.items(): - if isinstance(participant, BaseAgent): - description = getattr(participant, "description", None) or f"Agent {name}" - else: - description = f"Executor {name}" - participant_descriptions[name] = description - - # If unified sink is provided, map it to legacy callback surfaces - unified = self._unified_callback - mode = self._callback_mode - - if unified is not None: - prior_result = self._result_callback - - async def _on_result(msg: ChatMessage) -> None: - with contextlib.suppress(Exception): - await unified(MagenticFinalResultEvent(message=msg)) - if prior_result is not None: - with contextlib.suppress(Exception): - await prior_result(msg) - - async def _on_orch(orch_id: str, msg: ChatMessage, kind: str) -> None: - with contextlib.suppress(Exception): - await unified(MagenticOrchestratorMessageEvent(orchestrator_id=orch_id, message=msg, kind=kind)) - - async def _on_agent_final(agent_id: str, message: ChatMessage) -> None: - with contextlib.suppress(Exception): - await unified(MagenticAgentMessageEvent(agent_id=agent_id, message=message)) - - async def _on_agent_delta(agent_id: str, update: AgentRunResponseUpdate, is_final: bool) -> None: - if mode == MagenticCallbackMode.STREAMING: - # TODO(evmattso): Make sure we surface other non-text streaming items - # (or per-type events) and plumb through consumers. 
- chunk: str | None = getattr(update, "text", None) - if not chunk: - with contextlib.suppress(Exception): - contents = getattr(update, "contents", []) or [] - chunk = "".join(getattr(c, "text", "") for c in contents) or None - if chunk: - with contextlib.suppress(Exception): - await unified( - MagenticAgentDeltaEvent( - agent_id=agent_id, - text=chunk, - role=getattr(update, "role", None), - ) - ) - # Emit function call/result items if present on the update - with contextlib.suppress(Exception): - content_items = getattr(update, "contents", []) or [] - for item in content_items: - if isinstance(item, FunctionCallContent): - await unified( - MagenticAgentDeltaEvent( - agent_id=agent_id, - function_call_id=getattr(item, "call_id", None), - function_call_name=getattr(item, "name", None), - function_call_arguments=getattr(item, "arguments", None), - role=getattr(update, "role", None), - ) - ) - elif isinstance(item, FunctionResultContent): - await unified( - MagenticAgentDeltaEvent( - agent_id=agent_id, - function_result_id=getattr(item, "call_id", None), - function_result=getattr(item, "result", None), - role=getattr(update, "role", None), - ) - ) - # final aggregation handled by _on_agent_final via agent_response_callback - - # Override delegates for orchestrator and agent callbacks - self._result_callback = _on_result - self._message_callback = _on_orch - self._agent_response_callback = _on_agent_final - self._agent_streaming_callback = _on_agent_delta if mode == MagenticCallbackMode.STREAMING else None - - # Create orchestrator executor - orchestrator_executor = MagenticOrchestratorExecutor( - manager=self._manager, - participants=participant_descriptions, - result_callback=self._result_callback, - message_callback=self._message_callback, - agent_response_callback=self._agent_response_callback, - streaming_agent_response_callback=self._agent_streaming_callback, - require_plan_signoff=self._enable_plan_review, - executor_id="magentic_orchestrator", - ) - - # Create workflow builder and set orchestrator as start - workflow_builder = WorkflowBuilder().set_start_executor(orchestrator_executor) - - if self._enable_plan_review: - from ._request_info_executor import RequestInfoExecutor - - request_info = RequestInfoExecutor(id="magentic_plan_review") - workflow_builder = ( - workflow_builder - # Only route plan review asks to request_info - .add_edge( - orchestrator_executor, - request_info, - condition=lambda msg: isinstance(msg, MagenticPlanReviewRequest), - ).add_edge(request_info, orchestrator_executor) + fallback = f"Executor {name}" if isinstance(participant, Executor) else f"Agent {name}" + participant_descriptions[name] = participant_description(participant, fallback) + + # Type narrowing: we already checked self._manager is not None above + manager: MagenticManagerBase = self._manager # type: ignore[assignment] + + def _orchestrator_factory(wiring: _GroupChatConfig) -> Executor: + return MagenticOrchestratorExecutor( + manager=manager, + participants=participant_descriptions, + require_plan_signoff=self._enable_plan_review, + executor_id="magentic_orchestrator", ) - def _route_to_agent(msg: object, *, agent_name: str) -> bool: - """Route only messages meant for this agent. - - - MagenticRequestMessage -> only to the named agent - - MagenticResponseMessage -> broadcast=True to all, or target_agent==agent_name - Everything else (e.g., RequestInfoMessage) -> do not route to agents. 
- """ - if isinstance(msg, MagenticRequestMessage): - return msg.agent_name == agent_name - if isinstance(msg, MagenticResponseMessage): - return bool(getattr(msg, "broadcast", False)) or getattr(msg, "target_agent", None) == agent_name - return False - - # Add agent executors and connect them - for name, participant in self._participants.items(): + def _participant_factory( + spec: GroupChatParticipantSpec, + wiring: _GroupChatConfig, + ) -> _GroupChatParticipantPipeline: agent_executor = MagenticAgentExecutor( - participant, - name, - agent_response_callback=self._agent_response_callback, - streaming_agent_response_callback=self._agent_streaming_callback, + spec.participant, + spec.name, ) - # Register for internal control (e.g., reset) - orchestrator_executor.register_agent_executor(name, agent_executor) + orchestrator = wiring.orchestrator + if isinstance(orchestrator, MagenticOrchestratorExecutor): + orchestrator.register_agent_executor(spec.name, agent_executor) + return (agent_executor,) - # Add bidirectional edges between orchestrator and agent - def _cond(msg: object, _an: str = name) -> bool: - return _route_to_agent(msg, agent_name=_an) - - workflow_builder = workflow_builder.add_edge( - orchestrator_executor, - agent_executor, - condition=_cond, - ).add_edge(agent_executor, orchestrator_executor) + # Magentic provides its own orchestrator via custom factory, so no manager is needed + group_builder = GroupChatBuilder( + _orchestrator_factory=group_chat_orchestrator(_orchestrator_factory), + _participant_factory=_participant_factory, + ).participants(self._participants) if self._checkpoint_storage is not None: - workflow_builder = workflow_builder.with_checkpointing(self._checkpoint_storage) + group_builder = group_builder.with_checkpointing(self._checkpoint_storage) - return MagenticWorkflow(workflow_builder.build()) + if self._enable_plan_review: + group_builder = group_builder.with_request_handler( + lambda _wiring: RequestInfoExecutor(id="magentic_plan_review"), + condition=lambda msg: isinstance(msg, _MagenticPlanReviewRequest), + ) + + return group_builder.build() def start_with_string(self, task: str) -> "MagenticWorkflow": """Build a Magentic workflow and return a wrapper with convenience methods for string tasks. @@ -2000,7 +2288,7 @@ def start_with_string(self, task: str) -> "MagenticWorkflow": Returns: A MagenticWorkflow wrapper that provides convenience methods for starting with strings. """ - return MagenticWorkflow(self.build().workflow, task) + return MagenticWorkflow(self.build(), task) def start_with_message(self, task: ChatMessage) -> "MagenticWorkflow": """Build a Magentic workflow and return a wrapper with convenience methods for ChatMessage tasks. @@ -2011,7 +2299,7 @@ def start_with_message(self, task: ChatMessage) -> "MagenticWorkflow": Returns: A MagenticWorkflow wrapper that provides convenience methods. """ - return MagenticWorkflow(self.build().workflow, task.text) + return MagenticWorkflow(self.build(), task.text) def start_with(self, task: str | ChatMessage) -> "MagenticWorkflow": """Build a Magentic workflow and return a wrapper with convenience methods. @@ -2054,7 +2342,7 @@ async def run_streaming_with_string(self, task_text: str) -> AsyncIterable[Workf Yields: WorkflowEvent: The events generated during the workflow execution. 
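
        A minimal usage sketch (``research_agent`` and ``chat_client`` are placeholders;
        the preset task given to ``start_with`` is superseded by the task passed here):

        .. code-block:: python

            wrapper = (
                MagenticBuilder()
                .participants(researcher=research_agent)
                .with_standard_manager(chat_client=chat_client)
                .start_with("initial task")
            )
            async for event in wrapper.run_streaming_with_string("Summarize the findings"):
                print(type(event).__name__)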
""" - start_message = MagenticStartMessage.from_string(task_text) + start_message = _MagenticStartMessage.from_string(task_text) async for event in self._workflow.run_stream(start_message): yield event @@ -2067,7 +2355,7 @@ async def run_streaming_with_message(self, task_message: ChatMessage) -> AsyncIt Yields: WorkflowEvent: The events generated during the workflow execution. """ - start_message = MagenticStartMessage(task=task_message) + start_message = _MagenticStartMessage(task_message) async for event in self._workflow.run_stream(start_message): yield event @@ -2084,11 +2372,11 @@ async def run_stream(self, message: Any | None = None) -> AsyncIterable[Workflow if message is None: if self._task_text is None: raise ValueError("No message provided and no preset task text available") - message = MagenticStartMessage.from_string(self._task_text) + message = _MagenticStartMessage.from_string(self._task_text) elif isinstance(message, str): - message = MagenticStartMessage.from_string(message) - elif isinstance(message, ChatMessage): - message = MagenticStartMessage(task=message) + message = _MagenticStartMessage.from_string(message) + elif isinstance(message, (ChatMessage, list)): + message = _MagenticStartMessage(message) # type: ignore[arg-type] async for event in self._workflow.run_stream(message): yield event @@ -2257,3 +2545,7 @@ def __getattr__(self, name: str) -> Any: # endregion Magentic Workflow + +# Public aliases for types needed by users implementing custom plan review handlers +MagenticPlanReviewRequest = _MagenticPlanReviewRequest +MagenticPlanReviewReply = _MagenticPlanReviewReply diff --git a/python/packages/core/agent_framework/_workflows/_message_utils.py b/python/packages/core/agent_framework/_workflows/_message_utils.py new file mode 100644 index 0000000000..ad4a9b55f6 --- /dev/null +++ b/python/packages/core/agent_framework/_workflows/_message_utils.py @@ -0,0 +1,43 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Shared helpers for normalizing workflow message inputs.""" + +from collections.abc import Sequence + +from agent_framework import ChatMessage, Role + + +def normalize_messages_input( + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, +) -> list[ChatMessage]: + """Normalize heterogeneous message inputs to a list of ChatMessage objects. + + Args: + messages: String, ChatMessage, or sequence of either. None yields empty list. + + Returns: + List of ChatMessage instances suitable for workflow consumption. + """ + if messages is None: + return [] + + if isinstance(messages, str): + return [ChatMessage(role=Role.USER, text=messages)] + + if isinstance(messages, ChatMessage): + return [messages] + + normalized: list[ChatMessage] = [] + for item in messages: + if isinstance(item, str): + normalized.append(ChatMessage(role=Role.USER, text=item)) + elif isinstance(item, ChatMessage): + normalized.append(item) + else: + raise TypeError( + f"Messages sequence must contain only str or ChatMessage instances; found {type(item).__name__}." 
+ ) + return normalized + + +__all__ = ["normalize_messages_input"] diff --git a/python/packages/core/agent_framework/_workflows/_model_utils.py b/python/packages/core/agent_framework/_workflows/_model_utils.py index 58bd614b34..72380901c6 100644 --- a/python/packages/core/agent_framework/_workflows/_model_utils.py +++ b/python/packages/core/agent_framework/_workflows/_model_utils.py @@ -2,7 +2,7 @@ import copy import sys -from typing import Any, TypeVar +from typing import Any, TypeVar, cast if sys.version_info >= (3, 11): from typing import Self # pragma: no cover @@ -37,7 +37,7 @@ def from_json(cls: type[TModel], raw: str) -> TModel: data = json.loads(raw) if not isinstance(data, dict): raise ValueError("JSON payload must decode to a mapping") - return cls.from_dict(data) + return cls.from_dict(cast(dict[str, Any], data)) def encode_value(value: Any) -> Any: diff --git a/python/packages/core/agent_framework/_workflows/_orchestration_state.py b/python/packages/core/agent_framework/_workflows/_orchestration_state.py new file mode 100644 index 0000000000..26c0068e7a --- /dev/null +++ b/python/packages/core/agent_framework/_workflows/_orchestration_state.py @@ -0,0 +1,92 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Unified state management for group chat orchestrators. + +Provides OrchestrationState dataclass for standardized checkpoint serialization +across GroupChat, Handoff, and Magentic patterns. +""" + +from dataclasses import dataclass, field +from typing import Any + +from .._types import ChatMessage + + +def _new_chat_message_list() -> list[ChatMessage]: + """Factory function for typed empty ChatMessage list. + + Satisfies the type checker. + """ + return [] + + +def _new_metadata_dict() -> dict[str, Any]: + """Factory function for typed empty metadata dict. + + Satisfies the type checker. + """ + return {} + + +@dataclass +class OrchestrationState: + """Unified state container for orchestrator checkpointing. + + This dataclass standardizes checkpoint serialization across all three + group chat patterns while allowing pattern-specific extensions via metadata. + + Common attributes cover shared orchestration concerns (task, conversation, + round tracking). Pattern-specific state goes in the metadata dict. + + Attributes: + conversation: Full conversation history (all messages) + round_index: Number of coordination rounds completed (0 if not tracked) + metadata: Extensible dict for pattern-specific state + task: Optional primary task/question being orchestrated + """ + + conversation: list[ChatMessage] = field(default_factory=_new_chat_message_list) + round_index: int = 0 + metadata: dict[str, Any] = field(default_factory=_new_metadata_dict) + task: ChatMessage | None = None + + def to_dict(self) -> dict[str, Any]: + """Serialize to dict for checkpointing. + + Returns: + Dict with encoded conversation and metadata for persistence + """ + from ._conversation_state import encode_chat_messages + + result: dict[str, Any] = { + "conversation": encode_chat_messages(self.conversation), + "round_index": self.round_index, + "metadata": dict(self.metadata), + } + if self.task is not None: + result["task"] = encode_chat_messages([self.task])[0] + return result + + @classmethod + def from_dict(cls, data: dict[str, Any]) -> "OrchestrationState": + """Deserialize from checkpointed dict. 
+ + Args: + data: Checkpoint data with encoded conversation + + Returns: + Restored OrchestrationState instance + """ + from ._conversation_state import decode_chat_messages + + task = None + if "task" in data: + decoded_tasks = decode_chat_messages([data["task"]]) + task = decoded_tasks[0] if decoded_tasks else None + + return cls( + conversation=decode_chat_messages(data.get("conversation", [])), + round_index=data.get("round_index", 0), + metadata=dict(data.get("metadata", {})), + task=task, + ) diff --git a/python/packages/core/agent_framework/_workflows/_orchestrator_helpers.py b/python/packages/core/agent_framework/_workflows/_orchestrator_helpers.py new file mode 100644 index 0000000000..85cde6abbb --- /dev/null +++ b/python/packages/core/agent_framework/_workflows/_orchestrator_helpers.py @@ -0,0 +1,190 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Shared orchestrator utilities for group chat patterns. + +This module provides simple, reusable functions for common orchestration tasks. +No inheritance required - just import and call. +""" + +import logging +from typing import TYPE_CHECKING, Any + +from .._types import ChatMessage, Role + +if TYPE_CHECKING: + from ._group_chat import _GroupChatRequestMessage # type: ignore[reportPrivateUsage] + +logger = logging.getLogger(__name__) + + +def clean_conversation_for_handoff(conversation: list[ChatMessage]) -> list[ChatMessage]: + """Remove tool-related content from conversation for clean handoffs. + + During handoffs, tool calls can cause API errors because: + 1. Assistant messages with tool_calls must be followed by tool responses + 2. Tool response messages must follow an assistant message with tool_calls + + This creates a cleaned copy removing ALL tool-related content. + + Removes: + - FunctionApprovalRequestContent and FunctionCallContent from assistant messages + - Tool response messages (Role.TOOL) + - Messages with only tool calls and no text + + Preserves: + - User messages + - Assistant messages with text content + + Args: + conversation: Original conversation with potential tool content + + Returns: + Cleaned conversation safe for handoff routing + """ + from agent_framework import FunctionApprovalRequestContent, FunctionCallContent + + cleaned: list[ChatMessage] = [] + for msg in conversation: + # Skip tool response messages entirely + if msg.role == Role.TOOL: + continue + + # Check for tool-related content + has_tool_content = False + if msg.contents: + has_tool_content = any( + isinstance(content, (FunctionApprovalRequestContent, FunctionCallContent)) for content in msg.contents + ) + + # If no tool content, keep original + if not has_tool_content: + cleaned.append(msg) + continue + + # Has tool content - only keep if it also has text + if msg.text and msg.text.strip(): + # Create fresh text-only message + msg_copy = ChatMessage( + role=msg.role, + text=msg.text, + author_name=msg.author_name, + ) + cleaned.append(msg_copy) + + return cleaned + + +def create_completion_message( + *, + text: str | None = None, + author_name: str, + reason: str = "completed", +) -> ChatMessage: + """Create a standardized completion message. + + Simple helper to avoid duplicating completion message creation. + + Args: + text: Message text, or None to generate default + author_name: Author/orchestrator name + reason: Reason for completion (for default text generation) + + Returns: + ChatMessage with ASSISTANT role + """ + message_text = text or f"Conversation {reason}." 
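+        # With text=None and reason="max rounds reached", this yields
+        # "Conversation max rounds reached."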
+ return ChatMessage( + role=Role.ASSISTANT, + text=message_text, + author_name=author_name, + ) + + +def prepare_participant_request( + *, + participant_name: str, + conversation: list[ChatMessage], + instruction: str | None = None, + task: ChatMessage | None = None, + metadata: dict[str, Any] | None = None, +) -> "_GroupChatRequestMessage": + """Create a standardized participant request message. + + Simple helper to avoid duplicating request construction. + + Args: + participant_name: Name of the target participant + conversation: Conversation history to send + instruction: Optional instruction from manager/orchestrator + task: Optional task context + metadata: Optional metadata dict + + Returns: + GroupChatRequestMessage ready to send + """ + # Import here to avoid circular dependency + from ._group_chat import _GroupChatRequestMessage # type: ignore[reportPrivateUsage] + + return _GroupChatRequestMessage( + agent_name=participant_name, + conversation=list(conversation), + instruction=instruction or "", + task=task, + metadata=metadata, + ) + + +class ParticipantRegistry: + """Simple registry for tracking participant executor IDs and routing info. + + Provides a clean interface for the common pattern of mapping participant names + to executor IDs and tracking which are agents vs custom executors. + """ + + def __init__(self) -> None: + self._participant_entry_ids: dict[str, str] = {} + self._agent_executor_ids: dict[str, str] = {} + self._executor_id_to_participant: dict[str, str] = {} + self._non_agent_participants: set[str] = set() + + def register( + self, + name: str, + *, + entry_id: str, + is_agent: bool, + ) -> None: + """Register a participant's routing information. + + Args: + name: Participant name + entry_id: Executor ID for this participant's entry point + is_agent: Whether this is an AgentExecutor (True) or custom Executor (False) + """ + self._participant_entry_ids[name] = entry_id + + if is_agent: + self._agent_executor_ids[name] = entry_id + self._executor_id_to_participant[entry_id] = name + else: + self._non_agent_participants.add(name) + + def get_entry_id(self, name: str) -> str | None: + """Get the entry executor ID for a participant name.""" + return self._participant_entry_ids.get(name) + + def get_participant_name(self, executor_id: str) -> str | None: + """Get the participant name for an executor ID (agents only).""" + return self._executor_id_to_participant.get(executor_id) + + def is_agent(self, name: str) -> bool: + """Check if a participant is an agent (vs custom executor).""" + return name in self._agent_executor_ids + + def is_registered(self, name: str) -> bool: + """Check if a participant is registered.""" + return name in self._participant_entry_ids + + def all_participants(self) -> set[str]: + """Get all registered participant names.""" + return set(self._participant_entry_ids.keys()) diff --git a/python/packages/core/agent_framework/_workflows/_participant_utils.py b/python/packages/core/agent_framework/_workflows/_participant_utils.py new file mode 100644 index 0000000000..ac632a917d --- /dev/null +++ b/python/packages/core/agent_framework/_workflows/_participant_utils.py @@ -0,0 +1,136 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +"""Shared participant helpers for orchestration builders.""" + +import re +from collections.abc import Callable, Iterable, Mapping +from dataclasses import dataclass +from typing import Any + +from .._agents import AgentProtocol +from ._agent_executor import AgentExecutor +from ._executor import Executor + + +@dataclass +class GroupChatParticipantSpec: + """Metadata describing a single participant in group chat orchestrations. + + Used by multiple orchestration patterns (GroupChat, Handoff, Magentic) to describe + participants with consistent structure across different workflow types. + + Attributes: + name: Unique identifier for the participant used by managers for selection + participant: AgentProtocol or Executor instance representing the participant + description: Human-readable description provided to managers for selection context + """ + + name: str + participant: AgentProtocol | Executor + description: str + + +_SANITIZE_PATTERN = re.compile(r"[^0-9a-zA-Z]+") + + +def sanitize_identifier(value: str, *, default: str = "agent") -> str: + """Return a deterministic, lowercase identifier derived from `value`.""" + cleaned = _SANITIZE_PATTERN.sub("_", value).strip("_") + if not cleaned: + cleaned = default + if cleaned[0].isdigit(): + cleaned = f"{default}_{cleaned}" + return cleaned.lower() + + +def wrap_participant(participant: AgentProtocol | Executor, *, executor_id: str | None = None) -> Executor: + """Represent `participant` as an `Executor`.""" + if isinstance(participant, Executor): + return participant + if not isinstance(participant, AgentProtocol): + raise TypeError( + f"Participants must implement AgentProtocol or be Executor instances. Got {type(participant).__name__}." + ) + name = getattr(participant, "name", None) + if executor_id is None: + if not name: + raise ValueError("Agent participants must expose a stable 'name' attribute.") + executor_id = str(name) + return AgentExecutor(participant, id=executor_id) + + +def participant_description(participant: AgentProtocol | Executor, fallback: str) -> str: + """Produce a human-readable description for manager context.""" + if isinstance(participant, Executor): + description = getattr(participant, "description", None) + if isinstance(description, str) and description.strip(): + return description.strip() + return fallback + description = getattr(participant, "description", None) + if isinstance(description, str) and description.strip(): + return description.strip() + return fallback + + +def build_alias_map(participant: AgentProtocol | Executor, executor: Executor) -> dict[str, str]: + """Collect canonical and sanitised aliases that should resolve to `executor`.""" + aliases: dict[str, str] = {} + + def _register(values: Iterable[str | None]) -> None: + for value in values: + if not value: + continue + key = str(value) + if key not in aliases: + aliases[key] = executor.id + sanitized = sanitize_identifier(key) + if sanitized not in aliases: + aliases[sanitized] = executor.id + + _register([executor.id]) + + if isinstance(participant, AgentProtocol): + name = getattr(participant, "name", None) + display = getattr(participant, "display_name", None) + _register([name, display]) + else: + display = getattr(participant, "display_name", None) + _register([display]) + + return aliases + + +def merge_alias_maps(maps: Iterable[Mapping[str, str]]) -> dict[str, str]: + """Merge alias mappings, preserving the first occurrence of each alias.""" + merged: dict[str, str] = {} + for mapping in maps: + for key, value in mapping.items(): + 
merged.setdefault(key, value) + return merged + + +def prepare_participant_metadata( + participants: Mapping[str, AgentProtocol | Executor], + *, + executor_id_factory: Callable[[str, AgentProtocol | Executor], str | None] | None = None, + description_factory: Callable[[str, AgentProtocol | Executor], str] | None = None, +) -> dict[str, dict[str, Any]]: + """Return metadata dicts for participants keyed by participant name.""" + executors: dict[str, Executor] = {} + descriptions: dict[str, str] = {} + alias_maps: list[Mapping[str, str]] = [] + + for name, participant in participants.items(): + desired_id = executor_id_factory(name, participant) if executor_id_factory else None + executor = wrap_participant(participant, executor_id=desired_id) + fallback_description = description_factory(name, participant) if description_factory else executor.id + descriptions[name] = participant_description(participant, fallback_description) + executors[name] = executor + alias_maps.append(build_alias_map(participant, executor)) + + aliases = merge_alias_maps(alias_maps) + return { + "executors": executors, + "descriptions": descriptions, + "aliases": aliases, + } diff --git a/python/packages/core/agent_framework/_workflows/_sequential.py b/python/packages/core/agent_framework/_workflows/_sequential.py index bfeae2780f..38fbc53c04 100644 --- a/python/packages/core/agent_framework/_workflows/_sequential.py +++ b/python/packages/core/agent_framework/_workflows/_sequential.py @@ -40,7 +40,7 @@ from collections.abc import Sequence from typing import Any -from agent_framework import AgentProtocol, ChatMessage, Role +from agent_framework import AgentProtocol, ChatMessage from ._agent_executor import ( AgentExecutor, @@ -51,6 +51,7 @@ Executor, handler, ) +from ._message_utils import normalize_messages_input from ._workflow import Workflow from ._workflow_builder import WorkflowBuilder from ._workflow_context import WorkflowContext @@ -63,16 +64,21 @@ class _InputToConversation(Executor): @handler async def from_str(self, prompt: str, ctx: WorkflowContext[list[ChatMessage]]) -> None: - await ctx.send_message([ChatMessage(Role.USER, text=prompt)]) + await ctx.send_message(normalize_messages_input(prompt)) @handler - async def from_message(self, message: ChatMessage, ctx: WorkflowContext[list[ChatMessage]]) -> None: # type: ignore[name-defined] - await ctx.send_message([message]) + async def from_message(self, message: ChatMessage, ctx: WorkflowContext[list[ChatMessage]]) -> None: + await ctx.send_message(normalize_messages_input(message)) @handler - async def from_messages(self, messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: # type: ignore[name-defined] + async def from_messages( + self, + messages: list[str | ChatMessage], + ctx: WorkflowContext[list[ChatMessage]], + ) -> None: # Make a copy to avoid mutation downstream - await ctx.send_message(list(messages)) + normalized = normalize_messages_input(messages) + await ctx.send_message(list(normalized)) class _ResponseToConversation(Executor): diff --git a/python/packages/core/agent_framework/_workflows/_typing_utils.py b/python/packages/core/agent_framework/_workflows/_typing_utils.py index f085fee5b1..de5d328ea9 100644 --- a/python/packages/core/agent_framework/_workflows/_typing_utils.py +++ b/python/packages/core/agent_framework/_workflows/_typing_utils.py @@ -4,56 +4,72 @@ from collections.abc import Mapping from dataclasses import fields, is_dataclass from types import UnionType -from typing import Any, Union, get_args, 
get_origin +from typing import Any, TypeVar, Union, cast, get_args, get_origin logger = logging.getLogger(__name__) +T = TypeVar("T") -def _coerce_to_type(value: Any, target_type: type) -> Any | None: - """Best-effort conversion of value into target_type.""" + +def _coerce_to_type(value: Any, target_type: type[T]) -> T | None: + """Best-effort conversion of value into target_type. + + Args: + value: The value to convert (can be dict, dataclass, or object with __dict__) + target_type: The target type to convert to + + Returns: + Instance of target_type if conversion succeeds, None otherwise + """ if isinstance(value, target_type): - return value + return value # type: ignore[return-value] # Convert dataclass instances or objects with __dict__ into dict first + value_as_dict: dict[str, Any] if not isinstance(value, dict): if is_dataclass(value): - value = {f.name: getattr(value, f.name) for f in fields(value)} + value_as_dict = {f.name: getattr(value, f.name) for f in fields(value)} else: value_dict = getattr(value, "__dict__", None) if isinstance(value_dict, dict): - value = dict(value_dict) - - if isinstance(value, dict): - ctor_kwargs: dict[str, Any] = dict(value) + value_as_dict = cast(dict[str, Any], value_dict) + else: + return None + else: + value_as_dict = cast(dict[str, Any], value) + + # Try to construct the target type from the dict + ctor_kwargs: dict[str, Any] = dict(value_as_dict) + + if is_dataclass(target_type): + field_names = {f.name for f in fields(target_type)} + ctor_kwargs = {k: v for k, v in value_as_dict.items() if k in field_names} + + try: + return target_type(**ctor_kwargs) # type: ignore[call-arg,return-value] + except TypeError as exc: + logger.debug(f"_coerce_to_type could not call {target_type.__name__}(**..): {exc}") + except Exception as exc: # pragma: no cover - unexpected constructor failure + logger.warning( + f"_coerce_to_type encountered unexpected error calling {target_type.__name__} constructor: {exc}" + ) - if is_dataclass(target_type): - field_names = {f.name for f in fields(target_type)} - ctor_kwargs = {k: v for k, v in value.items() if k in field_names} + # Fallback: try to create instance without __init__ and set attributes + try: + instance = object.__new__(target_type) + except Exception as exc: # pragma: no cover - pathological type + logger.debug(f"_coerce_to_type could not allocate {target_type.__name__} without __init__: {exc}") + return None + for key, val in value_as_dict.items(): try: - return target_type(**ctor_kwargs) # type: ignore[arg-type] - except TypeError as exc: - logger.debug(f"_coerce_to_type could not call {target_type.__name__}(**..): {exc}") - except Exception as exc: # pragma: no cover - unexpected constructor failure - logger.warning( - f"_coerce_to_type encountered unexpected error calling {target_type.__name__} constructor: {exc}" + setattr(instance, key, val) + except Exception as exc: + logger.debug( + f"_coerce_to_type could not set {target_type.__name__}.{key} during fallback assignment: {exc}" ) - try: - instance: Any = object.__new__(target_type) - except Exception as exc: # pragma: no cover - pathological type - logger.debug(f"_coerce_to_type could not allocate {target_type.__name__} without __init__: {exc}") - return None - for key, val in value.items(): - try: - setattr(instance, key, val) - except Exception as exc: - logger.debug( - f"_coerce_to_type could not set {target_type.__name__}.{key} during fallback assignment: {exc}" - ) - continue - return instance - - return None + continue + return instance # 
type: ignore[return-value] def is_instance_of(data: Any, target_type: type | UnionType | Any) -> bool: @@ -89,14 +105,14 @@ def is_instance_of(data: Any, target_type: type | UnionType | Any) -> bool: # Case 3: target_type is a generic type if origin in [list, set]: return isinstance(data, origin) and ( - not args or all(any(is_instance_of(item, arg) for arg in args) for item in data) + not args or all(any(is_instance_of(item, arg) for arg in args) for item in data) # type: ignore[misc] ) # type: ignore # Case 4: target_type is a tuple if origin is tuple: if len(args) == 2 and args[1] is Ellipsis: # Tuple[T, ...] case element_type = args[0] - return isinstance(data, tuple) and all(is_instance_of(item, element_type) for item in data) + return isinstance(data, tuple) and all(is_instance_of(item, element_type) for item in data) # type: ignore[misc] if len(args) == 1 and args[0] is Ellipsis: # Tuple[...] case return isinstance(data, tuple) if len(args) == 0: @@ -135,7 +151,7 @@ def is_instance_of(data: Any, target_type: type | UnionType | Any) -> bool: # and validators still receive a fully typed RequestResponse instance. original_request = data.original_request if isinstance(original_request, Mapping): - coerced = _coerce_to_type(dict(original_request), request_type) + coerced = _coerce_to_type(dict(original_request), request_type) # type: ignore[arg-type] if coerced is None or not isinstance(coerced, request_type): return False data.original_request = coerced diff --git a/python/packages/core/agent_framework/_workflows/_workflow.py b/python/packages/core/agent_framework/_workflows/_workflow.py index e24d25c8a2..ed9352bb42 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow.py +++ b/python/packages/core/agent_framework/_workflows/_workflow.py @@ -838,11 +838,24 @@ def output_types(self) -> list[type[Any]]: def as_agent(self, name: str | None = None) -> WorkflowAgent: """Create a WorkflowAgent that wraps this workflow. + The returned agent converts standard agent inputs (strings, ChatMessage, or lists of these) + into a list[ChatMessage] that is passed to the workflow's start executor. This conversion + happens in WorkflowAgent._normalize_messages() which transforms: + - str -> [ChatMessage(role=USER, text=str)] + - ChatMessage -> [ChatMessage] + - list[str | ChatMessage] -> list[ChatMessage] (with string elements converted) + + The workflow's start executor must accept list[ChatMessage] as an input type, otherwise + initialization will fail with a ValueError. + Args: name: Optional name for the agent. If None, a default name will be generated. Returns: A WorkflowAgent instance that wraps this workflow. + + Raises: + ValueError: If the workflow's start executor cannot handle list[ChatMessage] input. 
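+
+        Example:
+            A minimal usage sketch (the agent name below is illustrative):
+
+                agent = workflow.as_agent(name="workflow-agent")
+                response = await agent.run("Summarize the findings")
+                print(response.messages[-1].text)
+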
""" # Import here to avoid circular imports from ._agent import WorkflowAgent diff --git a/python/packages/core/agent_framework/_workflows/_workflow_context.py b/python/packages/core/agent_framework/_workflows/_workflow_context.py index 1a9562fca6..7b23c3aa5e 100644 --- a/python/packages/core/agent_framework/_workflows/_workflow_context.py +++ b/python/packages/core/agent_framework/_workflows/_workflow_context.py @@ -21,7 +21,7 @@ WorkflowStartedEvent, WorkflowStatusEvent, WorkflowWarningEvent, - _framework_event_origin, + _framework_event_origin, # type: ignore ) from ._runner_context import Message, RunnerContext from ._shared_state import SharedState diff --git a/python/packages/core/tests/workflow/test_group_chat.py b/python/packages/core/tests/workflow/test_group_chat.py new file mode 100644 index 0000000000..01942a8703 --- /dev/null +++ b/python/packages/core/tests/workflow/test_group_chat.py @@ -0,0 +1,744 @@ +# Copyright (c) Microsoft. All rights reserved. + +from collections.abc import AsyncIterable, Callable +from typing import Any + +import pytest + +from agent_framework import ( + AgentRunResponse, + AgentRunResponseUpdate, + AgentThread, + BaseAgent, + ChatMessage, + GroupChatBuilder, + GroupChatDirective, + GroupChatStateSnapshot, + MagenticAgentMessageEvent, + MagenticBuilder, + MagenticContext, + MagenticManagerBase, + MagenticOrchestratorMessageEvent, + Role, + TextContent, + Workflow, + WorkflowOutputEvent, +) +from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage +from agent_framework._workflows._group_chat import ( + GroupChatOrchestratorExecutor, + _default_orchestrator_factory, # type: ignore + _GroupChatConfig, # type: ignore + _PromptBasedGroupChatManager, # type: ignore + _SpeakerSelectorAdapter, # type: ignore +) +from agent_framework._workflows._magentic import ( + _MagenticProgressLedger, # type: ignore + _MagenticProgressLedgerItem, # type: ignore + _MagenticStartMessage, # type: ignore +) + + +class StubAgent(BaseAgent): + def __init__(self, agent_name: str, reply_text: str, **kwargs: Any) -> None: + super().__init__(name=agent_name, description=f"Stub agent {agent_name}", **kwargs) + self._reply_text = reply_text + + async def run( # type: ignore[override] + self, + messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + *, + thread: AgentThread | None = None, + **kwargs: Any, + ) -> AgentRunResponse: + response = ChatMessage(role=Role.ASSISTANT, text=self._reply_text, author_name=self.name) + return AgentRunResponse(messages=[response]) + + def run_stream( # type: ignore[override] + self, + messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + *, + thread: AgentThread | None = None, + **kwargs: Any, + ) -> AsyncIterable[AgentRunResponseUpdate]: + async def _stream() -> AsyncIterable[AgentRunResponseUpdate]: + yield AgentRunResponseUpdate( + contents=[TextContent(text=self._reply_text)], role=Role.ASSISTANT, author_name=self.name + ) + + return _stream() + + +def make_sequence_selector() -> Callable[[GroupChatStateSnapshot], Any]: + state_counter = {"value": 0} + + async def _selector(state: GroupChatStateSnapshot) -> str | None: + participants = list(state["participants"].keys()) + step = state_counter["value"] + if step == 0: + state_counter["value"] = step + 1 + return participants[0] + if step == 1 and len(participants) > 1: + state_counter["value"] = step + 1 + return participants[1] + return None + + _selector.name = "manager" # type: ignore[attr-defined] + return _selector + + +class 
StubMagenticManager(MagenticManagerBase): + def __init__(self) -> None: + super().__init__(max_stall_count=3, max_round_count=5) + self._round = 0 + + async def plan(self, magentic_context: MagenticContext) -> ChatMessage: + return ChatMessage(role=Role.ASSISTANT, text="plan", author_name="magentic_manager") + + async def replan(self, magentic_context: MagenticContext) -> ChatMessage: + return await self.plan(magentic_context) + + async def create_progress_ledger(self, magentic_context: MagenticContext) -> _MagenticProgressLedger: + participants = list(magentic_context.participant_descriptions.keys()) + target = participants[0] if participants else "agent" + if self._round == 0: + self._round += 1 + return _MagenticProgressLedger( + is_request_satisfied=_MagenticProgressLedgerItem(reason="", answer=False), + is_in_loop=_MagenticProgressLedgerItem(reason="", answer=False), + is_progress_being_made=_MagenticProgressLedgerItem(reason="", answer=True), + next_speaker=_MagenticProgressLedgerItem(reason="", answer=target), + instruction_or_question=_MagenticProgressLedgerItem(reason="", answer="respond"), + ) + return _MagenticProgressLedger( + is_request_satisfied=_MagenticProgressLedgerItem(reason="", answer=True), + is_in_loop=_MagenticProgressLedgerItem(reason="", answer=False), + is_progress_being_made=_MagenticProgressLedgerItem(reason="", answer=True), + next_speaker=_MagenticProgressLedgerItem(reason="", answer=target), + instruction_or_question=_MagenticProgressLedgerItem(reason="", answer=""), + ) + + async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: + return ChatMessage(role=Role.ASSISTANT, text="final", author_name="magentic_manager") + + +async def test_group_chat_builder_basic_flow() -> None: + selector = make_sequence_selector() + alpha = StubAgent("alpha", "ack from alpha") + beta = StubAgent("beta", "ack from beta") + + workflow = ( + GroupChatBuilder() + .select_speakers(selector, display_name="manager", final_message="done") + .participants(alpha=alpha, beta=beta) + .build() + ) + + outputs: list[ChatMessage] = [] + async for event in workflow.run_stream("coordinate task"): + if isinstance(event, WorkflowOutputEvent): + data = event.data + if isinstance(data, ChatMessage): + outputs.append(data) + + assert len(outputs) == 1 + assert outputs[0].text == "done" + assert outputs[0].author_name == "manager" + + +async def test_magentic_builder_returns_workflow_and_runs() -> None: + manager = StubMagenticManager() + agent = StubAgent("writer", "first draft") + + workflow = MagenticBuilder().participants(writer=agent).with_standard_manager(manager=manager).build() + + assert isinstance(workflow, Workflow) + + outputs: list[ChatMessage] = [] + orchestrator_events: list[MagenticOrchestratorMessageEvent] = [] + agent_events: list[MagenticAgentMessageEvent] = [] + start_message = _MagenticStartMessage.from_string("compose summary") + async for event in workflow.run_stream(start_message): + if isinstance(event, MagenticOrchestratorMessageEvent): + orchestrator_events.append(event) + if isinstance(event, MagenticAgentMessageEvent): + agent_events.append(event) + if isinstance(event, WorkflowOutputEvent): + msg = event.data + if isinstance(msg, ChatMessage): + outputs.append(msg) + + assert outputs, "Expected a final output message" + final = outputs[-1] + assert final.text == "final" + assert final.author_name == "magentic_manager" + assert orchestrator_events, "Expected orchestrator events to be emitted" + assert agent_events, "Expected agent message 
events to be emitted" + + +async def test_group_chat_as_agent_accepts_conversation() -> None: + selector = make_sequence_selector() + alpha = StubAgent("alpha", "ack from alpha") + beta = StubAgent("beta", "ack from beta") + + workflow = ( + GroupChatBuilder() + .select_speakers(selector, display_name="manager", final_message="done") + .participants(alpha=alpha, beta=beta) + .build() + ) + + agent = workflow.as_agent(name="group-chat-agent") + conversation = [ + ChatMessage(role=Role.USER, text="kickoff", author_name="user"), + ChatMessage(role=Role.ASSISTANT, text="noted", author_name="alpha"), + ] + response = await agent.run(conversation) + + assert response.messages, "Expected agent conversation output" + + +async def test_magentic_as_agent_accepts_conversation() -> None: + manager = StubMagenticManager() + writer = StubAgent("writer", "draft") + + workflow = MagenticBuilder().participants(writer=writer).with_standard_manager(manager=manager).build() + + agent = workflow.as_agent(name="magentic-agent") + conversation = [ + ChatMessage(role=Role.SYSTEM, text="Guidelines", author_name="system"), + ChatMessage(role=Role.USER, text="Summarize the findings", author_name="requester"), + ] + response = await agent.run(conversation) + + assert isinstance(response, AgentRunResponse) + + +# Comprehensive tests for group chat functionality + + +class TestGroupChatBuilder: + """Tests for GroupChatBuilder validation and configuration.""" + + def test_build_without_manager_raises_error(self) -> None: + """Test that building without a manager raises ValueError.""" + agent = StubAgent("test", "response") + + builder = GroupChatBuilder().participants([agent]) + + with pytest.raises(ValueError, match="manager must be configured before build"): + builder.build() + + def test_build_without_participants_raises_error(self) -> None: + """Test that building without participants raises ValueError.""" + + def selector(state: GroupChatStateSnapshot) -> str | None: + return None + + builder = GroupChatBuilder().select_speakers(selector) + + with pytest.raises(ValueError, match="participants must be configured before build"): + builder.build() + + def test_duplicate_manager_configuration_raises_error(self) -> None: + """Test that configuring multiple managers raises ValueError.""" + + def selector(state: GroupChatStateSnapshot) -> str | None: + return None + + builder = GroupChatBuilder().select_speakers(selector) + + with pytest.raises(ValueError, match="already has a manager configured"): + builder.select_speakers(selector) + + def test_empty_participants_raises_error(self) -> None: + """Test that empty participants list raises ValueError.""" + + def selector(state: GroupChatStateSnapshot) -> str | None: + return None + + builder = GroupChatBuilder().select_speakers(selector) + + with pytest.raises(ValueError, match="participants cannot be empty"): + builder.participants([]) + + def test_duplicate_participant_names_raises_error(self) -> None: + """Test that duplicate participant names raise ValueError.""" + agent1 = StubAgent("test", "response1") + agent2 = StubAgent("test", "response2") + + def selector(state: GroupChatStateSnapshot) -> str | None: + return None + + builder = GroupChatBuilder().select_speakers(selector) + + with pytest.raises(ValueError, match="Duplicate participant name 'test'"): + builder.participants([agent1, agent2]) + + def test_agent_without_name_raises_error(self) -> None: + """Test that agent without name attribute raises ValueError.""" + + class AgentWithoutName(BaseAgent): + def 
__init__(self) -> None: + super().__init__(name="", description="test") + + async def run(self, messages: Any = None, *, thread: Any = None, **kwargs: Any) -> AgentRunResponse: + return AgentRunResponse(messages=[]) + + def run_stream( + self, messages: Any = None, *, thread: Any = None, **kwargs: Any + ) -> AsyncIterable[AgentRunResponseUpdate]: + async def _stream() -> AsyncIterable[AgentRunResponseUpdate]: + yield AgentRunResponseUpdate(contents=[]) + + return _stream() + + agent = AgentWithoutName() + + def selector(state: GroupChatStateSnapshot) -> str | None: + return None + + builder = GroupChatBuilder().select_speakers(selector) + + with pytest.raises(ValueError, match="must define a non-empty 'name' attribute"): + builder.participants([agent]) + + def test_empty_participant_name_raises_error(self) -> None: + """Test that empty participant name raises ValueError.""" + agent = StubAgent("test", "response") + + def selector(state: GroupChatStateSnapshot) -> str | None: + return None + + builder = GroupChatBuilder().select_speakers(selector) + + with pytest.raises(ValueError, match="participant names must be non-empty strings"): + builder.participants({"": agent}) + + +class TestGroupChatOrchestrator: + """Tests for GroupChatOrchestratorExecutor core functionality.""" + + async def test_max_rounds_enforcement(self) -> None: + """Test that max_rounds properly limits conversation rounds.""" + call_count = {"value": 0} + + def selector(state: GroupChatStateSnapshot) -> str | None: + call_count["value"] += 1 + # Always return the agent name to try to continue indefinitely + return "agent" + + agent = StubAgent("agent", "response") + + workflow = ( + GroupChatBuilder() + .select_speakers(selector) + .participants([agent]) + .with_max_rounds(2) # Limit to 2 rounds + .build() + ) + + outputs: list[ChatMessage] = [] + async for event in workflow.run_stream("test task"): + if isinstance(event, WorkflowOutputEvent): + data = event.data + if isinstance(data, ChatMessage): + outputs.append(data) + + # Should have terminated due to max_rounds, expect at least one output + assert len(outputs) >= 1 + # The final message should be about round limit + final_output = outputs[-1] + assert "round limit" in final_output.text.lower() + + async def test_unknown_participant_error(self) -> None: + """Test that _apply_directive raises error for unknown participants.""" + + def selector(state: GroupChatStateSnapshot) -> str | None: + return "unknown_agent" # Return non-existent participant + + agent = StubAgent("agent", "response") + + workflow = GroupChatBuilder().select_speakers(selector).participants([agent]).build() + + with pytest.raises(ValueError, match="Manager selected unknown participant 'unknown_agent'"): + async for _ in workflow.run_stream("test task"): + pass + + async def test_directive_without_agent_name_raises_error(self) -> None: + """Test that directive without agent_name raises error when finish=False.""" + + def bad_selector(state: GroupChatStateSnapshot) -> GroupChatDirective: + # Return a GroupChatDirective object instead of string to trigger error + return GroupChatDirective(finish=False, agent_name=None) # type: ignore + + agent = StubAgent("agent", "response") + + # The _SpeakerSelectorAdapter will catch this and raise TypeError + workflow = GroupChatBuilder().select_speakers(bad_selector).participants([agent]).build() # type: ignore + + # This should raise a TypeError because selector doesn't return str or None + with pytest.raises(TypeError, match="must return a participant name 
\\(str\\) or None"): + async for _ in workflow.run_stream("test"): + pass + + async def test_handle_empty_conversation_raises_error(self) -> None: + """Test that empty conversation list raises ValueError.""" + + def selector(state: GroupChatStateSnapshot) -> str | None: + return None + + agent = StubAgent("agent", "response") + + workflow = GroupChatBuilder().select_speakers(selector).participants([agent]).build() + + with pytest.raises(ValueError, match="requires at least one chat message"): + async for _ in workflow.run_stream([]): + pass + + async def test_unknown_participant_response_raises_error(self) -> None: + """Test that responses from unknown participants raise errors.""" + + def selector(state: GroupChatStateSnapshot) -> str | None: + return "agent" + + # Create orchestrator to test _ingest_participant_message directly + orchestrator = GroupChatOrchestratorExecutor( + manager=selector, # type: ignore + participants={"agent": "test agent"}, + manager_name="test_manager", # type: ignore + ) + + # Mock the workflow context + class MockContext: + async def yield_output(self, message: ChatMessage) -> None: + pass + + ctx = MockContext() + + # Initialize orchestrator state + orchestrator._task_message = ChatMessage(role=Role.USER, text="test") # type: ignore + orchestrator._conversation = [orchestrator._task_message] # type: ignore + orchestrator._history = [] # type: ignore + orchestrator._pending_agent = None # type: ignore + orchestrator._round_index = 0 # type: ignore + + # Test with unknown participant + message = ChatMessage(role=Role.ASSISTANT, text="response") + + with pytest.raises(ValueError, match="Received response from unknown participant 'unknown'"): + await orchestrator._ingest_participant_message("unknown", message, ctx) # type: ignore + + async def test_state_build_before_initialization_raises_error(self) -> None: + """Test that _build_state raises error before task message initialization.""" + + def selector(state: GroupChatStateSnapshot) -> str | None: + return None + + orchestrator = GroupChatOrchestratorExecutor( + manager=selector, # type: ignore + participants={"agent": "test agent"}, + manager_name="test_manager", # type: ignore + ) + + with pytest.raises(RuntimeError, match="state not initialized with task message"): + orchestrator._build_state() # type: ignore + + +class TestSpeakerSelectorAdapter: + """Tests for _SpeakerSelectorAdapter functionality.""" + + async def test_selector_returning_list_with_multiple_items_raises_error(self) -> None: + """Test that selector returning list with multiple items raises error.""" + + def bad_selector(state: GroupChatStateSnapshot) -> list[str]: + return ["agent1", "agent2"] # Multiple items + + adapter = _SpeakerSelectorAdapter(bad_selector, manager_name="manager") + + state = { + "participants": {"agent1": "desc1", "agent2": "desc2"}, + "task": ChatMessage(role=Role.USER, text="test"), + "conversation": (), + "history": (), + "round_index": 0, + "pending_agent": None, + } + + with pytest.raises(ValueError, match="must return a single participant name"): + await adapter(state) + + async def test_selector_returning_non_string_raises_error(self) -> None: + """Test that selector returning non-string raises TypeError.""" + + def bad_selector(state: GroupChatStateSnapshot) -> int: + return 42 # Not a string + + adapter = _SpeakerSelectorAdapter(bad_selector, manager_name="manager") + + state = { + "participants": {"agent": "desc"}, + "task": ChatMessage(role=Role.USER, text="test"), + "conversation": (), + "history": (), + 
"round_index": 0, + "pending_agent": None, + } + + with pytest.raises(TypeError, match="must return a participant name \\(str\\) or None"): + await adapter(state) + + async def test_selector_returning_empty_list_finishes(self) -> None: + """Test that selector returning empty list finishes conversation.""" + + def empty_selector(state: GroupChatStateSnapshot) -> list[str]: + return [] # Empty list should finish + + adapter = _SpeakerSelectorAdapter(empty_selector, manager_name="manager") + + state = { + "participants": {"agent": "desc"}, + "task": ChatMessage(role=Role.USER, text="test"), + "conversation": (), + "history": (), + "round_index": 0, + "pending_agent": None, + } + + directive = await adapter(state) + assert directive.finish is True + assert directive.final_message is not None + + +class TestCheckpointing: + """Tests for checkpointing functionality.""" + + async def test_workflow_with_checkpointing(self) -> None: + """Test that workflow works with checkpointing enabled.""" + + def selector(state: GroupChatStateSnapshot) -> str | None: + if state["round_index"] >= 1: + return None + return "agent" + + agent = StubAgent("agent", "response") + storage = InMemoryCheckpointStorage() + + workflow = ( + GroupChatBuilder().select_speakers(selector).participants([agent]).with_checkpointing(storage).build() + ) + + outputs: list[ChatMessage] = [] + async for event in workflow.run_stream("test task"): + if isinstance(event, WorkflowOutputEvent): + data = event.data + if isinstance(data, ChatMessage): + outputs.append(data) + + assert len(outputs) == 1 # Should complete normally + + +class TestPromptBasedManager: + """Tests for _PromptBasedGroupChatManager.""" + + async def test_manager_with_missing_next_agent_raises_error(self) -> None: + """Test that manager directive without next_agent raises RuntimeError.""" + + class MockChatClient: + async def get_response(self, messages: Any, response_format: Any = None) -> Any: + # Return response that has finish=False but no next_agent + class MockResponse: + def __init__(self) -> None: + self.value = {"finish": False, "next_agent": None} + self.messages: list[Any] = [] + + return MockResponse() + + manager = _PromptBasedGroupChatManager(MockChatClient()) # type: ignore + + state = { + "participants": {"agent": "desc"}, + "task": ChatMessage(role=Role.USER, text="test"), + "conversation": (), + } + + with pytest.raises(RuntimeError, match="missing next_agent while finish is False"): + await manager(state) + + async def test_manager_with_unknown_participant_raises_error(self) -> None: + """Test that manager selecting unknown participant raises RuntimeError.""" + + class MockChatClient: + async def get_response(self, messages: Any, response_format: Any = None) -> Any: + # Return response selecting unknown participant + class MockResponse: + def __init__(self) -> None: + self.value = {"finish": False, "next_agent": "unknown"} + self.messages: list[Any] = [] + + return MockResponse() + + manager = _PromptBasedGroupChatManager(MockChatClient()) # type: ignore + + state = { + "participants": {"agent": "desc"}, + "task": ChatMessage(role=Role.USER, text="test"), + "conversation": (), + } + + with pytest.raises(RuntimeError, match="Manager selected unknown participant 'unknown'"): + await manager(state) + + +class TestFactoryFunctions: + """Tests for factory functions.""" + + def test_default_orchestrator_factory_without_manager_raises_error(self) -> None: + """Test that default factory requires manager to be set.""" + config = 
_GroupChatConfig(manager=None, manager_name="test", participants={}) + + with pytest.raises(RuntimeError, match="requires a manager to be set"): + _default_orchestrator_factory(config) + + +class TestConversationHandling: + """Tests for different conversation input types.""" + + async def test_handle_string_input(self) -> None: + """Test handling string input creates proper ChatMessage.""" + + def selector(state: GroupChatStateSnapshot) -> str | None: + # Verify the task was properly converted + assert state["task"].role == Role.USER + assert state["task"].text == "test string" + return None + + agent = StubAgent("agent", "response") + + workflow = GroupChatBuilder().select_speakers(selector).participants([agent]).build() + + outputs: list[ChatMessage] = [] + async for event in workflow.run_stream("test string"): + if isinstance(event, WorkflowOutputEvent): + data = event.data + if isinstance(data, ChatMessage): + outputs.append(data) + + assert len(outputs) == 1 + + async def test_handle_chat_message_input(self) -> None: + """Test handling ChatMessage input directly.""" + task_message = ChatMessage(role=Role.USER, text="test message") + + def selector(state: GroupChatStateSnapshot) -> str | None: + # Verify the task message was preserved + assert state["task"] == task_message + return None + + agent = StubAgent("agent", "response") + + workflow = GroupChatBuilder().select_speakers(selector).participants([agent]).build() + + outputs: list[ChatMessage] = [] + async for event in workflow.run_stream(task_message): + if isinstance(event, WorkflowOutputEvent): + data = event.data + if isinstance(data, ChatMessage): + outputs.append(data) + + assert len(outputs) == 1 + + async def test_handle_conversation_list_input(self) -> None: + """Test handling conversation list preserves context.""" + conversation = [ + ChatMessage(role=Role.SYSTEM, text="system message"), + ChatMessage(role=Role.USER, text="user message"), + ] + + def selector(state: GroupChatStateSnapshot) -> str | None: + # Verify conversation context is preserved + assert len(state["conversation"]) == 2 + assert state["task"].text == "user message" + return None + + agent = StubAgent("agent", "response") + + workflow = GroupChatBuilder().select_speakers(selector).participants([agent]).build() + + outputs: list[ChatMessage] = [] + async for event in workflow.run_stream(conversation): + if isinstance(event, WorkflowOutputEvent): + data = event.data + if isinstance(data, ChatMessage): + outputs.append(data) + + assert len(outputs) == 1 + + +class TestRoundLimitEnforcement: + """Tests for round limit checking functionality.""" + + async def test_round_limit_in_apply_directive(self) -> None: + """Test round limit enforcement in _apply_directive.""" + rounds_called = {"count": 0} + + def selector(state: GroupChatStateSnapshot) -> str | None: + rounds_called["count"] += 1 + # Keep trying to select agent to test limit enforcement + return "agent" + + agent = StubAgent("agent", "response") + + workflow = ( + GroupChatBuilder() + .select_speakers(selector) + .participants([agent]) + .with_max_rounds(1) # Very low limit + .build() + ) + + outputs: list[ChatMessage] = [] + async for event in workflow.run_stream("test"): + if isinstance(event, WorkflowOutputEvent): + data = event.data + if isinstance(data, ChatMessage): + outputs.append(data) + + # Should have at least one output (the round limit message) + assert len(outputs) >= 1 + # The last message should be about round limit + final_output = outputs[-1] + assert "round limit" in 
final_output.text.lower() + + async def test_round_limit_in_ingest_participant_message(self) -> None: + """Test round limit enforcement after participant response.""" + responses_received = {"count": 0} + + def selector(state: GroupChatStateSnapshot) -> str | None: + responses_received["count"] += 1 + if responses_received["count"] == 1: + return "agent" # First call selects agent + return "agent" # Try to continue, but should hit limit + + agent = StubAgent("agent", "response from agent") + + workflow = ( + GroupChatBuilder() + .select_speakers(selector) + .participants([agent]) + .with_max_rounds(1) # Hit limit after first response + .build() + ) + + outputs: list[ChatMessage] = [] + async for event in workflow.run_stream("test"): + if isinstance(event, WorkflowOutputEvent): + data = event.data + if isinstance(data, ChatMessage): + outputs.append(data) + + # Should have at least one output (the round limit message) + assert len(outputs) >= 1 + # The last message should be about round limit + final_output = outputs[-1] + assert "round limit" in final_output.text.lower() diff --git a/python/packages/core/tests/workflow/test_handoff.py b/python/packages/core/tests/workflow/test_handoff.py index ea8d7faead..12d115ad40 100644 --- a/python/packages/core/tests/workflow/test_handoff.py +++ b/python/packages/core/tests/workflow/test_handoff.py @@ -54,6 +54,7 @@ def __init__( extra_properties: dict[str, object] | None = None, ) -> None: super().__init__(id=name, name=name, display_name=name) + self._agent_name = name self.handoff_to = handoff_to self.calls: list[list[ChatMessage]] = [] self._text_handoff = text_handoff @@ -72,7 +73,7 @@ async def run( # type: ignore[override] additional_properties = _merge_additional_properties( self.handoff_to, self._text_handoff, self._extra_properties ) - contents = _build_reply_contents(self.name, self.handoff_to, self._text_handoff, self._next_call_id()) + contents = _build_reply_contents(self._agent_name, self.handoff_to, self._text_handoff, self._next_call_id()) reply = ChatMessage( role=Role.ASSISTANT, contents=contents, @@ -91,7 +92,7 @@ async def run_stream( # type: ignore[override] conversation = _normalise(messages) self.calls.append(conversation) additional_props = _merge_additional_properties(self.handoff_to, self._text_handoff, self._extra_properties) - contents = _build_reply_contents(self.name, self.handoff_to, self._text_handoff, self._next_call_id()) + contents = _build_reply_contents(self._agent_name, self.handoff_to, self._text_handoff, self._next_call_id()) yield AgentRunResponseUpdate( contents=contents, role=Role.ASSISTANT, @@ -357,3 +358,38 @@ async def test_multiple_runs_dont_leak_conversation(): assert not any("First run message" in msg.text for msg in second_run_user_messages if msg.text), ( "Second run should NOT contain first run's messages" ) + + +async def test_handoff_async_termination_condition() -> None: + """Test that async termination conditions work correctly.""" + termination_call_count = 0 + + async def async_termination(conv: list[ChatMessage]) -> bool: + nonlocal termination_call_count + termination_call_count += 1 + user_count = sum(1 for msg in conv if msg.role == Role.USER) + return user_count >= 2 + + coordinator = _RecordingAgent(name="coordinator") + + workflow = ( + HandoffBuilder(participants=[coordinator]) + .set_coordinator(coordinator) + .with_termination_condition(async_termination) + .build() + ) + + events = await _drain(workflow.run_stream("First user message")) + requests = [ev for ev in events if 
isinstance(ev, RequestInfoEvent)] + assert requests + + events = await _drain(workflow.send_responses_streaming({requests[-1].request_id: "Second user message"})) + outputs = [ev for ev in events if isinstance(ev, WorkflowOutputEvent)] + assert len(outputs) == 1 + + final_conversation = outputs[0].data + assert isinstance(final_conversation, list) + final_conv_list = cast(list[ChatMessage], final_conversation) + user_messages = [msg for msg in final_conv_list if msg.role == Role.USER] + assert len(user_messages) == 2 + assert termination_call_count > 0 diff --git a/python/packages/core/tests/workflow/test_magentic.py b/python/packages/core/tests/workflow/test_magentic.py index b52449a928..fe83fa0ea4 100644 --- a/python/packages/core/tests/workflow/test_magentic.py +++ b/python/packages/core/tests/workflow/test_magentic.py @@ -2,7 +2,7 @@ from collections.abc import AsyncIterable from dataclasses import dataclass -from typing import Any +from typing import Any, cast import pytest @@ -15,13 +15,12 @@ ChatResponse, ChatResponseUpdate, Executor, + MagenticAgentMessageEvent, MagenticBuilder, MagenticManagerBase, MagenticPlanReviewDecision, MagenticPlanReviewReply, MagenticPlanReviewRequest, - MagenticProgressLedger, - MagenticProgressLedgerItem, RequestInfoEvent, Role, TextContent, @@ -34,17 +33,19 @@ handler, ) from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage -from agent_framework._workflows._magentic import ( +from agent_framework._workflows._magentic import ( # type: ignore[reportPrivateUsage] MagenticAgentExecutor, MagenticContext, MagenticOrchestratorExecutor, - MagenticStartMessage, + _MagenticProgressLedger, # type: ignore + _MagenticProgressLedgerItem, # type: ignore + _MagenticStartMessage, # type: ignore ) def test_magentic_start_message_from_string(): - msg = MagenticStartMessage.from_string("Do the thing") - assert isinstance(msg, MagenticStartMessage) + msg = _MagenticStartMessage.from_string("Do the thing") + assert isinstance(msg, _MagenticStartMessage) assert isinstance(msg.task, ChatMessage) assert msg.task.role == Role.USER assert msg.task.text == "Do the thing" @@ -114,8 +115,9 @@ def restore_state(self, state: dict[str, Any]) -> None: super().restore_state(state) ledger_state = state.get("task_ledger") if isinstance(ledger_state, dict): - facts_payload = ledger_state.get("facts") # type: ignore[reportUnknownMemberType] - plan_payload = ledger_state.get("plan") # type: ignore[reportUnknownMemberType] + ledger_dict = cast(dict[str, Any], ledger_state) + facts_payload = cast(dict[str, Any] | None, ledger_dict.get("facts")) + plan_payload = cast(dict[str, Any] | None, ledger_dict.get("plan")) if facts_payload is not None and plan_payload is not None: try: facts = ChatMessage.from_dict(facts_payload) @@ -138,14 +140,14 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: combined = f"Task: {magentic_context.task.text}\n\nFacts:\n{facts.text}\n\nPlan:\n{plan.text}" return ChatMessage(role=Role.ASSISTANT, text=combined, author_name="magentic_manager") - async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: + async def create_progress_ledger(self, magentic_context: MagenticContext) -> _MagenticProgressLedger: is_satisfied = self.satisfied_after_signoff and len(magentic_context.chat_history) > 0 - return MagenticProgressLedger( - is_request_satisfied=MagenticProgressLedgerItem(reason="test", answer=is_satisfied), - is_in_loop=MagenticProgressLedgerItem(reason="test", answer=False), - 
is_progress_being_made=MagenticProgressLedgerItem(reason="test", answer=True), - next_speaker=MagenticProgressLedgerItem(reason="test", answer=self.next_speaker_name), - instruction_or_question=MagenticProgressLedgerItem(reason="test", answer=self.instruction_text), + return _MagenticProgressLedger( + is_request_satisfied=_MagenticProgressLedgerItem(reason="test", answer=is_satisfied), + is_in_loop=_MagenticProgressLedgerItem(reason="test", answer=False), + is_progress_being_made=_MagenticProgressLedgerItem(reason="test", answer=True), + next_speaker=_MagenticProgressLedgerItem(reason="test", answer=self.next_speaker_name), + instruction_or_question=_MagenticProgressLedgerItem(reason="test", answer=self.instruction_text), ) async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: @@ -175,7 +177,7 @@ async def test_standard_manager_progress_ledger_and_fallback(): ) ledger = await manager.create_progress_ledger(ctx.clone()) - assert isinstance(ledger, MagenticProgressLedger) + assert isinstance(ledger, _MagenticProgressLedger) assert ledger.next_speaker.answer == "agentA" manager.satisfied_after_signoff = False @@ -328,13 +330,11 @@ async def test_magentic_checkpoint_resume_round_trip(): .build() ) - orchestrator = next( - exec for exec in wf_resume.workflow.executors.values() if isinstance(exec, MagenticOrchestratorExecutor) - ) + orchestrator = next(exec for exec in wf_resume.executors.values() if isinstance(exec, MagenticOrchestratorExecutor)) reply = MagenticPlanReviewReply(decision=MagenticPlanReviewDecision.APPROVE) completed: WorkflowOutputEvent | None = None - async for event in wf_resume.workflow.run_stream_from_checkpoint( + async for event in wf_resume.run_stream_from_checkpoint( resume_checkpoint.checkpoint_id, responses={req_event.request_id: reply}, ): @@ -346,8 +346,8 @@ async def test_magentic_checkpoint_resume_round_trip(): assert orchestrator._context.chat_history # type: ignore[reportPrivateUsage] assert orchestrator._task_ledger is not None # type: ignore[reportPrivateUsage] assert manager2.task_ledger is not None - # Initial message should be the task ledger plan - assert orchestrator._context.chat_history[0].text == orchestrator._task_ledger.text # type: ignore[reportPrivateUsage] + # Latest entry in chat history should be the task ledger plan + assert orchestrator._context.chat_history[-1].text == orchestrator._task_ledger.text # type: ignore[reportPrivateUsage] class _DummyExec(Executor): @@ -472,24 +472,24 @@ async def plan(self, magentic_context: MagenticContext) -> ChatMessage: async def replan(self, magentic_context: MagenticContext) -> ChatMessage: return ChatMessage(role=Role.ASSISTANT, text="re-ledger") - async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: + async def create_progress_ledger(self, magentic_context: MagenticContext) -> _MagenticProgressLedger: if not self._invoked: # First round: ask agentA to respond self._invoked = True - return MagenticProgressLedger( - is_request_satisfied=MagenticProgressLedgerItem(reason="r", answer=False), - is_in_loop=MagenticProgressLedgerItem(reason="r", answer=False), - is_progress_being_made=MagenticProgressLedgerItem(reason="r", answer=True), - next_speaker=MagenticProgressLedgerItem(reason="r", answer="agentA"), - instruction_or_question=MagenticProgressLedgerItem(reason="r", answer="say hi"), + return _MagenticProgressLedger( + is_request_satisfied=_MagenticProgressLedgerItem(reason="r", answer=False), + 
is_in_loop=_MagenticProgressLedgerItem(reason="r", answer=False), + is_progress_being_made=_MagenticProgressLedgerItem(reason="r", answer=True), + next_speaker=_MagenticProgressLedgerItem(reason="r", answer="agentA"), + instruction_or_question=_MagenticProgressLedgerItem(reason="r", answer="say hi"), ) # Next round: mark satisfied so run can conclude - return MagenticProgressLedger( - is_request_satisfied=MagenticProgressLedgerItem(reason="r", answer=True), - is_in_loop=MagenticProgressLedgerItem(reason="r", answer=False), - is_progress_being_made=MagenticProgressLedgerItem(reason="r", answer=True), - next_speaker=MagenticProgressLedgerItem(reason="r", answer="agentA"), - instruction_or_question=MagenticProgressLedgerItem(reason="r", answer="done"), + return _MagenticProgressLedger( + is_request_satisfied=_MagenticProgressLedgerItem(reason="r", answer=True), + is_in_loop=_MagenticProgressLedgerItem(reason="r", answer=False), + is_progress_being_made=_MagenticProgressLedgerItem(reason="r", answer=True), + next_speaker=_MagenticProgressLedgerItem(reason="r", answer="agentA"), + instruction_or_question=_MagenticProgressLedgerItem(reason="r", answer="done"), ) async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: @@ -533,17 +533,10 @@ async def run(self, messages=None, *, thread=None, **kwargs): # type: ignore[ov async def _collect_agent_responses_setup(participant_obj: object): captured: list[ChatMessage] = [] - async def sink(event) -> None: # type: ignore[no-untyped-def] - from agent_framework._workflows._magentic import MagenticAgentMessageEvent - - if isinstance(event, MagenticAgentMessageEvent) and event.message is not None: - captured.append(event.message) - wf = ( MagenticBuilder() .participants(agentA=participant_obj) # type: ignore[arg-type] .with_standard_manager(InvokeOnceManager()) - .on_event(sink) # type: ignore .build() ) @@ -551,6 +544,10 @@ async def sink(event) -> None: # type: ignore[no-untyped-def] events: list[WorkflowEvent] = [] async for ev in wf.run_stream("task"): # plan review disabled events.append(ev) + if isinstance(ev, WorkflowOutputEvent): + break + if isinstance(ev, MagenticAgentMessageEvent) and ev.message is not None: + captured.append(ev.message) if len(events) > 50: break @@ -559,7 +556,7 @@ async def sink(event) -> None: # type: ignore[no-untyped-def] async def test_agent_executor_invoke_with_thread_chat_client(): captured = await _collect_agent_responses_setup(StubThreadAgent()) - # Should have at least one response from agentA via MagenticAgentExecutor path + # Should have at least one response from agentA via _MagenticAgentExecutor path assert any((m.author_name == "agentA" and "ok" in (m.text or "")) for m in captured) @@ -685,7 +682,7 @@ async def test_magentic_checkpoint_resume_rejects_participant_renames(): .build() ) - with pytest.raises(RuntimeError, match="participant names do not match"): + with pytest.raises(ValueError, match="Workflow graph has changed"): async for _ in renamed_workflow.run_stream_from_checkpoint( target_checkpoint.checkpoint_id, # type: ignore[reportUnknownMemberType] responses={req_event.request_id: MagenticPlanReviewReply(decision=MagenticPlanReviewDecision.APPROVE)}, @@ -704,13 +701,13 @@ async def plan(self, magentic_context: MagenticContext) -> ChatMessage: async def replan(self, magentic_context: MagenticContext) -> ChatMessage: return ChatMessage(role=Role.ASSISTANT, text="re-ledger") - async def create_progress_ledger(self, magentic_context: MagenticContext) -> MagenticProgressLedger: 
- return MagenticProgressLedger( - is_request_satisfied=MagenticProgressLedgerItem(reason="r", answer=False), - is_in_loop=MagenticProgressLedgerItem(reason="r", answer=True), - is_progress_being_made=MagenticProgressLedgerItem(reason="r", answer=False), - next_speaker=MagenticProgressLedgerItem(reason="r", answer="agentA"), - instruction_or_question=MagenticProgressLedgerItem(reason="r", answer="done"), + async def create_progress_ledger(self, magentic_context: MagenticContext) -> _MagenticProgressLedger: + return _MagenticProgressLedger( + is_request_satisfied=_MagenticProgressLedgerItem(reason="r", answer=False), + is_in_loop=_MagenticProgressLedgerItem(reason="r", answer=True), + is_progress_being_made=_MagenticProgressLedgerItem(reason="r", answer=False), + next_speaker=_MagenticProgressLedgerItem(reason="r", answer="agentA"), + instruction_or_question=_MagenticProgressLedgerItem(reason="r", answer="done"), ) async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatMessage: diff --git a/python/packages/devui/agent_framework_devui/_utils.py b/python/packages/devui/agent_framework_devui/_utils.py index 58aedbd2f3..19be9d5f35 100644 --- a/python/packages/devui/agent_framework_devui/_utils.py +++ b/python/packages/devui/agent_framework_devui/_utils.py @@ -6,7 +6,10 @@ import json import logging from dataclasses import fields, is_dataclass -from typing import Any, get_args, get_origin +from types import UnionType +from typing import Any, Union, get_args, get_origin + +from agent_framework import ChatMessage logger = logging.getLogger(__name__) @@ -110,10 +113,25 @@ def extract_executor_message_types(executor: Any) -> list[Any]: return message_types +def _contains_chat_message(type_hint: Any) -> bool: + """Check whether the provided type hint directly or indirectly references ChatMessage.""" + if type_hint is ChatMessage: + return True + + origin = get_origin(type_hint) + if origin in (list, tuple): + return any(_contains_chat_message(arg) for arg in get_args(type_hint)) + + if origin in (Union, UnionType): + return any(_contains_chat_message(arg) for arg in get_args(type_hint)) + + return False + + def select_primary_input_type(message_types: list[Any]) -> Any | None: """Choose the most user-friendly input type for workflow inputs. - Prefers str and dict types for better user experience. + Prefers ChatMessage (or containers thereof) and then falls back to primitives. 
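+
+    For example, given [str, list[ChatMessage]] the list entry references
+    ChatMessage, so ChatMessage is returned; [str, dict] would fall back to str
+    (illustrative cases of that preference order).
+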
Args: message_types: List of possible message types @@ -124,6 +142,10 @@ def select_primary_input_type(message_types: list[Any]) -> Any | None: if not message_types: return None + for message_type in message_types: + if _contains_chat_message(message_type): + return ChatMessage + preferred = (str, dict) for candidate in preferred: diff --git a/python/samples/README.md b/python/samples/README.md index 182c635c6c..4178ec752c 100644 --- a/python/samples/README.md +++ b/python/samples/README.md @@ -288,9 +288,13 @@ This directory contains samples demonstrating the capabilities of Microsoft Agen | [`getting_started/workflows/orchestration/concurrent_agents.py`](./getting_started/workflows/orchestration/concurrent_agents.py) | Sample: Concurrent fan-out/fan-in (agent-only API) with default aggregator | | [`getting_started/workflows/orchestration/concurrent_custom_agent_executors.py`](./getting_started/workflows/orchestration/concurrent_custom_agent_executors.py) | Sample: Concurrent Orchestration with Custom Agent Executors | | [`getting_started/workflows/orchestration/concurrent_custom_aggregator.py`](./getting_started/workflows/orchestration/concurrent_custom_aggregator.py) | Sample: Concurrent Orchestration with Custom Aggregator | -| [`getting_started/workflows/orchestration/magentic.py`](./getting_started/workflows/orchestration/magentic.py) | Sample: Magentic Orchestration (multi-agent) | -| [`getting_started/workflows/orchestration/magentic_checkpoint.py`](./getting_started/workflows/orchestration/magentic_checkpoint.py) | Sample: Magentic Orchestration + Checkpointing | -| [`getting_started/workflows/orchestration/magentic_human_plan_update.py`](./getting_started/workflows/orchestration/magentic_human_plan_update.py) | Sample: Magentic Orchestration + Human Plan Review | +| [`getting_started/workflows/orchestration/group_chat_prompt_based_manager.py`](./getting_started/workflows/orchestration/group_chat_prompt_based_manager.py) | Sample: Group Chat Orchestration with LLM-based manager | +| [`getting_started/workflows/orchestration/group_chat_simple_selector.py`](./getting_started/workflows/orchestration/group_chat_simple_selector.py) | Sample: Group Chat Orchestration with function-based speaker selector | +| [`getting_started/workflows/orchestration/handoff_simple.py`](./getting_started/workflows/orchestration/handoff_simple.py) | Sample: Handoff Orchestration with simple agent handoff pattern | +| [`getting_started/workflows/orchestration/handoff_specialist_to_specialist.py`](./getting_started/workflows/orchestration/handoff_specialist_to_specialist.py) | Sample: Handoff Orchestration with specialist-to-specialist routing | +| [`getting_started/workflows/orchestration/magentic.py`](./getting_started/workflows/orchestration/magentic.py) | Sample: Magentic Orchestration (agentic task planning with multi-agent execution) | +| [`getting_started/workflows/orchestration/magentic_checkpoint.py`](./getting_started/workflows/orchestration/magentic_checkpoint.py) | Sample: Magentic Orchestration with Checkpointing | +| [`getting_started/workflows/orchestration/magentic_human_plan_update.py`](./getting_started/workflows/orchestration/magentic_human_plan_update.py) | Sample: Magentic Orchestration with Human Plan Review | | [`getting_started/workflows/orchestration/sequential_agents.py`](./getting_started/workflows/orchestration/sequential_agents.py) | Sample: Sequential workflow (agent-focused API) with shared conversation context | | 
[`getting_started/workflows/orchestration/sequential_custom_executors.py`](./getting_started/workflows/orchestration/sequential_custom_executors.py) | Sample: Sequential workflow mixing agents and a custom summarizer executor | @@ -321,4 +325,3 @@ For information on creating new samples, see [SAMPLE_GUIDELINES.md](./SAMPLE_GUI ## More Information - [Python Package Documentation](../README.md) - diff --git a/python/samples/getting_started/workflows/README.md b/python/samples/getting_started/workflows/README.md index 26e8cdd3ba..efbf5bdff2 100644 --- a/python/samples/getting_started/workflows/README.md +++ b/python/samples/getting_started/workflows/README.md @@ -39,6 +39,9 @@ Once comfortable with these, explore the rest of the samples below. | Azure Chat Agents (Function Bridge) | [agents/azure_chat_agents_function_bridge.py](./agents/azure_chat_agents_function_bridge.py) | Chain two agents with a function executor that injects external context | | Azure Chat Agents (Tools + HITL) | [agents/azure_chat_agents_tool_calls_with_feedback.py](./agents/azure_chat_agents_tool_calls_with_feedback.py) | Tool-enabled writer/editor pipeline with human feedback gating via RequestInfoExecutor | | Custom Agent Executors | [agents/custom_agent_executors.py](./agents/custom_agent_executors.py) | Create executors to handle agent run methods | +| Sequential Workflow as Agent | [agents/sequential_workflow_as_agent.py](./agents/sequential_workflow_as_agent.py) | Build a sequential workflow orchestrating agents, then expose it as a reusable agent | +| Concurrent Workflow as Agent | [agents/concurrent_workflow_as_agent.py](./agents/concurrent_workflow_as_agent.py) | Build a concurrent fan-out/fan-in workflow, then expose it as a reusable agent | +| Magentic Workflow as Agent | [agents/magentic_workflow_as_agent.py](./agents/magentic_workflow_as_agent.py) | Configure Magentic orchestration with callbacks, then expose the workflow as an agent | | Workflow as Agent (Reflection Pattern) | [agents/workflow_as_agent_reflection_pattern.py](./agents/workflow_as_agent_reflection_pattern.py) | Wrap a workflow so it can behave like an agent (reflection pattern) | | Workflow as Agent + HITL | [agents/workflow_as_agent_human_in_the_loop.py](./agents/workflow_as_agent_human_in_the_loop.py) | Extend workflow-as-agent with human-in-the-loop capability | @@ -89,6 +92,8 @@ Once comfortable with these, explore the rest of the samples below. 
| Concurrent Orchestration (Default Aggregator) | [orchestration/concurrent_agents.py](./orchestration/concurrent_agents.py) | Fan-out to multiple agents; fan-in with default aggregator returning combined ChatMessages | | Concurrent Orchestration (Custom Aggregator) | [orchestration/concurrent_custom_aggregator.py](./orchestration/concurrent_custom_aggregator.py) | Override aggregator via callback; summarize results with an LLM | | Concurrent Orchestration (Custom Agent Executors) | [orchestration/concurrent_custom_agent_executors.py](./orchestration/concurrent_custom_agent_executors.py) | Child executors own ChatAgents; concurrent fan-out/fan-in via ConcurrentBuilder | +| Group Chat Orchestration (Prompt-Based Manager) | [orchestration/group_chat_prompt_based_manager.py](./orchestration/group_chat_prompt_based_manager.py) | LLM manager-directed conversation using GroupChatBuilder | +| Group Chat (Simple Function Selector) | [orchestration/group_chat_simple_selector.py](./orchestration/group_chat_simple_selector.py) | Group chat with a simple function-based selector that picks the next speaker | | Handoff (Simple) | [orchestration/handoff_simple.py](./orchestration/handoff_simple.py) | Single-tier routing: triage agent routes to specialists, control returns to user after each specialist response | | Handoff (Specialist-to-Specialist) | [orchestration/handoff_specialist_to_specialist.py](./orchestration/handoff_specialist_to_specialist.py) | Multi-tier routing: specialists can hand off to other specialists using `.add_handoff()` fluent API | | Magentic Workflow (Multi-Agent) | [orchestration/magentic.py](./orchestration/magentic.py) | Orchestrate multiple agents with Magentic manager and streaming | diff --git a/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py new file mode 100644 index 0000000000..29dfc1874f --- /dev/null +++ b/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py @@ -0,0 +1,126 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio + +from agent_framework import ConcurrentBuilder +from agent_framework.azure import AzureOpenAIChatClient +from azure.identity import AzureCliCredential + +""" +Sample: Build a concurrent workflow orchestration and wrap it as an agent. + +This script wires up a fan-out/fan-in workflow using `ConcurrentBuilder`, and then +invokes the entire orchestration through the `workflow.as_agent(...)` interface so +downstream coordinators can reuse the orchestration as a single agent. + +Demonstrates: +- Fan-out to multiple agents, fan-in aggregation of final ChatMessages. +- Reusing the orchestrated workflow as an agent entry point with `workflow.as_agent(...)`. +- Workflow completion when the run goes idle with no pending work. + +Prerequisites: +- Azure OpenAI access configured for AzureOpenAIChatClient (use az login + env vars) +- Familiarity with Workflow events (AgentRunEvent, WorkflowOutputEvent) +""" + + +async def main() -> None: + # 1) Create three domain agents using AzureOpenAIChatClient + chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + + researcher = chat_client.create_agent( + instructions=( + "You're an expert market and product researcher. Given a prompt, provide concise, factual insights," + " opportunities, and risks." + ), + name="researcher", + ) + + marketer = chat_client.create_agent( + instructions=( + "You're a creative marketing strategist.
Craft compelling value propositions and target messaging" + " aligned to the prompt." + ), + name="marketer", + ) + + legal = chat_client.create_agent( + instructions=( + "You're a cautious legal/compliance reviewer. Highlight constraints, disclaimers, and policy concerns" + " based on the prompt." + ), + name="legal", + ) + + # 2) Build a concurrent workflow + workflow = ConcurrentBuilder().participants([researcher, marketer, legal]).build() + + # 3) Expose the concurrent workflow as an agent for easy reuse + agent = workflow.as_agent(name="ConcurrentWorkflowAgent") + prompt = "We are launching a new budget-friendly electric bike for urban commuters." + agent_response = await agent.run(prompt) + + if agent_response.messages: + print("\n===== Aggregated Messages =====") + for i, msg in enumerate(agent_response.messages, start=1): + role = getattr(msg.role, "value", msg.role) + name = msg.author_name if msg.author_name else role + print(f"{'-' * 60}\n\n{i:02d} [{name}]:\n{msg.text}") + + """ + Sample Output: + + ===== Aggregated Messages ===== + ------------------------------------------------------------ + + 01 [user]: + We are launching a new budget-friendly electric bike for urban commuters. + ------------------------------------------------------------ + + 02 [researcher]: + **Insights:** + + - **Target Demographic:** Urban commuters seeking affordable, eco-friendly transport; + likely to include students, young professionals, and price-sensitive urban residents. + - **Market Trends:** E-bike sales are growing globally, with increasing urbanization, + higher fuel costs, and sustainability concerns driving adoption. + - **Competitive Landscape:** Key competitors include brands like Rad Power Bikes, Aventon, + Lectric, and domestic budget-focused manufacturers in North America, Europe, and Asia. + - **Feature Expectations:** Customers expect reliability, ease-of-use, theft protection, + lightweight design, sufficient battery range for daily city commutes (typically 25-40 miles), + and low-maintenance components. + + **Opportunities:** + + - **First-time Buyers:** Capture newcomers to e-biking by emphasizing affordability, ease of + operation, and cost savings vs. public transit/car ownership. + ... + ------------------------------------------------------------ + + 03 [marketer]: + **Value Proposition:** + "Empowering your city commute: Our new electric bike combines affordability, reliability, and + sustainable design—helping you conquer urban journeys without breaking the bank." + + **Target Messaging:** + + *For Young Professionals:* + ... + ------------------------------------------------------------ + + 04 [legal]: + **Constraints, Disclaimers, & Policy Concerns for Launching a Budget-Friendly Electric Bike for Urban Commuters:** + + **1. Regulatory Compliance** + - Verify that the electric bike meets all applicable federal, state, and local regulations + regarding e-bike classification, speed limits, power output, and safety features. + - Ensure necessary certifications (e.g., UL certification for batteries, CE markings if sold internationally) are obtained. + + **2. Product Safety** + - Include consumer safety warnings regarding use, battery handling, charging protocols, and age restrictions. + ... 
+ """ # noqa: E501 + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py new file mode 100644 index 0000000000..ff147df453 --- /dev/null +++ b/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py @@ -0,0 +1,67 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import logging + +from agent_framework import ChatAgent, GroupChatBuilder +from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient + +logging.basicConfig(level=logging.INFO) + +""" +Sample: Group Chat Orchestration (manager-directed) + +What it does: +- Demonstrates the generic GroupChatBuilder with a language-model manager directing two agents. +- The manager coordinates a researcher (chat completions) and a writer (responses API) to solve a task. +- Uses the default group chat orchestration pipeline shared with Magentic. + +Prerequisites: +- OpenAI environment variables configured for `OpenAIChatClient` and `OpenAIResponsesClient`. +""" + + +async def main() -> None: + researcher = ChatAgent( + name="Researcher", + description="Collects relevant background information.", + instructions="Gather concise facts that help a teammate answer the question.", + chat_client=OpenAIChatClient(model_id="gpt-4o-mini"), + ) + + writer = ChatAgent( + name="Writer", + description="Synthesizes a polished answer using the gathered notes.", + instructions="Compose clear and structured answers using any notes provided.", + chat_client=OpenAIResponsesClient(), + ) + + workflow = ( + GroupChatBuilder() + .set_prompt_based_manager(chat_client=OpenAIChatClient(), display_name="Coordinator") + .participants(researcher=researcher, writer=writer) + .build() + ) + + task = "Outline the core considerations for planning a community hackathon, and finish with a concise action plan." + + print("\nStarting Group Chat Workflow...\n") + print(f"Input: {task}\n") + + try: + workflow_agent = workflow.as_agent(name="GroupChatWorkflowAgent") + agent_result = await workflow_agent.run(task) + + if agent_result.messages: + print("\n===== as_agent() Transcript =====") + for i, msg in enumerate(agent_result.messages, start=1): + role_value = getattr(msg.role, "value", msg.role) + speaker = msg.author_name or role_value + print(f"{'-' * 50}\n{i:02d} [{speaker}]\n{msg.text}") + + except Exception as e: + print(f"Workflow execution failed: {e}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py new file mode 100644 index 0000000000..6fab7c495c --- /dev/null +++ b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py @@ -0,0 +1,139 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import logging + +from agent_framework import ( + ChatAgent, + HostedCodeInterpreterTool, + MagenticAgentDeltaEvent, + MagenticAgentMessageEvent, + MagenticBuilder, + MagenticFinalResultEvent, + MagenticOrchestratorMessageEvent, + WorkflowOutputEvent, +) +from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient + +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger(__name__) + +""" +Sample: Build a Magentic orchestration and wrap it as an agent. 
+ +The script configures a Magentic workflow, streams its orchestration events, then invokes +the same orchestration through `workflow.as_agent(...)` so the entire Magentic loop can be +reused like any other agent. + +Prerequisites: +- OpenAI credentials configured for `OpenAIChatClient` and `OpenAIResponsesClient`. +""" + + +async def main() -> None: + researcher_agent = ChatAgent( + name="ResearcherAgent", + description="Specialist in research and information gathering", + instructions=( + "You are a Researcher. You find information without additional computation or quantitative analysis." + ), + # This agent requires the gpt-4o-search-preview model to perform web searches. + # Feel free to experiment with other agents that support web search, for example, + # the `OpenAIResponseAgent` or `AzureAgentProtocol` with Bing grounding. + chat_client=OpenAIChatClient(model_id="gpt-4o-search-preview"), + ) + + coder_agent = ChatAgent( + name="CoderAgent", + description="A helpful assistant that writes and executes code to process and analyze data.", + instructions="You solve questions using code. Provide a detailed analysis and show your computation process.", + chat_client=OpenAIResponsesClient(), + tools=HostedCodeInterpreterTool(), + ) + + print("\nBuilding Magentic Workflow...") + + workflow = ( + MagenticBuilder() + .participants(researcher=researcher_agent, coder=coder_agent) + .with_standard_manager( + chat_client=OpenAIChatClient(), + max_round_count=10, + max_stall_count=3, + max_reset_count=2, + ) + .build() + ) + + task = ( + "I am preparing a report on the energy efficiency of different machine learning model architectures. " + "Compare the estimated training and inference energy consumption of ResNet-50, BERT-base, and GPT-2 " + "on standard datasets (e.g., ImageNet for ResNet, GLUE for BERT, WebText for GPT-2). " + "Then, estimate the CO2 emissions associated with each, assuming training on an Azure Standard_NC6s_v3 " + "VM for 24 hours. Provide tables for clarity, and recommend the most energy-efficient model " + "per task type (image classification, text classification, and text generation)."
+ ) + + print(f"\nTask: {task}") + print("\nStarting workflow execution...") + + try: + last_stream_agent_id: str | None = None + stream_line_open: bool = False + final_output: str | None = None + + async for event in workflow.run_stream(task): + if isinstance(event, MagenticOrchestratorMessageEvent): + print(f"\n[ORCH:{event.kind}]\n\n{getattr(event.message, 'text', '')}\n{'-' * 26}") + elif isinstance(event, MagenticAgentDeltaEvent): + if last_stream_agent_id != event.agent_id or not stream_line_open: + if stream_line_open: + print() + print(f"\n[STREAM:{event.agent_id}]: ", end="", flush=True) + last_stream_agent_id = event.agent_id + stream_line_open = True + if event.text: + print(event.text, end="", flush=True) + elif isinstance(event, MagenticAgentMessageEvent): + if stream_line_open: + print(" (final)") + stream_line_open = False + print() + msg = event.message + if msg is not None: + response_text = (msg.text or "").replace("\n", " ") + print(f"\n[AGENT:{event.agent_id}] {msg.role.value}\n\n{response_text}\n{'-' * 26}") + elif isinstance(event, MagenticFinalResultEvent): + print("\n" + "=" * 50) + print("FINAL RESULT:") + print("=" * 50) + if event.message is not None: + print(event.message.text) + print("=" * 50) + elif isinstance(event, WorkflowOutputEvent): + final_output = str(event.data) if event.data is not None else None + + if stream_line_open: + print() + stream_line_open = False + + if final_output is not None: + print(f"\nWorkflow completed with result:\n\n{final_output}\n") + + # Wrap the workflow as an agent for composition scenarios + workflow_agent = workflow.as_agent(name="MagenticWorkflowAgent") + agent_result = await workflow_agent.run(task) + + if agent_result.messages: + print("\n===== as_agent() Transcript =====") + for i, msg in enumerate(agent_result.messages, start=1): + role_value = getattr(msg.role, "value", msg.role) + speaker = msg.author_name or role_value + print(f"{'-' * 50}\n{i:02d} [{speaker}]\n{msg.text}") + + except Exception as e: + print(f"Workflow execution failed: {e}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py new file mode 100644 index 0000000000..a50337135e --- /dev/null +++ b/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py @@ -0,0 +1,87 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio + +from agent_framework import Role, SequentialBuilder +from agent_framework.azure import AzureOpenAIChatClient +from azure.identity import AzureCliCredential + +""" +Sample: Build a sequential workflow orchestration and wrap it as an agent. + +The script assembles a sequential conversation flow with `SequentialBuilder`, then +invokes the entire orchestration through the `workflow.as_agent(...)` interface so +other coordinators can reuse the chain as a single participant. + +Note on internal adapters: +- Sequential orchestration includes small adapter nodes for input normalization + ("input-conversation"), agent-response conversion ("to-conversation:"), + and completion ("complete"). These may appear as ExecutorInvoke/Completed events in + the stream—similar to how concurrent orchestration includes a dispatcher/aggregator. + You can safely ignore them when focusing on agent progress. 
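+
+  If you stream the underlying workflow directly, a minimal filtering sketch looks like
+  this (the `handle(...)` helper is hypothetical, and the concrete event class names may
+  differ from the ExecutorInvoke/Completed shorthand above):
+
+      async for event in workflow.run_stream(prompt):
+          if type(event).__name__.startswith(("ExecutorInvoke", "ExecutorComplete")):
+              continue  # skip internal adapter telemetry
+          handle(event)  # hypothetical application-level event handler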
+ +Prerequisites: +- Azure OpenAI access configured for AzureOpenAIChatClient (use az login + env vars) +""" + + +async def main() -> None: + # 1) Create agents + chat_client = AzureOpenAIChatClient(credential=AzureCliCredential()) + + writer = chat_client.create_agent( + instructions=("You are a concise copywriter. Provide a single, punchy marketing sentence based on the prompt."), + name="writer", + ) + + reviewer = chat_client.create_agent( + instructions=("You are a thoughtful reviewer. Give brief feedback on the previous assistant message."), + name="reviewer", + ) + + # 2) Build sequential workflow: writer -> reviewer + workflow = SequentialBuilder().participants([writer, reviewer]).build() + + # 3) Treat the workflow itself as an agent for follow-up invocations + agent = workflow.as_agent(name="SequentialWorkflowAgent") + prompt = "Write a tagline for a budget-friendly eBike." + agent_response = await agent.run(prompt) + + if agent_response.messages: + print("\n===== Conversation =====") + for i, msg in enumerate(agent_response.messages, start=1): + role_value = getattr(msg.role, "value", msg.role) + normalized_role = str(role_value).lower() if role_value is not None else "assistant" + name = msg.author_name or ("assistant" if normalized_role == Role.ASSISTANT.value else "user") + print(f"{'-' * 60}\n{i:02d} [{name}]\n{msg.text}") + + """ + Sample Output: + + ===== Conversation ===== + ------------------------------------------------------------ + 01 [user] + Write a tagline for a budget-friendly eBike. + ------------------------------------------------------------ + 02 [writer] + Ride farther, spend less—your affordable eBike adventure starts here. + ------------------------------------------------------------ + 03 [reviewer] + This tagline clearly communicates affordability and the benefit of extended travel, making it + appealing to budget-conscious consumers. It has a friendly and motivating tone, though it could + be slightly shorter for more punch. Overall, a strong and effective suggestion! + """ + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/workflows/orchestration/group_chat_prompt_based_manager.py b/python/samples/getting_started/workflows/orchestration/group_chat_prompt_based_manager.py new file mode 100644 index 0000000000..6a6d3a5e22 --- /dev/null +++ b/python/samples/getting_started/workflows/orchestration/group_chat_prompt_based_manager.py @@ -0,0 +1,75 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import logging + +from agent_framework import AgentRunUpdateEvent, ChatAgent, GroupChatBuilder, WorkflowOutputEvent +from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient + +logging.basicConfig(level=logging.INFO) + +""" +Sample: Group Chat Orchestration (manager-directed) + +What it does: +- Demonstrates the generic GroupChatBuilder with a language-model manager directing two agents.
+- The manager coordinates a researcher (chat completions) and a writer (responses API) to solve a task. +- Uses the default group chat orchestration pipeline shared with Magentic. + +Prerequisites: +- OpenAI environment variables configured for `OpenAIChatClient` and `OpenAIResponsesClient`. +""" + + +async def main() -> None: + researcher = ChatAgent( + name="Researcher", + description="Collects relevant background information.", + instructions="Gather concise facts that help a teammate answer the question.", + chat_client=OpenAIChatClient(model_id="gpt-4o-mini"), + ) + + writer = ChatAgent( + name="Writer", + description="Synthesizes a polished answer using the gathered notes.", + instructions="Compose clear and structured answers using any notes provided.", + chat_client=OpenAIResponsesClient(), + ) + + workflow = ( + GroupChatBuilder() + .set_prompt_based_manager(chat_client=OpenAIChatClient(), display_name="Coordinator") + .participants(researcher=researcher, writer=writer) + .build() + ) + + task = "Outline the core considerations for planning a community hackathon, and finish with a concise action plan." + + print("\nStarting Group Chat Workflow...\n") + print(f"TASK: {task}\n") + + final_response = None + last_executor_id: str | None = None + async for event in workflow.run_stream(task): + if isinstance(event, AgentRunUpdateEvent): + # Handle the streaming agent update as it's produced + eid = event.executor_id + if eid != last_executor_id: + if last_executor_id is not None: + print() + print(f"{eid}:", end=" ", flush=True) + last_executor_id = eid + print(event.data, end="", flush=True) + elif isinstance(event, WorkflowOutputEvent): + final_response = getattr(event.data, "text", str(event.data)) + + if final_response: + print("=" * 60) + print("FINAL RESPONSE") + print("=" * 60) + print(final_response) + print("=" * 60) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py b/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py new file mode 100644 index 0000000000..ba4d16accb --- /dev/null +++ b/python/samples/getting_started/workflows/orchestration/group_chat_simple_selector.py @@ -0,0 +1,110 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import logging + +from agent_framework import ChatAgent, GroupChatBuilder, GroupChatStateSnapshot, WorkflowOutputEvent +from agent_framework.openai import OpenAIChatClient + +logging.basicConfig(level=logging.INFO) + +""" +Sample: Group Chat with Simple Speaker Selector Function + +What it does: +- Demonstrates the select_speakers() API for GroupChat orchestration +- Uses a pure Python function to control speaker selection based on conversation state +- Alternates between researcher and writer agents in a simple round-robin pattern +- Shows how to access conversation history, round index, and participant metadata + +Key pattern: + def select_next_speaker(state: GroupChatStateSnapshot) -> str | None: + # state contains: task, participants, conversation, history, round_index + # Return participant name to continue, or None to finish + ... + +Prerequisites: +- OpenAI environment variables configured for OpenAIChatClient +""" + + +def select_next_speaker(state: GroupChatStateSnapshot) -> str | None: + """Simple speaker selector that alternates between researcher and writer. + + This function demonstrates the core pattern: + 1. Examine the current state of the group chat + 2. 
Decide who should speak next + 3. Return participant name or None to finish + + Args: + state: Immutable snapshot containing: + - task: ChatMessage - original user task + - participants: dict[str, str] - participant names → descriptions + - conversation: tuple[ChatMessage, ...] - full conversation history + - history: tuple[GroupChatTurn, ...] - turn-by-turn with speaker attribution + - round_index: int - number of selection rounds so far + - pending_agent: str | None - currently active agent (if any) + + Returns: + Name of next speaker, or None to finish the conversation + """ + round_idx = state["round_index"] + history = state["history"] + + # Finish after 4 turns (researcher → writer → researcher → writer) + if round_idx >= 4: + return None + + # Get the last speaker from history + last_speaker = history[-1].speaker if history else None + + # Simple alternation: researcher → writer → researcher → writer + if last_speaker == "Researcher": + return "Writer" + return "Researcher" + + +async def main() -> None: + researcher = ChatAgent( + name="Researcher", + description="Collects relevant background information.", + instructions="Gather concise facts that help answer the question. Be brief.", + chat_client=OpenAIChatClient(model_id="gpt-4o-mini"), + ) + + writer = ChatAgent( + name="Writer", + description="Synthesizes a polished answer using the gathered notes.", + instructions="Compose a clear, structured answer using any notes provided.", + chat_client=OpenAIChatClient(model_id="gpt-4o-mini"), + ) + + # Two ways to specify participants: + # 1. List form - uses agent.name attribute: .participants([researcher, writer]) + # 2. Dict form - explicit names: .participants(researcher=researcher, writer=writer) + workflow = ( + GroupChatBuilder() + .select_speakers(select_next_speaker, display_name="Orchestrator") + .participants([researcher, writer]) # Uses agent.name for participant names + .build() + ) + + task = "What are the key benefits of using async/await in Python?" + + print("\nStarting Group Chat with Simple Speaker Selector...\n") + print(f"TASK: {task}\n") + print("=" * 80) + + async for event in workflow.run_stream(task): + if isinstance(event, WorkflowOutputEvent): + final_message = event.data + author = getattr(final_message, "author_name", "Unknown") + text = getattr(final_message, "text", str(final_message)) + print(f"\n[{author}]\n{text}\n") + print("-" * 80) + + print("\nWorkflow completed.") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/workflows/orchestration/magentic.py b/python/samples/getting_started/workflows/orchestration/magentic.py index 95038cd0e4..5010172e2b 100644 --- a/python/samples/getting_started/workflows/orchestration/magentic.py +++ b/python/samples/getting_started/workflows/orchestration/magentic.py @@ -9,8 +9,6 @@ MagenticAgentDeltaEvent, MagenticAgentMessageEvent, MagenticBuilder, - MagenticCallbackEvent, - MagenticCallbackMode, MagenticFinalResultEvent, MagenticOrchestratorMessageEvent, WorkflowOutputEvent, @@ -66,40 +64,6 @@ async def main() -> None: tools=HostedCodeInterpreterTool(), ) - # Unified callback - async def on_event(event: MagenticCallbackEvent) -> None: - """ - The `on_event` callback processes events emitted by the workflow. - Events include: orchestrator messages, agent delta updates, agent messages, and final result events. 
- """ - nonlocal last_stream_agent_id, stream_line_open - if isinstance(event, MagenticOrchestratorMessageEvent): - print(f"\n[ORCH:{event.kind}]\n\n{getattr(event.message, 'text', '')}\n{'-' * 26}") - elif isinstance(event, MagenticAgentDeltaEvent): - if last_stream_agent_id != event.agent_id or not stream_line_open: - if stream_line_open: - print() - print(f"\n[STREAM:{event.agent_id}]: ", end="", flush=True) - last_stream_agent_id = event.agent_id - stream_line_open = True - print(event.text, end="", flush=True) - elif isinstance(event, MagenticAgentMessageEvent): - if stream_line_open: - print(" (final)") - stream_line_open = False - print() - msg = event.message - if msg is not None: - response_text = (msg.text or "").replace("\n", " ") - print(f"\n[AGENT:{event.agent_id}] {msg.role.value}\n\n{response_text}\n{'-' * 26}") - elif isinstance(event, MagenticFinalResultEvent): - print("\n" + "=" * 50) - print("FINAL RESULT:") - print("=" * 50) - if event.message is not None: - print(event.message.text) - print("=" * 50) - print("\nBuilding Magentic Workflow...") # State used by on_agent_stream callback @@ -109,7 +73,6 @@ async def on_event(event: MagenticCallbackEvent) -> None: workflow = ( MagenticBuilder() .participants(researcher=researcher_agent, coder=coder_agent) - .on_event(on_event, mode=MagenticCallbackMode.STREAMING) .with_standard_manager( chat_client=OpenAIChatClient(), max_round_count=10, @@ -134,9 +97,39 @@ async def on_event(event: MagenticCallbackEvent) -> None: try: output: str | None = None async for event in workflow.run_stream(task): - print(event) - if isinstance(event, WorkflowOutputEvent): - output = str(event.data) + if isinstance(event, MagenticOrchestratorMessageEvent): + print(f"\n[ORCH:{event.kind}]\n\n{getattr(event.message, 'text', '')}\n{'-' * 26}") + elif isinstance(event, MagenticAgentDeltaEvent): + if last_stream_agent_id != event.agent_id or not stream_line_open: + if stream_line_open: + print() + print(f"\n[STREAM:{event.agent_id}]: ", end="", flush=True) + last_stream_agent_id = event.agent_id + stream_line_open = True + if event.text: + print(event.text, end="", flush=True) + elif isinstance(event, MagenticAgentMessageEvent): + if stream_line_open: + print(" (final)") + stream_line_open = False + print() + msg = event.message + if msg is not None: + response_text = (msg.text or "").replace("\n", " ") + print(f"\n[AGENT:{event.agent_id}] {msg.role.value}\n\n{response_text}\n{'-' * 26}") + elif isinstance(event, MagenticFinalResultEvent): + print("\n" + "=" * 50) + print("FINAL RESULT:") + print("=" * 50) + if event.message is not None: + print(event.message.text) + print("=" * 50) + elif isinstance(event, WorkflowOutputEvent): + output = str(event.data) if event.data is not None else None + + if stream_line_open: + print() + stream_line_open = False if output is not None: print(f"Workflow completed with result:\n\n{output}") diff --git a/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py b/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py index 2bec4c0f7d..fcd6d760ef 100644 --- a/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py +++ b/python/samples/getting_started/workflows/orchestration/magentic_checkpoint.py @@ -113,7 +113,7 @@ async def main() -> None: print("No plan review request emitted; nothing to resume.") return - checkpoints = await checkpoint_storage.list_checkpoints(workflow.workflow.id) + checkpoints = await checkpoint_storage.list_checkpoints(workflow.id) 
if not checkpoints: print("No checkpoints persisted.") return @@ -141,7 +141,7 @@ async def main() -> None: # and then continues the workflow. Because we only captured the initial plan review # checkpoint, the resumed run should complete almost immediately. final_event: WorkflowOutputEvent | None = None - async for event in resumed_workflow.workflow.run_stream_from_checkpoint( + async for event in resumed_workflow.run_stream_from_checkpoint( resume_checkpoint.checkpoint_id, responses={plan_review_request_id: approval}, ): @@ -204,7 +204,7 @@ def _pending_message_count(cp: WorkflowCheckpoint) -> int: final_event_post: WorkflowOutputEvent | None = None post_emitted_events = False post_plan_workflow = build_workflow(checkpoint_storage) - async for event in post_plan_workflow.workflow.run_stream_from_checkpoint( + async for event in post_plan_workflow.run_stream_from_checkpoint( post_plan_checkpoint.checkpoint_id, responses={}, ): diff --git a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_update.py b/python/samples/getting_started/workflows/orchestration/magentic_human_plan_update.py index 339554d3ec..5ba8b5cc23 100644 --- a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_update.py +++ b/python/samples/getting_started/workflows/orchestration/magentic_human_plan_update.py @@ -10,8 +10,6 @@ MagenticAgentDeltaEvent, MagenticAgentMessageEvent, MagenticBuilder, - MagenticCallbackEvent, - MagenticCallbackMode, MagenticFinalResultEvent, MagenticOrchestratorMessageEvent, MagenticPlanReviewDecision, @@ -77,43 +75,11 @@ def on_exception(exception: Exception) -> None: last_stream_agent_id: str | None = None stream_line_open: bool = False - # Unified callback - async def on_event(event: MagenticCallbackEvent) -> None: - nonlocal last_stream_agent_id, stream_line_open - if isinstance(event, MagenticOrchestratorMessageEvent): - print(f"\n[ORCH:{event.kind}]\n\n{getattr(event.message, 'text', '')}\n{'-' * 26}") - elif isinstance(event, MagenticAgentDeltaEvent): - if last_stream_agent_id != event.agent_id or not stream_line_open: - if stream_line_open: - print() - print(f"\n[STREAM:{event.agent_id}]: ", end="", flush=True) - last_stream_agent_id = event.agent_id - stream_line_open = True - print(event.text, end="", flush=True) - elif isinstance(event, MagenticAgentMessageEvent): - if stream_line_open: - print(" (final)") - stream_line_open = False - print() - msg = event.message - if msg is not None: - response_text = (msg.text or "").replace("\n", " ") - print(f"\n[AGENT:{event.agent_id}] {msg.role.value}\n\n{response_text}\n{'-' * 26}") - elif isinstance(event, MagenticFinalResultEvent): - print("\n" + "=" * 50) - print("FINAL RESULT:") - print("=" * 50) - if event.message is not None: - print(event.message.text) - print("=" * 50) - print("\nBuilding Magentic Workflow...") workflow = ( MagenticBuilder() .participants(researcher=researcher_agent, coder=coder_agent) - .on_exception(on_exception) - .on_event(on_event, mode=MagenticCallbackMode.STREAMING) .with_standard_manager( chat_client=OpenAIChatClient(), max_round_count=10, @@ -150,11 +116,34 @@ async def on_event(event: MagenticCallbackEvent) -> None: stream = workflow.run_stream(task) # Collect events from the stream - events = [event async for event in stream] - pending_responses = None - - # Process events to find request info events, outputs, and completion status - for event in events: + async for event in stream: + if isinstance(event, MagenticOrchestratorMessageEvent): + 
print(f"\n[ORCH:{event.kind}]\n\n{getattr(event.message, 'text', '')}\n{'-' * 26}") + elif isinstance(event, MagenticAgentDeltaEvent): + if last_stream_agent_id != event.agent_id or not stream_line_open: + if stream_line_open: + print() + print(f"\n[STREAM:{event.agent_id}]: ", end="", flush=True) + last_stream_agent_id = event.agent_id + stream_line_open = True + if event.text: + print(event.text, end="", flush=True) + elif isinstance(event, MagenticAgentMessageEvent): + if stream_line_open: + print(" (final)") + stream_line_open = False + print() + msg = event.message + if msg is not None: + response_text = (msg.text or "").replace("\n", " ") + print(f"\n[AGENT:{event.agent_id}] {msg.role.value}\n\n{response_text}\n{'-' * 26}") + elif isinstance(event, MagenticFinalResultEvent): + print("\n" + "=" * 50) + print("FINAL RESULT:") + print("=" * 50) + if event.message is not None: + print(event.message.text) + print("=" * 50) if isinstance(event, RequestInfoEvent) and event.request_type is MagenticPlanReviewRequest: pending_request = event review_req = cast(MagenticPlanReviewRequest, event.data) @@ -162,9 +151,14 @@ async def on_event(event: MagenticCallbackEvent) -> None: print(f"\n=== PLAN REVIEW REQUEST ===\n{review_req.plan_text}\n") elif isinstance(event, WorkflowOutputEvent): # Capture workflow output during streaming - workflow_output = str(event.data) + workflow_output = str(event.data) if event.data else None completed = True + if stream_line_open: + print() + stream_line_open = False + pending_responses = None + # Handle pending plan review request if pending_request is not None: # Get human input for plan review decision diff --git a/python/samples/semantic-kernel-migration/README.md b/python/samples/semantic-kernel-migration/README.md index 7c18db8c5b..4e5e04a345 100644 --- a/python/samples/semantic-kernel-migration/README.md +++ b/python/samples/semantic-kernel-migration/README.md @@ -1,10 +1,11 @@ -# Copyright (c) Microsoft. All rights reserved. # Semantic Kernel → Microsoft Agent Framework Migration Samples This gallery helps Semantic Kernel (SK) developers move to the Microsoft Agent Framework (AF) with minimal guesswork. Each script pairs SK code with its AF equivalent so you can compare primitives, tooling, and orchestration patterns side by side while you migrate production workloads. ## What’s Included +## What’s Included + ### Chat completion parity - [01_basic_chat_completion.py](chat_completion/01_basic_chat_completion.py) — Minimal SK `ChatCompletionAgent` and AF `ChatAgent` conversation. - [02_chat_completion_with_tool.py](chat_completion/02_chat_completion_with_tool.py) — Adds a simple tool/function call in both SDKs. @@ -32,7 +33,8 @@ This gallery helps Semantic Kernel (SK) developers move to the Microsoft Agent F ### Orchestrations - [sequential.py](orchestrations/sequential.py) — Step-by-step SK Team → AF `SequentialBuilder` migration. - [concurrent_basic.py](orchestrations/concurrent_basic.py) — Concurrent orchestration parity. -- [handoff.py](orchestrations/handoff.py) — Support triage handoff migration with specialist routing. +- [group_chat.py](orchestrations/group_chat.py) — Group chat coordination with an LLM-backed manager in both SDKs. +- [handoff.py](orchestrations/handoff.py) - Handoff coordination between agents. - [magentic.py](orchestrations/magentic.py) — Magentic Team orchestration vs. AF builder wiring. 
### Processes @@ -55,7 +57,7 @@ python samples/semantic-kernel-migration/chat_completion/01_basic_chat_completio Every script accepts no CLI arguments and will first call the SK implementation, followed by the AF version. Adjust the prompt or credentials inside the file as necessary before running. ## Running Orchestration & Workflow Samples -Advanced comparisons are split between `samples/semantic-kernel-migration/orchestrations` (Sequential, Concurrent, Group Chat, Handoff, Magentic) and `samples/semantic-kernel-migration/processes` (fan-out/fan-in, nested). You can run them directly, or isolate dependencies in a throwaway virtual environment: +Advanced comparisons are split between `semantic-kernel-migration/orchestrations` (Sequential, Concurrent, Group Chat, Handoff, Magentic) and `semantic-kernel-migration/processes` (fan-out/fan-in, nested). You can run them directly, or isolate dependencies in a throwaway virtual environment: ``` cd samples/semantic-kernel-migration uv venv --python 3.10 .venv-migration diff --git a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py new file mode 100644 index 0000000000..42142b5363 --- /dev/null +++ b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py @@ -0,0 +1,266 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Side-by-side group chat orchestrations for Agent Framework and Semantic Kernel.""" + +import asyncio +import sys +from collections.abc import Sequence +from typing import Any, cast + +from agent_framework import ChatAgent, ChatMessage, GroupChatBuilder, WorkflowOutputEvent +from agent_framework.azure import AzureOpenAIChatClient, AzureOpenAIResponsesClient +from azure.identity import AzureCliCredential +from semantic_kernel.agents import Agent, ChatCompletionAgent, GroupChatOrchestration +from semantic_kernel.agents.orchestration.group_chat import ( + BooleanResult, + GroupChatManager, + MessageResult, + StringResult, +) +from semantic_kernel.agents.runtime import InProcessRuntime +from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.contents import AuthorRole, ChatHistory, ChatMessageContent +from semantic_kernel.functions import KernelArguments +from semantic_kernel.kernel import Kernel +from semantic_kernel.prompt_template import KernelPromptTemplate, PromptTemplateConfig + +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + + +DISCUSSION_TOPIC = "What are the essential steps for launching a community hackathon?" + + +###################################################################### +# Semantic Kernel orchestration path +###################################################################### + + +def build_semantic_kernel_agents() -> list[Agent]: + credential = AzureCliCredential() + + researcher = ChatCompletionAgent( + name="Researcher", + description="Collects background information and potential resources.", + instructions=( + "Gather concise facts or considerations that help plan a community hackathon. " + "Keep your responses factual and scannable."
+ ), + service=AzureChatCompletion(credential=credential), + ) + + planner = ChatCompletionAgent( + name="Planner", + description="Synthesizes an actionable plan from available notes.", + instructions=( + "Use the running conversation to draft a structured action plan. Emphasize logistics and sequencing." + ), + service=AzureChatCompletion(credential=credential), + ) + + return [researcher, planner] + + +class ChatCompletionGroupChatManager(GroupChatManager): + """Group chat manager that delegates orchestration decisions to an Azure OpenAI deployment.""" + + service: ChatCompletionClientBase + topic: str + + termination_prompt: str = ( + "You are coordinating a conversation about '{{topic}}'. " + "Decide if the discussion has produced a solid answer. " + 'Respond using JSON: {"result": true|false, "reason": "..."}.' + ) + + selection_prompt: str = ( + "You are coordinating a conversation about '{{topic}}'. " + "Choose the next participant by returning JSON with keys (result, reason). " + "The result must match one of: {{participants}}." + ) + + summary_prompt: str = ( + "You have just finished a discussion about '{{topic}}'. " + "Summarize the plan and highlight key takeaways. Return JSON with keys (result, reason) where " + "result is the final response text." + ) + + def __init__(self, *, topic: str, service: ChatCompletionClientBase) -> None: + super().__init__(topic=topic, service=service) + self._round_robin_index = 0 + + async def _render_prompt(self, template: str, **kwargs: Any) -> str: + prompt_template = KernelPromptTemplate(prompt_template_config=PromptTemplateConfig(template=template)) + return await prompt_template.render(Kernel(), arguments=KernelArguments(**kwargs)) + + @override + async def should_request_user_input(self, chat_history: ChatHistory) -> BooleanResult: + return BooleanResult(result=False, reason="This orchestration is fully automated.") + + @override + async def should_terminate(self, chat_history: ChatHistory) -> BooleanResult: + rendered_prompt = await self._render_prompt(self.termination_prompt, topic=self.topic) + chat_history.messages.insert( + 0, + ChatMessageContent(role=AuthorRole.SYSTEM, content=rendered_prompt), + ) + chat_history.add_message( + ChatMessageContent(role=AuthorRole.USER, content="Decide if the discussion is complete."), + ) + + response = await self.service.get_chat_message_content( + chat_history, + settings=PromptExecutionSettings(response_format=BooleanResult), + ) + result = BooleanResult.model_validate_json(response.content) + return result + + @override + async def select_next_agent( + self, + chat_history: ChatHistory, + participant_descriptions: dict[str, str], + ) -> StringResult: + rendered_prompt = await self._render_prompt( + self.selection_prompt, + topic=self.topic, + participants=", ".join(participant_descriptions.keys()), + ) + chat_history.messages.insert( + 0, + ChatMessageContent(role=AuthorRole.SYSTEM, content=rendered_prompt), + ) + chat_history.add_message( + ChatMessageContent(role=AuthorRole.USER, content="Pick the next participant to speak."), + ) + + response = await self.service.get_chat_message_content( + chat_history, + settings=PromptExecutionSettings(response_format=StringResult), + ) + result = StringResult.model_validate_json(response.content) + if result.result not in participant_descriptions: + raise RuntimeError(f"Unknown participant selected: {result.result}") + return result + + @override + async def filter_results(self, chat_history: ChatHistory) -> MessageResult: + rendered_prompt = await 
self._render_prompt(self.summary_prompt, topic=self.topic) + chat_history.messages.insert( + 0, + ChatMessageContent(role=AuthorRole.SYSTEM, content=rendered_prompt), + ) + chat_history.add_message( + ChatMessageContent(role=AuthorRole.USER, content="Summarize the plan."), + ) + + response = await self.service.get_chat_message_content( + chat_history, + settings=PromptExecutionSettings(response_format=StringResult), + ) + string_result = StringResult.model_validate_json(response.content) + return MessageResult( + result=ChatMessageContent(role=AuthorRole.ASSISTANT, content=string_result.result), + reason=string_result.reason, + ) + + +async def sk_agent_response_callback(message: ChatMessageContent | Sequence[ChatMessageContent]) -> None: + if isinstance(message, ChatMessageContent): + messages: Sequence[ChatMessageContent] = [message] + elif isinstance(message, Sequence) and not isinstance(message, (str, bytes)): + messages = list(message) + else: + messages = [cast(ChatMessageContent, message)] + + for item in messages: + print(f"# {item.name}\n{item.content}\n") + + +async def run_semantic_kernel_example(task: str) -> str: + credential = AzureCliCredential() + orchestration = GroupChatOrchestration( + members=build_semantic_kernel_agents(), + manager=ChatCompletionGroupChatManager( + topic=DISCUSSION_TOPIC, + service=AzureChatCompletion(credential=credential), + max_rounds=8, + ), + agent_response_callback=sk_agent_response_callback, + ) + + runtime = InProcessRuntime() + runtime.start() + + try: + orchestration_result = await orchestration.invoke(task=task, runtime=runtime) + final_message = await orchestration_result.get(timeout=30) + if isinstance(final_message, ChatMessageContent): + return final_message.content or "" + return str(final_message) + finally: + await runtime.stop_when_idle() + + +###################################################################### +# Agent Framework orchestration path +###################################################################### + + +async def run_agent_framework_example(task: str) -> str: + credential = AzureCliCredential() + + researcher = ChatAgent( + name="Researcher", + description="Collects background information and potential resources.", + instructions=( + "Gather concise facts or considerations that help plan a community hackathon. " + "Keep your responses factual and scannable." + ), + chat_client=AzureOpenAIChatClient(credential=credential), + ) + + planner = ChatAgent( + name="Planner", + description="Turns the collected notes into a concrete action plan.", + instructions=("Propose a structured action plan that accounts for logistics, roles, and timeline."), + chat_client=AzureOpenAIResponsesClient(credential=credential), + ) + + workflow = ( + GroupChatBuilder() + .set_prompt_based_manager( + chat_client=AzureOpenAIChatClient(credential=credential), + display_name="Coordinator", + ) + .participants(researcher=researcher, planner=planner) + .build() + ) + + final_response = "" + async for event in workflow.run_stream(task): + if isinstance(event, WorkflowOutputEvent): + data = event.data + final_response = data.text or "" if isinstance(data, ChatMessage) else str(data) + return final_response + + +async def main() -> None: + task = "Kick off the group discussion." 
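+    # The same task is sent through both orchestration paths below so the two
+    # transcripts can be compared side by side.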
+ + print("===== Agent Framework Group Chat =====") + af_response = await run_agent_framework_example(task) + print(af_response or "No response returned.") + print() + + print("===== Semantic Kernel Group Chat =====") + sk_response = await run_semantic_kernel_example(task) + print(sk_response or "No response returned.") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/semantic-kernel-migration/orchestrations/handoff.py b/python/samples/semantic-kernel-migration/orchestrations/handoff.py index ccb30d4f6c..2bf1f73665 100644 --- a/python/samples/semantic-kernel-migration/orchestrations/handoff.py +++ b/python/samples/semantic-kernel-migration/orchestrations/handoff.py @@ -1,13 +1,10 @@ # Copyright (c) Microsoft. All rights reserved. """Side-by-side handoff orchestrations for Semantic Kernel and Agent Framework.""" -from __future__ import annotations - import asyncio import sys -from collections.abc import AsyncIterable, Sequence -from typing import Any, cast -from collections.abc import Iterator +from collections.abc import AsyncIterable, Iterator, Sequence +from typing import cast from agent_framework import ( ChatMessage, @@ -29,13 +26,12 @@ FunctionResultContent, StreamingChatMessageContent, ) -from semantic_kernel.functions import KernelArguments, kernel_function -from semantic_kernel.prompt_template import KernelPromptTemplate, PromptTemplateConfig +from semantic_kernel.functions import kernel_function if sys.version_info >= (3, 12): - from typing import override # pragma: no cover + pass # pragma: no cover else: - from typing_extensions import override # pragma: no cover + pass # pragma: no cover CUSTOMER_PROMPT = "I need help with order 12345. I want a replacement and need to know when it will arrive."