From 8f16503e6a97fb6ad21fc03200612533ac89be9e Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Wed, 15 May 2024 21:21:31 -0700 Subject: [PATCH 01/15] update sample notebook for export and running workflows, other qol fixes --- samples/apps/autogen-studio/.gitignore | 3 + .../autogenstudio/chatmanager.py | 72 +-- .../apps/autogen-studio/autogenstudio/cli.py | 35 +- .../autogenstudio/database/dbmanager.py | 39 +- .../autogenstudio/database/utils.py | 101 +++-- .../autogen-studio/autogenstudio/datamodel.py | 9 +- .../autogenstudio/utils/utils.py | 2 + .../autogen-studio/autogenstudio/version.py | 2 +- .../autogen-studio/autogenstudio/web/app.py | 24 +- .../autogen-studio/autogenstudio/web/serve.py | 30 ++ .../autogenstudio/workflowmanager.py | 132 +++++- .../frontend/src/components/types.ts | 2 +- .../frontend/src/components/utils.ts | 37 +- .../components/views/builder/utils/export.tsx | 207 +++++++++ .../src/components/views/builder/workflow.tsx | 66 ++- .../autogen-studio/notebooks/agent_spec.json | 38 -- .../notebooks/groupchat_spec.json | 103 ----- .../notebooks/travel_groupchat.json | 238 ++++++++++ .../autogen-studio/notebooks/tutorial.ipynb | 422 ++++++++---------- .../autogen-studio/notebooks/two_agent.json | 85 ++++ .../apps/autogen-studio/work_dir/skills.py | 55 +++ 21 files changed, 1113 insertions(+), 589 deletions(-) create mode 100644 samples/apps/autogen-studio/autogenstudio/web/serve.py create mode 100644 samples/apps/autogen-studio/frontend/src/components/views/builder/utils/export.tsx delete mode 100644 samples/apps/autogen-studio/notebooks/agent_spec.json delete mode 100644 samples/apps/autogen-studio/notebooks/groupchat_spec.json create mode 100644 samples/apps/autogen-studio/notebooks/travel_groupchat.json create mode 100644 samples/apps/autogen-studio/notebooks/two_agent.json create mode 100644 samples/apps/autogen-studio/work_dir/skills.py diff --git a/samples/apps/autogen-studio/.gitignore b/samples/apps/autogen-studio/.gitignore index 
e1e3c9942ec1..549ce16b6db9 100644 --- a/samples/apps/autogen-studio/.gitignore +++ b/samples/apps/autogen-studio/.gitignore @@ -9,6 +9,9 @@ autogenstudio/web/workdir/* autogenstudio/web/ui/* autogenstudio/web/skills/user/* .release.sh +.nightly.sh + +notebooks/work_dir/* # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/samples/apps/autogen-studio/autogenstudio/chatmanager.py b/samples/apps/autogen-studio/autogenstudio/chatmanager.py index 84b85673f07c..18f4a0634bdc 100644 --- a/samples/apps/autogen-studio/autogenstudio/chatmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/chatmanager.py @@ -82,76 +82,12 @@ def chat( connection_id=connection_id, ) - workflow = Workflow.model_validate(workflow) - message_text = message.content.strip() + result_message: Message = workflow_manager.run(message=f"{message_text}", clear_history=False, history=history) - start_time = time.time() - workflow_manager.run(message=f"{message_text}", clear_history=False) - end_time = time.time() - - metadata = { - "messages": workflow_manager.agent_history, - "summary_method": workflow.summary_method, - "time": end_time - start_time, - "files": get_modified_files(start_time, end_time, source_dir=work_dir), - } - - output = self._generate_output(message_text, workflow_manager, workflow) - - output_message = Message( - user_id=message.user_id, - role="assistant", - content=output, - meta=json.dumps(metadata), - session_id=message.session_id, - ) - - return output_message - - def _generate_output( - self, - message_text: str, - workflow_manager: WorkflowManager, - workflow: Workflow, - ) -> str: - """ - Generates the output response based on the workflow configuration and agent history. - - :param message_text: The text of the incoming message. - :param flow: An instance of `WorkflowManager`. - :param flow_config: An instance of `AgentWorkFlowConfig`. - :return: The output response as a string. 
- """ - - output = "" - if workflow.summary_method == "last": - successful_code_blocks = extract_successful_code_blocks(workflow_manager.agent_history) - last_message = ( - workflow_manager.agent_history[-1]["message"]["content"] if workflow_manager.agent_history else "" - ) - successful_code_blocks = "\n\n".join(successful_code_blocks) - output = (last_message + "\n" + successful_code_blocks) if successful_code_blocks else last_message - elif workflow.summary_method == "llm": - client = workflow_manager.receiver.client - status_message = SocketMessage( - type="agent_status", - data={ - "status": "summarizing", - "message": "Summarizing agent dialogue", - }, - connection_id=workflow_manager.connection_id, - ) - self.send(status_message.dict()) - output = summarize_chat_history( - task=message_text, - messages=workflow_manager.agent_history, - client=client, - ) - - elif workflow.summary_method == "none": - output = "" - return output + result_message.user_id = message.user_id + result_message.session_id = message.session_id + return result_message class WebSocketConnectionManager: diff --git a/samples/apps/autogen-studio/autogenstudio/cli.py b/samples/apps/autogen-studio/autogenstudio/cli.py index 42642bcd68af..81fee7991455 100644 --- a/samples/apps/autogen-studio/autogenstudio/cli.py +++ b/samples/apps/autogen-studio/autogenstudio/cli.py @@ -16,7 +16,7 @@ def ui( port: int = 8081, workers: int = 1, reload: Annotated[bool, typer.Option("--reload")] = False, - docs: bool = False, + docs: bool = True, appdir: str = None, database_uri: Optional[str] = None, ): @@ -48,6 +48,39 @@ def ui( ) +@app.command() +def serve( + workflow: str = "", + host: str = "127.0.0.1", + port: int = 8084, + workers: int = 1, + docs: bool = False, +): + """ + Serve an API Endpoint based on an AutoGen Studio workflow json file. + + Args: + workflow (str): Path to the workflow json file. + host (str, optional): Host to run the UI on. Defaults to 127.0.0.1 (localhost). 
+ port (int, optional): Port to run the UI on. Defaults to 8081. + workers (int, optional): Number of workers to run the UI with. Defaults to 1. + reload (bool, optional): Whether to reload the UI on code changes. Defaults to False. + docs (bool, optional): Whether to generate API docs. Defaults to False. + + """ + + os.environ["AUTOGENSTUDIO_API_DOCS"] = str(docs) + os.environ["AUTOGENSTUDIO_WORKFLOW_FILE"] = workflow + + uvicorn.run( + "autogenstudio.web.serve:app", + host=host, + port=port, + workers=workers, + reload=False, + ) + + @app.command() def version(): """ diff --git a/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py b/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py index 00d3714b63fa..f964e3254cae 100644 --- a/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py @@ -1,3 +1,4 @@ +import threading from datetime import datetime from typing import Optional @@ -24,6 +25,8 @@ class DBManager: """A class to manage database operations""" + _init_lock = threading.Lock() # Class-level lock + def __init__(self, engine_uri: str): connection_args = {"check_same_thread": True} if "sqlite" in engine_uri else {} self.engine = create_engine(engine_uri, connect_args=connection_args) @@ -31,14 +34,15 @@ def __init__(self, engine_uri: str): def create_db_and_tables(self): """Create a new database and tables""" - try: - SQLModel.metadata.create_all(self.engine) + with self._init_lock: # Use the lock try: - init_db_samples(self) + SQLModel.metadata.create_all(self.engine) + try: + init_db_samples(self) + except Exception as e: + logger.info("Error while initializing database samples: " + str(e)) except Exception as e: - logger.info("Error while initializing database samples: " + str(e)) - except Exception as e: - logger.info("Error while creating database tables:" + str(e)) + logger.info("Error while creating database tables:" + str(e)) def upsert(self, model: 
SQLModel): """Create a new entity""" @@ -62,7 +66,7 @@ def upsert(self, model: SQLModel): session.refresh(model) except Exception as e: session.rollback() - logger.error("Error while upserting %s", e) + logger.error("Error while updating " + str(model_class.__name__) + ": " + str(e)) status = False response = Response( @@ -115,7 +119,7 @@ def get_items( session.rollback() status = False status_message = f"Error while fetching {model_class.__name__}" - logger.error("Error while getting %s: %s", model_class.__name__, e) + logger.error("Error while getting items: " + str(model_class.__name__) + " " + str(e)) response: Response = Response( message=status_message, @@ -157,16 +161,16 @@ def delete(self, model_class: SQLModel, filters: dict = None): status_message = f"{model_class.__name__} Deleted Successfully" else: print(f"Row with filters {filters} not found") - logger.info("Row with filters %s not found", filters) + logger.info("Row with filters + filters + not found") status_message = "Row not found" except exc.IntegrityError as e: session.rollback() - logger.error("Integrity ... Error while deleting: %s", e) + logger.error("Integrity ... Error while deleting: " + str(e)) status_message = f"The {model_class.__name__} is linked to another entity and cannot be deleted." status = False except Exception as e: session.rollback() - logger.error("Error while deleting: %s", e) + logger.error("Error while deleting: " + str(e)) status_message = f"Error while deleting: {e}" status = False response = Response( @@ -182,6 +186,7 @@ def get_linked_entities( primary_id: int, return_json: bool = False, agent_type: Optional[str] = None, + sequence_id: Optional[int] = None, ): """ Get all entities linked to the primary entity. 
@@ -222,10 +227,11 @@ def get_linked_entities( .where( WorkflowAgentLink.workflow_id == primary_id, WorkflowAgentLink.agent_type == agent_type, + WorkflowAgentLink.sequence_id == sequence_id, ) ).all() except Exception as e: - logger.error("Error while getting linked entities: %s", e) + logger.error("Error while getting linked entities: " + str(e)) status_message = f"Error while getting linked entities: {e}" status = False if return_json: @@ -245,6 +251,7 @@ def link( primary_id: int, secondary_id: int, agent_type: Optional[str] = None, + sequence_id: Optional[int] = None, ) -> Response: """ Link two entities together. @@ -357,6 +364,7 @@ def link( WorkflowAgentLink.workflow_id == primary_id, WorkflowAgentLink.agent_id == secondary_id, WorkflowAgentLink.agent_type == agent_type, + WorkflowAgentLink.sequence_id == sequence_id, ) ).first() if existing_link: @@ -373,6 +381,7 @@ def link( workflow_id=primary_id, agent_id=secondary_id, agent_type=agent_type, + sequence_id=sequence_id, ) session.add(workflow_agent_link) # add and commit the link @@ -385,7 +394,7 @@ def link( except Exception as e: session.rollback() - logger.error("Error while linking: %s", e) + logger.error("Error while linking: " + str(e)) status = False status_message = f"Error while linking due to an exception: {e}" @@ -402,6 +411,7 @@ def unlink( primary_id: int, secondary_id: int, agent_type: Optional[str] = None, + sequence_id: Optional[int] = None, ) -> Response: """ Unlink two entities. 
@@ -452,6 +462,7 @@ def unlink( WorkflowAgentLink.workflow_id == primary_id, WorkflowAgentLink.agent_id == secondary_id, WorkflowAgentLink.agent_type == agent_type, + WorkflowAgentLink.sequence_id == sequence_id, ) ).first() @@ -465,7 +476,7 @@ def unlink( except Exception as e: session.rollback() - logger.error("Error while unlinking: %s", e) + logger.error("Error while unlinking: " + str(e)) status = False status_message = f"Error while unlinking due to an exception: {e}" diff --git a/samples/apps/autogen-studio/autogenstudio/database/utils.py b/samples/apps/autogen-studio/autogenstudio/database/utils.py index c14003b414c3..bbad46b7d94f 100644 --- a/samples/apps/autogen-studio/autogenstudio/database/utils.py +++ b/samples/apps/autogen-studio/autogenstudio/database/utils.py @@ -23,6 +23,7 @@ Skill, Workflow, WorkflowAgentLink, + WorkFlowType, ) @@ -71,9 +72,12 @@ def get_agent(agent_id): agent_dict["agents"] = [get_agent(agent.id) for agent in agent.agents] return agent_dict + agents = [] for link in workflow_agent_links: agent_dict = get_agent(link.agent_id) + agents.append({"agent": agent_dict, "link": link.model_dump(mode="json")}) workflow[str(link.agent_type.value)] = agent_dict + return workflow @@ -141,9 +145,13 @@ def init_db_samples(dbmanager: Any): logger.info("Database already initialized with Default and Travel Planning Workflows") return logger.info("Initializing database with Default and Travel Planning Workflows") + # models - gpt_4_model = Model( - model="gpt-4-1106-preview", description="OpenAI GPT-4 model", user_id="guestuser@gmail.com", api_type="open_ai" + google_gemini_model = Model( + model="gemini-1.5-pro-latest", + description="Google's Gemini model", + user_id="guestuser@gmail.com", + api_type="google", ) azure_model = Model( model="gpt4-turbo", @@ -160,15 +168,11 @@ def init_db_samples(dbmanager: Any): api_type="open_ai", ) - google_gemini_model = Model( - model="gemini-1.5-pro-latest", - description="Google's Gemini model", - 
user_id="guestuser@gmail.com", - api_type="google", + gpt_4_model = Model( + model="gpt-4-1106-preview", description="OpenAI GPT-4 model", user_id="guestuser@gmail.com", api_type="open_ai" ) # skills - generate_image_skill = Skill( name="generate_images", description="Generate and save images based on a user's query.", @@ -177,32 +181,6 @@ def init_db_samples(dbmanager: Any): ) # agents - user_proxy_config = AgentConfig( - name="user_proxy", - description="User Proxy Agent Configuration", - human_input_mode="NEVER", - max_consecutive_auto_reply=25, - system_message="You are a helpful assistant", - code_execution_config=CodeExecutionConfigTypes.local, - default_auto_reply="TERMINATE", - llm_config=False, - ) - user_proxy = Agent( - user_id="guestuser@gmail.com", type=AgentType.userproxy, config=user_proxy_config.model_dump(mode="json") - ) - - painter_assistant_config = AgentConfig( - name="default_assistant", - description="Assistant Agent", - human_input_mode="NEVER", - max_consecutive_auto_reply=25, - system_message=AssistantAgent.DEFAULT_SYSTEM_MESSAGE, - code_execution_config=CodeExecutionConfigTypes.none, - llm_config={}, - ) - painter_assistant = Agent( - user_id="guestuser@gmail.com", type=AgentType.assistant, config=painter_assistant_config.model_dump(mode="json") - ) planner_assistant_config = AgentConfig( name="planner_assistant", @@ -245,7 +223,7 @@ def init_db_samples(dbmanager: Any): config=language_assistant_config.model_dump(mode="json"), ) - # group chat + # group chat agent travel_groupchat_config = AgentConfig( name="travel_groupchat", admin_name="groupchat", @@ -262,11 +240,48 @@ def init_db_samples(dbmanager: Any): user_id="guestuser@gmail.com", type=AgentType.groupchat, config=travel_groupchat_config.model_dump(mode="json") ) - # workflows - default_workflow = Workflow(name="Default Workflow", description="Default workflow", user_id="guestuser@gmail.com") + user_proxy_config = AgentConfig( + name="user_proxy", + description="User Proxy Agent 
Configuration", + human_input_mode="NEVER", + max_consecutive_auto_reply=25, + system_message="You are a helpful assistant", + code_execution_config=CodeExecutionConfigTypes.local, + default_auto_reply="TERMINATE", + llm_config=False, + ) + user_proxy = Agent( + user_id="guestuser@gmail.com", type=AgentType.userproxy, config=user_proxy_config.model_dump(mode="json") + ) + default_assistant_config = AgentConfig( + name="default_assistant", + description="Assistant Agent", + human_input_mode="NEVER", + max_consecutive_auto_reply=25, + system_message=AssistantAgent.DEFAULT_SYSTEM_MESSAGE, + code_execution_config=CodeExecutionConfigTypes.none, + llm_config={}, + ) + default_assistant = Agent( + user_id="guestuser@gmail.com", type=AgentType.assistant, config=default_assistant_config.model_dump(mode="json") + ) + + # workflows travel_workflow = Workflow( - name="Travel Planning Workflow", description="Travel workflow", user_id="guestuser@gmail.com" + name="Travel Planning Workflow", + description="Travel workflow", + user_id="guestuser@gmail.com", + sample_tasks=["Plan a 3 day trip to Hawaii Islands.", "Plan an eventful and exciting trip to Uzbeksitan."], + ) + default_workflow = Workflow( + name="Default Workflow", + description="Default workflow", + user_id="guestuser@gmail.com", + sample_tasks=[ + "paint a picture of a glass of ethiopian coffee, freshly brewed in a tall glass cup, on a table right in front of a lush green forest scenery", + "Plot the stock price of NVIDIA YTD.", + ], ) with Session(dbmanager.engine) as session: @@ -276,25 +291,25 @@ def init_db_samples(dbmanager: Any): session.add(gpt_4_model) session.add(generate_image_skill) session.add(user_proxy) - session.add(painter_assistant) + session.add(default_assistant) session.add(travel_groupchat_agent) session.add(planner_assistant) session.add(local_assistant) session.add(language_assistant) - session.add(default_workflow) session.add(travel_workflow) + session.add(default_workflow) session.commit() - 
dbmanager.link(link_type="agent_model", primary_id=painter_assistant.id, secondary_id=gpt_4_model.id) - dbmanager.link(link_type="agent_skill", primary_id=painter_assistant.id, secondary_id=generate_image_skill.id) + dbmanager.link(link_type="agent_model", primary_id=default_assistant.id, secondary_id=gpt_4_model.id) + dbmanager.link(link_type="agent_skill", primary_id=default_assistant.id, secondary_id=generate_image_skill.id) dbmanager.link( link_type="workflow_agent", primary_id=default_workflow.id, secondary_id=user_proxy.id, agent_type="sender" ) dbmanager.link( link_type="workflow_agent", primary_id=default_workflow.id, - secondary_id=painter_assistant.id, + secondary_id=default_assistant.id, agent_type="receiver", ) diff --git a/samples/apps/autogen-studio/autogenstudio/datamodel.py b/samples/apps/autogen-studio/autogenstudio/datamodel.py index 3dbd46c357ee..5614153a8851 100644 --- a/samples/apps/autogen-studio/autogenstudio/datamodel.py +++ b/samples/apps/autogen-studio/autogenstudio/datamodel.py @@ -164,6 +164,7 @@ class WorkflowAgentType(str, Enum): sender = "sender" receiver = "receiver" planner = "planner" + sequential = "sequential" class WorkflowAgentLink(SQLModel, table=True): @@ -174,6 +175,7 @@ class WorkflowAgentLink(SQLModel, table=True): default=WorkflowAgentType.sender, sa_column=Column(SqlEnum(WorkflowAgentType), primary_key=True), ) + sequence_id: Optional[int] = None class AgentLink(SQLModel, table=True): @@ -218,8 +220,8 @@ class Agent(SQLModel, table=True): class WorkFlowType(str, Enum): - twoagents = "twoagents" - groupchat = "groupchat" + autonomous = "autonomous" + sequential = "sequential" class WorkFlowSummaryMethod(str, Enum): @@ -243,11 +245,12 @@ class Workflow(SQLModel, table=True): name: str description: str agents: List[Agent] = Relationship(back_populates="workflows", link_model=WorkflowAgentLink) - type: WorkFlowType = Field(default=WorkFlowType.twoagents, sa_column=Column(SqlEnum(WorkFlowType))) + type: WorkFlowType = 
Field(default=WorkFlowType.autonomous, sa_column=Column(SqlEnum(WorkFlowType))) summary_method: Optional[WorkFlowSummaryMethod] = Field( default=WorkFlowSummaryMethod.last, sa_column=Column(SqlEnum(WorkFlowSummaryMethod)), ) + sample_tasks: Optional[List[str]] = Field(default_factory=list, sa_column=Column(JSON)) class Response(SQLModel): diff --git a/samples/apps/autogen-studio/autogenstudio/utils/utils.py b/samples/apps/autogen-studio/autogenstudio/utils/utils.py index ed533ec3883c..e570e3b8e158 100644 --- a/samples/apps/autogen-studio/autogenstudio/utils/utils.py +++ b/samples/apps/autogen-studio/autogenstudio/utils/utils.py @@ -307,6 +307,8 @@ def get_skills_from_prompt(skills: List[Skill], work_dir: str) -> str: """ prompt = "" # filename: skills.py for skill in skills: + if not isinstance(skill, Skill): + skill = Skill(**skill) prompt += f""" ##### Begin of {skill.name} ##### diff --git a/samples/apps/autogen-studio/autogenstudio/version.py b/samples/apps/autogen-studio/autogenstudio/version.py index bafe37f75b14..1421652f8a3b 100644 --- a/samples/apps/autogen-studio/autogenstudio/version.py +++ b/samples/apps/autogen-studio/autogenstudio/version.py @@ -1,3 +1,3 @@ -VERSION = "0.0.56rc9" +VERSION = "0.0.56rc11" __version__ = VERSION APP_NAME = "autogenstudio" diff --git a/samples/apps/autogen-studio/autogenstudio/web/app.py b/samples/apps/autogen-studio/autogenstudio/web/app.py index 76ab8139ebc3..ada9c436d731 100644 --- a/samples/apps/autogen-studio/autogenstudio/web/app.py +++ b/samples/apps/autogen-studio/autogenstudio/web/app.py @@ -92,8 +92,15 @@ async def lifespan(app: FastAPI): allow_headers=["*"], ) - -api = FastAPI(root_path="/api") +show_docs = os.environ.get("AUTOGENSTUDIO_API_DOCS", "False").lower() == "true" +docs_url = "/docs" if show_docs else None +api = FastAPI( + root_path="/api", + title="AutoGen Studio API", + version=VERSION, + docs_url=docs_url, + description="AutoGen Studio is a low-code tool for building and testing multi-agent 
workflows using AutoGen.", +) # mount an api route such that the main route serves the ui and the /api app.mount("/api", api) @@ -293,6 +300,19 @@ async def get_workflow(workflow_id: int, user_id: str): return list_entity(Workflow, filters=filters) +@api.get("/workflows/export/{workflow_id}") +async def export_workflow(workflow_id: int, user_id: str): + """Export a user workflow""" + response = Response(message="Workflow exported successfully", status=True, data=None) + try: + workflow_details = workflow_from_id(workflow_id, dbmanager=dbmanager) + response.data = workflow_details + except Exception as ex_error: + response.message = "Error occurred while exporting workflow: " + str(ex_error) + response.status = False + return response.model_dump(mode="json") + + @api.post("/workflows") async def create_workflow(workflow: Workflow): """Create a new workflow""" diff --git a/samples/apps/autogen-studio/autogenstudio/web/serve.py b/samples/apps/autogen-studio/autogenstudio/web/serve.py new file mode 100644 index 000000000000..462615378b8a --- /dev/null +++ b/samples/apps/autogen-studio/autogenstudio/web/serve.py @@ -0,0 +1,30 @@ +# loads a fast api api endpoint with a single endpoint that takes text query and return a response + +import json +import os + +from fastapi import FastAPI + +from ..datamodel import Response +from ..workflowmanager import WorkflowManager + +app = FastAPI() +workflow_file_path = os.environ.get("AUTOGENSTUDIO_WORKFLOW_FILE", None) + + +if workflow_file_path: + workflow_manager = WorkflowManager(workflow=workflow_file_path) +else: + raise ValueError("Workflow file must be specified") + + +@app.get("/predict/{task}") +async def predict(task: str): + response = Response(message="Task successfully completed", status=True, data=None) + try: + result_message = workflow_manager.run(message=task, clear_history=False) + response.data = result_message + except Exception as e: + response.message = str(e) + response.status = False + return response diff 
--git a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py index 8b41caab4285..1bbdfd6b758b 100644 --- a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py @@ -1,4 +1,6 @@ +import json import os +import time from datetime import datetime from typing import Any, Dict, List, Optional, Union @@ -9,18 +11,27 @@ AgentType, Message, SocketMessage, + WorkFlowSummaryMethod, + WorkFlowType, +) +from .utils import ( + clear_folder, + get_modified_files, + get_skills_from_prompt, + load_code_execution_config, + sanitize_model, + summarize_chat_history, ) -from .utils import clear_folder, get_skills_from_prompt, load_code_execution_config, sanitize_model class WorkflowManager: """ - AutoGenWorkFlowManager class to load agents from a provided configuration and run a chat between them + WorkflowManager class to load agents from a provided configuration and run a chat between them. """ def __init__( self, - workflow: Dict, + workflow: Union[Dict, str], history: Optional[List[Message]] = None, work_dir: str = None, clear_work_dir: bool = True, @@ -28,27 +39,59 @@ def __init__( connection_id: Optional[str] = None, ) -> None: """ - Initializes the AutoGenFlow with agents specified in the config and optional - message history. + Initializes the WorkflowManager with agents specified in the config and optional message history. Args: - config: The configuration settings for the sender and receiver agents. - history: An optional list of previous messages to populate the agents' history. - + workflow (Union[Dict, str]): The workflow configuration. This can be a dictionary or a string which is a path to a JSON file. + history (Optional[List[Message]]): The message history. + work_dir (str): The working directory. + clear_work_dir (bool): If set to True, clears the working directory. 
+ send_message_function (Optional[callable]): The function to send messages. + connection_id (Optional[str]): The connection identifier. """ + if isinstance(workflow, str): + if os.path.isfile(workflow): + with open(workflow, "r") as file: + self.workflow = json.load(file) + else: + raise FileNotFoundError(f"The file {workflow} does not exist.") + elif isinstance(workflow, dict): + self.workflow = workflow + else: + raise ValueError("The 'workflow' parameter should be either a dictionary or a valid JSON file path") + # TODO - improved typing for workflow self.send_message_function = send_message_function self.connection_id = connection_id self.work_dir = work_dir or "work_dir" if clear_work_dir: clear_folder(self.work_dir) - self.workflow = workflow - self.sender = self.load(workflow.get("sender")) - self.receiver = self.load(workflow.get("receiver")) self.agent_history = [] + self.history = history or [] + self.sender = None + self.receiver = None + + def _run_workflow(self, message: str, history: Optional[List[Message]] = None, clear_history: bool = False) -> None: + """ + Runs the workflow based on the provided configuration. - if history: - self._populate_history(history) + Args: + message: The initial message to start the chat. + history: A list of messages to populate the agents' history. + clear_history: If set to True, clears the chat history before initiating. 
+ + """ + + if self.workflow.get("type", None) == WorkFlowType.autonomous.value: + self.sender = self.load(self.workflow.get("sender")) + self.receiver = self.load(self.workflow.get("receiver")) + if history: + self._populate_history(history) + self.sender.initiate_chat( + self.receiver, + message=message, + clear_history=clear_history, + ) def _serialize_agent( self, @@ -241,7 +284,7 @@ def load(self, agent: Any) -> autogen.Agent: raise ValueError(f"Unknown agent type: {agent.type}") return agent - def run(self, message: str, clear_history: bool = False) -> None: + def run(self, message: str, history: Optional[List[Message]] = None, clear_history: bool = False) -> Message: """ Initiates a chat between the sender and receiver agents with an initial message and an option to clear the history. @@ -250,11 +293,64 @@ def run(self, message: str, clear_history: bool = False) -> None: message: The initial message to start the chat. clear_history: If set to True, clears the chat history before initiating. """ - self.sender.initiate_chat( - self.receiver, - message=message, - clear_history=clear_history, + + start_time = time.time() + self._run_workflow(message=message, history=history, clear_history=clear_history) + end_time = time.time() + + output = self._generate_output(message, self.workflow.get("summary_method", "last")) + + result_message = Message( + content=output, + role="assistant", + meta={ + "messages": self.agent_history, + "summary_method": self.workflow.get("summary_method", "last"), + "time": end_time - start_time, + "files": get_modified_files(start_time, end_time, source_dir=self.work_dir), + }, ) + return result_message + + def _generate_output( + self, + message_text: str, + summary_method: str, + ) -> str: + """ + Generates the output response based on the workflow configuration and agent history. + + :param message_text: The text of the incoming message. + :param flow: An instance of `WorkflowManager`. 
+ :param flow_config: An instance of `AgentWorkFlowConfig`. + :return: The output response as a string. + """ + + output = "" + if summary_method == WorkFlowSummaryMethod.last: + (self.agent_history) + last_message = self.agent_history[-1]["message"]["content"] if self.agent_history else "" + output = last_message + elif summary_method == WorkFlowSummaryMethod.llm: + client = self.receiver.client + status_message = SocketMessage( + type="agent_status", + data={ + "status": "summarizing", + "message": "Summarizing agent dialogue", + }, + connection_id=self.connection_id, + ) + self.send_message_function(status_message.model_dump(mode="json")) + output = summarize_chat_history( + task=message_text, + messages=self.agent_history, + client=client, + ) + + elif summary_method == "none": + output = "" + return output class ExtendedConversableAgent(autogen.ConversableAgent): diff --git a/samples/apps/autogen-studio/frontend/src/components/types.ts b/samples/apps/autogen-studio/frontend/src/components/types.ts index eba391446028..9d1221f539a9 100644 --- a/samples/apps/autogen-studio/frontend/src/components/types.ts +++ b/samples/apps/autogen-studio/frontend/src/components/types.ts @@ -65,7 +65,7 @@ export interface IWorkflow { description: string; sender: IAgent; receiver: IAgent; - type: "twoagents" | "groupchat"; + type: "autonomous" | "sequential"; created_at?: string; updated_at?: string; summary_method?: "none" | "last" | "llm"; diff --git a/samples/apps/autogen-studio/frontend/src/components/utils.ts b/samples/apps/autogen-studio/frontend/src/components/utils.ts index 2264f5c66a21..8720a4ca106c 100644 --- a/samples/apps/autogen-studio/frontend/src/components/utils.ts +++ b/samples/apps/autogen-studio/frontend/src/components/utils.ts @@ -357,7 +357,7 @@ export const sampleAgentConfig = (agent_type: string = "assistant") => { } }; -export const sampleWorkflowConfig = (type = "twoagents") => { +export const sampleWorkflowConfig = (type = "autonomous") => { const 
llm_model_config: IModelConfig[] = []; const llm_config: ILLMConfig = { @@ -402,42 +402,9 @@ export const sampleWorkflowConfig = (type = "twoagents") => { description: "Default Agent Workflow", sender: userProxyFlowSpec, receiver: assistantFlowSpec, - type: "twoagents", + type: "autonomous", }; - const groupChatAssistantConfig = Object.assign( - { - admin_name: "groupchat_assistant", - messages: [], - max_round: 10, - speaker_selection_method: "auto", - allow_repeat_speaker: false, - description: "Group Chat Assistant", - }, - assistantConfig - ); - groupChatAssistantConfig.name = "groupchat_assistant"; - groupChatAssistantConfig.system_message = - "You are a helpful assistant skilled at cordinating a group of other assistants to solve a task. "; - - const groupChatFlowSpec: IAgent = { - type: "groupchat", - config: groupChatAssistantConfig, - }; - - const groupChatWorkFlowConfig: IWorkflow = { - name: "Default Group Workflow", - description: "Default Group Workflow", - sender: userProxyFlowSpec, - receiver: groupChatFlowSpec, - type: "groupchat", - }; - - if (type === "twoagents") { - return workFlowConfig; - } else if (type === "groupchat") { - return groupChatWorkFlowConfig; - } return workFlowConfig; }; diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/export.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/export.tsx new file mode 100644 index 000000000000..bb74bd0e2e37 --- /dev/null +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/export.tsx @@ -0,0 +1,207 @@ +import { Button, Modal, message } from "antd"; +import * as React from "react"; +import { IWorkflow } from "../../../types"; +import { ArrowDownTrayIcon } from "@heroicons/react/24/outline"; +import { + checkAndSanitizeInput, + fetchJSON, + getServerUrl, + sanitizeConfig, +} from "../../../utils"; +import { appContext } from "../../../../hooks/provider"; +import { CodeBlock } from "../../../atoms"; + +export 
const ExportWorkflowModal = ({ + workflow, + show, + setShow, +}: { + workflow: IWorkflow | null; + show: boolean; + setShow: (show: boolean) => void; +}) => { + const serverUrl = getServerUrl(); + const { user } = React.useContext(appContext); + + const [error, setError] = React.useState(null); + const [loading, setLoading] = React.useState(false); + const [workflowDetails, setWorkflowDetails] = React.useState(null); + + const getWorkflowCode = (workflow: IWorkflow) => { + const workflowCode = `from autogenstudio import WorkflowManager +# load workflow from exported json workflow file. +workflow_manager = WorkflowManager(workflow="path/to/your/workflow_.json") + +# run the workflow on a task +task_query = "What is the height of the Eiffel Tower?. Dont write code, just respond to the question." +workflow_manager.run(message=task_query)`; + return workflowCode; + }; + + const getCliWorkflowCode = (workflow: IWorkflow) => { + const workflowCode = `autogenstudio serve --workflow=workflow.json --port=5000 + `; + return workflowCode; + }; + + const getGunicornWorkflowCode = (workflow: IWorkflow) => { + const workflowCode = `gunicorn -w $((2 * $(getconf _NPROCESSORS_ONLN) + 1)) --timeout 12600 -k uvicorn.workers.UvicornWorker autogenstudio.web.app:app --bind `; + + return workflowCode; + }; + + const fetchWorkFlow = (workflow: IWorkflow) => { + setError(null); + setLoading(true); + // const fetch; + const payLoad = { + method: "GET", + headers: { + "Content-Type": "application/json", + }, + }; + const downloadWorkflowUrl = `${serverUrl}/workflows/export/${workflow.id}?user_id=${user?.email}`; + + const onSuccess = (data: any) => { + if (data && data.status) { + setWorkflowDetails(data.data); + console.log("workflow details", data.data); + + const sanitized_name = + checkAndSanitizeInput(workflow.name).sanitizedText || workflow.name; + const file_name = `workflow_${sanitized_name}.json`; + const workflowData = sanitizeConfig(data.data); + const file = new 
Blob([JSON.stringify(workflowData)], { + type: "application/json", + }); + const downloadUrl = URL.createObjectURL(file); + const a = document.createElement("a"); + a.href = downloadUrl; + a.download = file_name; + a.click(); + } else { + message.error(data.message); + } + setLoading(false); + }; + const onError = (err: any) => { + setError(err); + message.error(err.message); + setLoading(false); + }; + fetchJSON(downloadWorkflowUrl, payLoad, onSuccess, onError); + }; + + React.useEffect(() => { + if (workflow && workflow.id && show) { + // fetchWorkFlow(workflow.id); + console.log("workflow modal ... component loaded", workflow); + } + }, [show]); + + return ( + + Export Workflow + + {workflow?.name} + {" "} + + } + width={800} + open={show} + onOk={() => { + setShow(false); + }} + onCancel={() => { + setShow(false); + }} + footer={[]} + > +
+
+ {" "} + You can use the following steps to start integrating your workflow + into your application.{" "} +
+ {workflow && workflow.id && ( + <> +
+
+
Step 1
+
+ Download your workflow as a JSON file by clicking the button + below. +
+ +
+ +
+
+ +
+
Step 2
+
+ Copy the following code snippet and paste it into your + application to run your workflow on a task. +
+
+ +
+
+
+ +
+
+ Step 3 (Deploy) +
+
+ You can also deploy your workflow as an API endpoint using the + autogenstudio python CLI. +
+ +
+ + +
Note: this will start an endpoint on port 5000. You can change + the port by changing the port number. You can also scale this + using multiple workers (e.g., via an application server like + gunicorn) or wrap it in a docker container and deploy on a + cloud provider like Azure.
+ + +
+
+ + )} +
+
+ ); +}; diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/workflow.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/workflow.tsx index 12800de8022d..2cbe3f6150e3 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/workflow.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/workflow.tsx @@ -1,6 +1,7 @@ import { ArrowDownTrayIcon, ArrowUpTrayIcon, + CodeBracketSquareIcon, DocumentDuplicateIcon, InformationCircleIcon, PlusIcon, @@ -22,6 +23,7 @@ import { } from "../../utils"; import { BounceLoader, Card, CardHoverBar, LoadingOverlay } from "../../atoms"; import { WorflowViewer } from "./utils/workflowconfig"; +import { ExportWorkflowModal } from "./utils/export"; const WorkflowView = ({}: any) => { const [loading, setLoading] = React.useState(false); @@ -37,6 +39,8 @@ const WorkflowView = ({}: any) => { const [workflows, setWorkflows] = React.useState([]); const [selectedWorkflow, setSelectedWorkflow] = React.useState(null); + const [selectedExportWorkflow, setSelectedExportWorkflow] = + React.useState(null); const defaultConfig = sampleWorkflowConfig(); const [newWorkflow, setNewWorkflow] = React.useState( @@ -119,9 +123,21 @@ const WorkflowView = ({}: any) => { } }, [selectedWorkflow]); + const [showExportModal, setShowExportModal] = React.useState(false); + const workflowRows = (workflows || []).map( (workflow: IWorkflow, i: number) => { const cardItems = [ + { + title: "Export", + icon: CodeBracketSquareIcon, + onClick: (e: any) => { + e.stopPropagation(); + setSelectedExportWorkflow(workflow); + setShowExportModal(true); + }, + hoverText: "Export", + }, { title: "Download", icon: ArrowDownTrayIcon, @@ -285,28 +301,28 @@ const WorkflowView = ({}: any) => { }; const workflowTypes: MenuProps["items"] = [ - { - key: "twoagents", - label: ( -
- {" "} - - Two Agents -
- ), - }, - { - key: "groupchat", - label: ( -
- - Group Chat -
- ), - }, - { - type: "divider", - }, + // { + // key: "twoagents", + // label: ( + //
+ // {" "} + // + // Two Agents + //
+ // ), + // }, + // { + // key: "groupchat", + // label: ( + //
+ // + // Group Chat + //
+ // ), + // }, + // { + // type: "divider", + // }, { key: "uploadworkflow", label: ( @@ -352,6 +368,12 @@ const WorkflowView = ({}: any) => { }} /> + +
diff --git a/samples/apps/autogen-studio/notebooks/agent_spec.json b/samples/apps/autogen-studio/notebooks/agent_spec.json deleted file mode 100644 index 72d1e21ef1a7..000000000000 --- a/samples/apps/autogen-studio/notebooks/agent_spec.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "name": "General Agent Workflow", - "description": "A general agent workflow", - "sender": { - "type": "userproxy", - "config": { - "name": "userproxy", - "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 5, - "system_message": "", - "llm_config": false, - "code_execution_config": { - "work_dir": null, - "use_docker": false - } - } - }, - "receiver": { - "type": "assistant", - "config": { - "name": "primary_assistant", - "llm_config": { - "config_list": [ - { - "model": "gpt-4-1106-preview" - } - ], - "temperature": 0.1, - "timeout": 600, - "cache_seed": 42 - }, - "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 8, - "system_message": "You are a helpful assistant that can use available functions when needed to solve problems. At each point, do your best to determine if the user's request has been addressed. IF THE REQUEST HAS NOT BEEN ADDRESSED, RESPOND WITH CODE TO ADDRESS IT. IF A FAILURE OCCURRED (e.g., due to a missing library) AND SOME ADDITIONAL CODE WAS WRITTEN (e.g. code to install the library), ENSURE THAT THE ORIGINAL CODE TO ADDRESS THE TASK STILL GETS EXECUTED. If the request HAS been addressed, respond with a summary of the result. The summary must be written as a coherent helpful response to the user request e.g. 'Sure, here is result to your request ' or 'The tallest mountain in Africa is ..' etc. The summary MUST end with the word TERMINATE. If the user request is pleasantry or greeting, you should respond with a pleasantry or greeting and TERMINATE." 
- } - }, - "type": "twoagents" -} diff --git a/samples/apps/autogen-studio/notebooks/groupchat_spec.json b/samples/apps/autogen-studio/notebooks/groupchat_spec.json deleted file mode 100644 index 21cced7135b7..000000000000 --- a/samples/apps/autogen-studio/notebooks/groupchat_spec.json +++ /dev/null @@ -1,103 +0,0 @@ -{ - "name": "Travel Agent Group Chat Workflow", - "description": "A group chat workflow", - "type": "groupchat", - "sender": { - "type": "userproxy", - "config": { - "name": "userproxy", - "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 5, - "system_message": "", - "llm_config": false, - "code_execution_config": { - "work_dir": null, - "use_docker": false - } - } - }, - "receiver": { - "type": "groupchat", - "description": "A group chat workflow", - "config": { - "name": "group_chat_manager", - "llm_config": { - "config_list": [ - { - "model": "gpt-4-1106-preview" - } - ], - "temperature": 0.1, - "timeout": 600, - "cache_seed": 42 - }, - "human_input_mode": "NEVER", - "system_message": "Group chat manager" - }, - "groupchat_config": { - "admin_name": "Admin", - "max_round": 10, - "speaker_selection_method": "auto", - - "agents": [ - { - "type": "assistant", - "config": { - "name": "primary_assistant", - "llm_config": { - "config_list": [ - { - "model": "gpt-4-1106-preview" - } - ], - "temperature": 0.1, - "timeout": 600, - "cache_seed": 42 - }, - "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 8, - "system_message": "You are a helpful assistant that can suggest a travel itinerary for a user. You are the primary cordinator who will receive suggestions or advice from other agents (local_assistant, language_assistant). You must ensure that the finally plan integrates the suggestions from other agents or team members. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN that ends with the word TERMINATE. 
" - } - }, - { - "type": "assistant", - "config": { - "name": "local_assistant", - "llm_config": { - "config_list": [ - { - "model": "gpt-4-1106-preview" - } - ], - "temperature": 0.1, - "timeout": 600, - "cache_seed": 42 - }, - "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 8, - "system_message": "You are a helpful assistant that can review travel plans, providing critical feedback on how the trip can be enriched for enjoyment of the local culture. If the plan already includes local experiences, you can mention that the plan is satisfactory, with rationale." - } - }, - { - "type": "assistant", - "config": { - "name": "language_assistant", - "llm_config": { - "config_list": [ - { - "model": "gpt-4-1106-preview" - } - ], - "temperature": 0.1, - "timeout": 600, - "cache_seed": 42 - }, - "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 8, - "system_message": "You are a helpful assistant that can review travel plans, providing feedback on important/critical tips about how best to address language or communication challenges for the given destination. If the plan already includes language tips, you can mention that the plan is satisfactory, with rationale." 
- } - } - ] - } - } -} diff --git a/samples/apps/autogen-studio/notebooks/travel_groupchat.json b/samples/apps/autogen-studio/notebooks/travel_groupchat.json new file mode 100644 index 000000000000..6a94e92fe565 --- /dev/null +++ b/samples/apps/autogen-studio/notebooks/travel_groupchat.json @@ -0,0 +1,238 @@ +{ + "name": "Travel Planning Workflow", + "type": "twoagents", + "user_id": "guestuser@gmail.com", + "description": "Travel workflow", + "summary_method": "last", + "sender": { + "type": "userproxy", + "user_id": "guestuser@gmail.com", + "config": { + "name": "user_proxy", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a helpful assistant", + "is_termination_msg": null, + "code_execution_config": "local", + "default_auto_reply": "TERMINATE", + "description": "User Proxy Agent Configuration", + "llm_config": false, + "admin_name": "Admin", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "skills": [], + "models": [], + "agents": [] + }, + "receiver": { + "type": "groupchat", + "user_id": "guestuser@gmail.com", + "config": { + "name": "travel_groupchat", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a group chat manager", + "is_termination_msg": null, + "code_execution_config": "none", + "default_auto_reply": "TERMINATE", + "description": "Group Chat Agent Configuration", + "llm_config": { + "config_list": [ + { + "api_type": "open_ai", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "temperature": 0, + "cache_seed": null, + "timeout": null, + "max_tokens": 1000, + "extra_body": null + }, + "admin_name": "groupchat", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "skills": [], + "models": [ + { + "user_id": "guestuser@gmail.com", + "api_type": "open_ai", + "description": "OpenAI GPT-4 model", + 
"model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "agents": [ + { + "type": "userproxy", + "user_id": "guestuser@gmail.com", + "config": { + "name": "user_proxy", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a helpful assistant", + "is_termination_msg": null, + "code_execution_config": "local", + "default_auto_reply": "TERMINATE", + "description": "User Proxy Agent Configuration", + "llm_config": false, + "admin_name": "Admin", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "skills": [], + "models": [], + "agents": [] + }, + { + "type": "assistant", + "user_id": "guestuser@gmail.com", + "config": { + "name": "planner_assistant", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a helpful assistant that can suggest a travel plan for a user. You are the primary cordinator who will receive suggestions or advice from other agents (local_assistant, language_assistant). You must ensure that the finally plan integrates the suggestions from other agents or team members. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN. 
When the plan is complete and all perspectives are integrated, you can respond with TERMINATE.", + "is_termination_msg": null, + "code_execution_config": "none", + "default_auto_reply": "", + "description": "Assistant Agent", + "llm_config": { + "config_list": [ + { + "api_type": "open_ai", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "temperature": 0, + "cache_seed": null, + "timeout": null, + "max_tokens": 1000, + "extra_body": null + }, + "admin_name": "Admin", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "skills": [], + "models": [ + { + "user_id": "guestuser@gmail.com", + "api_type": "open_ai", + "description": "OpenAI GPT-4 model", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "agents": [] + }, + { + "type": "assistant", + "user_id": "guestuser@gmail.com", + "config": { + "name": "local_assistant", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a local assistant that can suggest local activities or places to visit for a user. You can suggest local activities, places to visit, restaurants to eat at, etc. You can also provide information about the weather, local events, etc. You can provide information about the local area, but you cannot suggest a complete travel plan. 
You can only provide information about the local area.", + "is_termination_msg": null, + "code_execution_config": "none", + "default_auto_reply": "", + "description": "Local Assistant Agent", + "llm_config": { + "config_list": [ + { + "api_type": "open_ai", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "temperature": 0, + "cache_seed": null, + "timeout": null, + "max_tokens": 1000, + "extra_body": null + }, + "admin_name": "Admin", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "skills": [], + "models": [ + { + "user_id": "guestuser@gmail.com", + "api_type": "open_ai", + "description": "OpenAI GPT-4 model", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "agents": [] + }, + { + "type": "assistant", + "user_id": "guestuser@gmail.com", + "config": { + "name": "language_assistant", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a helpful assistant that can review travel plans, providing feedback on important/critical tips about how best to address language or communication challenges for the given destination. 
If the plan already includes language tips, you can mention that the plan is satisfactory, with rationale.", + "is_termination_msg": null, + "code_execution_config": "none", + "default_auto_reply": "", + "description": "Language Assistant Agent", + "llm_config": { + "config_list": [ + { + "api_type": "open_ai", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "temperature": 0, + "cache_seed": null, + "timeout": null, + "max_tokens": 1000, + "extra_body": null + }, + "admin_name": "Admin", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "skills": [], + "models": [ + { + "user_id": "guestuser@gmail.com", + "api_type": "open_ai", + "description": "OpenAI GPT-4 model", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "agents": [] + } + ] + } +} diff --git a/samples/apps/autogen-studio/notebooks/tutorial.ipynb b/samples/apps/autogen-studio/notebooks/tutorial.ipynb index 7e80f17b7b55..800b09635242 100644 --- a/samples/apps/autogen-studio/notebooks/tutorial.ipynb +++ b/samples/apps/autogen-studio/notebooks/tutorial.ipynb @@ -4,11 +4,18 @@ "cell_type": "code", "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/homebrew/Caskroom/miniconda/base/envs/autog/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], "source": [ - "import json\n", - "\n", - "from autogenstudio import AgentWorkFlowConfig, AutoGenWorkFlowManager" + "from autogenstudio import WorkflowManager" ] }, { @@ -35,31 +42,37 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[33muserproxy\u001b[0m (to primary_assistant):\n", + "\u001b[33muser_proxy\u001b[0m (to default_assistant):\n", "\n", - "What is the height of the Eiffel Tower?. Dont write code, just respond to the question.\n", + "What is the height of the Eiffel Tower?. Dont write code, just respond to the question.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ "\n", "--------------------------------------------------------------------------------\n", - "\u001b[33mprimary_assistant\u001b[0m (to userproxy):\n", + "\u001b[33mdefault_assistant\u001b[0m (to user_proxy):\n", "\n", - "The Eiffel Tower is approximately 300 meters tall, not including antennas, and with the antennas, it reaches about 330 meters. TERMINATE.\n", + "The Eiffel Tower is approximately 300 meters (984 feet) tall, not including antennas. 
With antennas, it reaches a height of about 330 meters (1,083 feet).\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to default_assistant):\n", + "\n", + "TERMINATE\n", "\n", "--------------------------------------------------------------------------------\n" ] } ], "source": [ - "# load an agent specification in JSON\n", - "agent_spec = json.load(open(\"agent_spec.json\"))\n", - "\n", - "# Create a An AutoGen Workflow Configuration from the agent specification\n", - "agent_work_flow_config = AgentWorkFlowConfig(**agent_spec)\n", + "# load workflow from json file\n", + "workflow_manager = WorkflowManager(workflow=\"two_agent.json\")\n", "\n", - "agent_work_flow = AutoGenWorkFlowManager(agent_work_flow_config)\n", - "\n", - "# # Run the workflow on a task\n", + "# run the workflow on a task\n", "task_query = \"What is the height of the Eiffel Tower?. Dont write code, just respond to the question.\"\n", - "agent_work_flow.run(message=task_query)" + "workflow_manager.run(message=task_query)" ] }, { @@ -70,16 +83,29 @@ { "data": { "text/plain": [ - "[{'recipient': 'primary_assistant',\n", - " 'sender': 'userproxy',\n", - " 'message': 'What is the height of the Eiffel Tower?. Dont write code, just respond to the question.',\n", - " 'timestamp': '2024-02-07T12:34:35.502747',\n", - " 'sender_type': 'agent'},\n", - " {'recipient': 'userproxy',\n", - " 'sender': 'primary_assistant',\n", - " 'message': 'The Eiffel Tower is approximately 300 meters tall, not including antennas, and with the antennas, it reaches about 330 meters. TERMINATE.',\n", - " 'timestamp': '2024-02-07T12:34:35.508855',\n", - " 'sender_type': 'agent'}]" + "[{'recipient': 'default_assistant',\n", + " 'sender': 'user_proxy',\n", + " 'message': {'content': 'What is the height of the Eiffel Tower?. 
Dont write code, just respond to the question.',\n", + " 'role': 'user'},\n", + " 'timestamp': '2024-05-14T16:20:29.048237',\n", + " 'sender_type': 'agent',\n", + " 'connection_id': None,\n", + " 'message_type': 'agent_message'},\n", + " {'recipient': 'user_proxy',\n", + " 'sender': 'default_assistant',\n", + " 'message': {'content': 'The Eiffel Tower is approximately 300 meters (984 feet) tall, not including antennas. With antennas, it reaches a height of about 330 meters (1,083 feet).',\n", + " 'role': 'user'},\n", + " 'timestamp': '2024-05-14T16:20:31.797875',\n", + " 'sender_type': 'agent',\n", + " 'connection_id': None,\n", + " 'message_type': 'agent_message'},\n", + " {'recipient': 'default_assistant',\n", + " 'sender': 'user_proxy',\n", + " 'message': {'content': 'TERMINATE', 'role': 'user'},\n", + " 'timestamp': '2024-05-14T16:20:31.799830',\n", + " 'sender_type': 'agent',\n", + " 'connection_id': None,\n", + " 'message_type': 'agent_message'}]" ] }, "execution_count": 3, @@ -88,7 +114,8 @@ } ], "source": [ - "agent_work_flow.agent_history" + "# print the agent history\n", + "workflow_manager.agent_history" ] }, { @@ -107,290 +134,205 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[33muserproxy\u001b[0m (to group_chat_manager):\n", + "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", "\n", "plan a two day trip to Maui hawaii\n", "\n", "--------------------------------------------------------------------------------\n", - "\u001b[33mprimary_assistant\u001b[0m (to group_chat_manager):\n", + "\u001b[33mplanner_assistant\u001b[0m (to chat_manager):\n", "\n", - "To plan a two-day trip to Maui, Hawaii, we'll need to consider your interests, preferences for activities, and the logistics of travel within the island. Here's a basic itinerary that we can refine with more details:\n", + "To plan a two-day trip to Maui, Hawaii, we'll need to consider several aspects such as accommodation, transportation, activities, and dining options. 
Here's a preliminary outline for your trip:\n", "\n", - "**Day 1: Exploring West Maui**\n", + "**Day 1: Arrival and Exploration**\n", "\n", - "- Morning:\n", - " - Arrival at Kahului Airport (OGG).\n", - " - Pick up rental car.\n", - " - Breakfast at a local café near the airport.\n", - " - Drive to Lahaina, a historic whaling village.\n", + "- **Morning:**\n", + " - Arrive at Kahului Airport (OGG).\n", + " - Pick up a rental car (consider booking in advance).\n", + " - Check into your hotel or accommodation. (I'll need to know your preferences to suggest options.)\n", "\n", - "- Midday:\n", - " - Visit Lahaina Historic Trail for a self-guided walking tour.\n", - " - Lunch at a seaside restaurant in Lahaina.\n", + "- **Afternoon:**\n", + " - Lunch at a local restaurant (I'll get suggestions from the local_assistant).\n", + " - Visit the Iao Valley State Park to see the Iao Needle and take a short hike.\n", "\n", - "- Afternoon:\n", - " - Snorkeling tour at Ka'anapali Beach.\n", - " - Relax on the beach or by the hotel pool.\n", + "- **Evening:**\n", + " - Dinner at a recommended restaurant (local_assistant will provide options).\n", + " - Relax at the hotel or explore local shops and the beach.\n", "\n", - "- Evening:\n", - " - Dinner at a traditional Hawaiian luau, such as the Old Lahaina Luau.\n", - " - Return to hotel for overnight stay.\n", + "**Day 2: Adventure and Relaxation**\n", "\n", - "**Day 2: The Road to Hana**\n", + "- **Morning:**\n", + " - Drive to Haleakalā National Park to witness the sunrise (requires early reservation).\n", + " - Breakfast at a café near the park (suggestions to come from local_assistant).\n", "\n", - "- Early Morning:\n", - " - Check out of the hotel.\n", - " - Grab a quick breakfast and coffee to go.\n", + "- **Afternoon:**\n", + " - Visit the Maui Ocean Center or take a snorkeling trip to Molokini Crater.\n", + " - Lunch at a seaside restaurant (local_assistant will advise).\n", "\n", - "- Morning to Afternoon:\n", - " - Begin 
the scenic drive on the Road to Hana.\n", - " - Stop at Twin Falls for a short hike and swim.\n", - " - Visit Waianapanapa State Park to see the black sand beach.\n", - " - Picnic lunch at one of the many lookout points.\n", + "- **Evening:**\n", + " - Attend a traditional Luau for dinner and entertainment (booking required, I'll get options).\n", + " - Return to the hotel to pack and prepare for departure.\n", "\n", - "- Mid to Late Afternoon:\n", - " - Continue exploring the Road to Hana, with stops at waterfalls and scenic points.\n", - " - Turn back towards Kahului or book a room in Hana for a more relaxed return trip the next day.\n", + "**Additional Considerations:**\n", + "- Language tips and cultural etiquette (language_assistant will provide insights).\n", + "- COVID-19 protocols and safety measures (I'll update you on the latest requirements).\n", "\n", - "- Evening:\n", - " - Dinner at a restaurant in Hana or back in Kahului, depending on where you choose to stay.\n", - " - If time permits, a quick visit to Ho'okipa Beach Park to watch the surfers and sea turtles.\n", + "I will now gather more specific suggestions from the local_assistant and language_assistant to complete your travel plan. Please hold on.\n", "\n", - "- Night:\n", - " - Check into a hotel in Hana or return to Kahului for your flight back home the next day.\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mlocal_assistant\u001b[0m (to chat_manager):\n", "\n", - "This itinerary is just a starting point. Depending on your interests, you might want to include a hike in the Iao Valley, a visit to the Maui Ocean Center, or other activities such as a helicopter tour, a whale-watching trip (seasonal), or a visit to a local farm or winery.\n", + "As your local assistant, I can provide you with more detailed suggestions for your two-day trip to Maui, Hawaii. 
Here's a refined plan with local insights:\n", "\n", - "Now, let's refine this itinerary with suggestions from our local_assistant and language_assistant to ensure we're considering all the best local advice and any language or cultural tips that might enhance your trip. \n", + "**Day 1: Arrival and Exploration**\n", "\n", - "[Waiting for input from local_assistant and language_assistant to finalize the itinerary.]\n", + "- **Morning:**\n", + " - Upon arrival, consider renting a car from companies like Hertz, Enterprise, or a local rental agency.\n", + " - For accommodations, you might want to look into hotels in the Wailea area for luxury stays or Kihei for more budget-friendly options.\n", "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mlocal_assistant\u001b[0m (to group_chat_manager):\n", + "- **Afternoon:**\n", + " - Enjoy a local lunch at Paia Fish Market in Paia Town, known for its fresh seafood.\n", + " - Head to Iao Valley State Park and enjoy the lush scenery and historical significance.\n", "\n", - "As the primary assistant, I've provided a basic itinerary for a two-day trip to Maui, Hawaii. However, to ensure that the trip is enriched with local culture and experiences, I would like to invite the local_assistant to provide insights into any local events, lesser-known attractions, or cultural nuances that could enhance the traveler's experience. 
Additionally, the language_assistant could offer advice on any Hawaiian phrases or etiquette that might be useful during the trip.\n", + "- **Evening:**\n", + " - Dine at Mama's Fish House in Paia for a high-end dinner with ocean views.\n", + " - Stroll through Lahaina Town and check out the local art galleries and shops.\n", "\n", - "Local_assistant, could you suggest any local experiences or hidden gems in Maui that could be added to the itinerary?\n", + "**Day 2: Adventure and Relaxation**\n", "\n", - "Language_assistant, could you provide some useful Hawaiian phrases and cultural etiquette tips for a traveler visiting Maui for the first time?\n", + "- **Morning:**\n", + " - For Haleakalā sunrise, make sure to reserve your spot well in advance as it's required for sunrise viewing.\n", + " - Grab breakfast at Kula Lodge with a view of the West Maui Mountains.\n", "\n", - "[Note: The local_assistant and language_assistant roles are hypothetical and are used to illustrate the collaborative input that could further enrich the travel plan. 
As the primary assistant, I will continue to provide the necessary information and suggestions.]\n", + "- **Afternoon:**\n", + " - If you choose the Maui Ocean Center, you'll find it in Ma'alaea with exhibits of local marine life.\n", + " - For snorkeling, consider a tour operator like Pacific Whale Foundation for trips to Molokini Crater.\n", + " - Have lunch at Café O'Lei at the Dunes for a mix of local and international cuisine.\n", "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mlocal_assistant\u001b[0m (to group_chat_manager):\n", + "- **Evening:**\n", + " - Book a Luau, such as the Old Lahaina Luau or the Feast at Lele, for a cultural experience with food and performances.\n", + " - Enjoy your last evening and prepare for your departure the next day.\n", "\n", - "As your primary assistant, I'll incorporate the cultural and linguistic aspects into your Maui trip plan to ensure a rich and authentic experience.\n", + "**Weather and Events:**\n", + "- Maui typically has warm weather year-round, but it's always good to check the forecast closer to your trip for any unexpected changes.\n", + "- Check local event calendars for any festivals or special events that might be happening during your visit.\n", "\n", - "**Cultural Enrichment:**\n", + "Remember to book activities and dining in advance, especially during peak tourist seasons. Enjoy your trip to Maui!\n", "\n", - "- **Local Cuisine:** Make sure to try traditional Hawaiian dishes such as poke, laulau, and poi. 
Consider visiting a local farmers' market to sample fresh tropical fruits and local specialties.\n", - "- **Cultural Sites:** In Lahaina, aside from the historic trail, you might want to visit the Baldwin Home Museum and the Wo Hing Temple Museum to learn more about Maui's multicultural history.\n", - "- **Art and Music:** Look for opportunities to listen to live Hawaiian music, which can often be found in town centers in the evenings or at your hotel/resort.\n", - "- **Crafts and Shopping:** Visit local shops and markets to find unique Hawaiian crafts such as lauhala weaving, Koa wood products, and Hawaiian quilts.\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mlanguage_assistant\u001b[0m (to chat_manager):\n", + "\n", + "As the language assistant, I can provide you with some language tips and communication strategies for your trip to Maui, Hawaii. While English is the primary language spoken in Hawaii, the Hawaiian language (ʻŌlelo Hawaiʻi) is an official language of the state and is proudly preserved and used in many cultural contexts.\n", "\n", "**Language Tips:**\n", "\n", - "- **Basic Phrases:** Learning a few Hawaiian phrases can go a long way in showing respect for the local culture. Here are some to get you started:\n", + "- **Basic Hawaiian Phrases:**\n", " - Aloha - Hello, goodbye, love\n", " - Mahalo - Thank you\n", - " - E komo mai - Welcome\n", + " - E kala mai - Excuse me\n", " - A hui hou - Until we meet again\n", - "- **Pronunciation:** Hawaiian words are pronounced with every vowel spoken. For example, \"Haleakalā\" is pronounced \"Ha-lay-ah-ka-lah.\"\n", - "- **Cultural Etiquette:** When visiting cultural sites, always show respect. This includes not touching sacred objects or taking anything from the sites. 
Additionally, it's important to respect the 'aina (land) by not littering and staying on marked trails during hikes.\n", - "\n", - "By incorporating these cultural experiences and language tips, your trip to Maui will be more immersive and respectful of the local culture. Enjoy your adventure in this beautiful Hawaiian island!\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mlanguage_assistant\u001b[0m (to group_chat_manager):\n", - "\n", - "Thank you for the local insights and language tips. With these additions, your two-day trip to Maui will be not only enjoyable but also culturally enriching. Here's the updated itinerary with the local and language enhancements:\n", - "\n", - "**Updated Two-Day Maui Itinerary**\n", - "\n", - "**Day 1: Exploring West Maui with Cultural Insights**\n", + " - ʻOno - Delicious or tasty\n", "\n", - "- Morning:\n", - " - Arrival at Kahului Airport (OGG).\n", - " - Pick up rental car.\n", - " - Breakfast at a local café, trying a Hawaiian breakfast specialty.\n", - " - Drive to Lahaina, a historic whaling village.\n", + "- **Cultural Etiquette:**\n", + " - Use \"Aloha\" and \"Mahalo\" generously. They are key expressions of the friendly Hawaiian spirit.\n", + " - Be respectful when visiting cultural sites and heed any posted kapu (taboo) signs, which indicate restricted or sacred areas.\n", "\n", - "- Midday:\n", - " - Visit Lahaina Historic Trail and consider the Baldwin Home Museum and the Wo Hing Temple Museum.\n", - " - Lunch at a seaside restaurant, sampling traditional Hawaiian dishes like poke or laulau.\n", + "- **Pronunciation:**\n", + " - Hawaiian words are phonetic and every letter is pronounced. 
For example, \"Haleakalā\" is pronounced \"Ha-lay-ah-ka-lah.\"\n", + " - Pay attention to the ʻokina (‘), which is a glottal stop, similar to the sound between the syllables of \"oh-oh.\"\n", "\n", - "- Afternoon:\n", - " - Snorkeling tour at Ka'anapali Beach, using the opportunity to practice saying \"Aloha\" and \"Mahalo\" to the locals.\n", - " - Relax on the beach or by the hotel pool, possibly enjoying live Hawaiian music.\n", + "- **Learning Resources:**\n", + " - Consider downloading a Hawaiian language app or a phrasebook to familiarize yourself with common words and phrases.\n", + " - Listen to Hawaiian music or watch videos to get accustomed to the pronunciation.\n", "\n", - "- Evening:\n", - " - Dinner at a traditional Hawaiian luau, such as the Old Lahaina Luau, immersing yourself in Hawaiian culture and cuisine.\n", - " - Return to hotel for overnight stay.\n", + "**Communication Strategies:**\n", "\n", - "**Day 2: The Road to Hana with a Focus on Nature and Culture**\n", + "- **English Proficiency:**\n", + " - English is widely spoken, so you should have no trouble communicating with locals and service providers.\n", "\n", - "- Early Morning:\n", - " - Check out of the hotel.\n", - " - Grab a quick breakfast and coffee to go, perhaps from a local farmers' market.\n", + "- **Signage and Information:**\n", + " - All important signs, menus, and information will typically be available in English.\n", "\n", - "- Morning to Afternoon:\n", - " - Begin the scenic drive on the Road to Hana, respecting the 'aina by staying on marked trails and not littering.\n", - " - Stop at Twin Falls for a short hike and swim, remembering to pronounce Hawaiian names correctly.\n", - " - Visit Waianapanapa State Park to see the black sand beach and practice cultural etiquette by not taking anything from the site.\n", + "- **Cultural Sensitivity:**\n", + " - Show respect for the Hawaiian culture and traditions. 
If you're unsure about something, it's okay to ask questions politely.\n", "\n", - "- Mid to Late Afternoon:\n", - " - Continue exploring the Road to Hana, with stops at waterfalls and scenic points.\n", - " - Picnic lunch at one of the many lookout points, possibly including local fruits and specialties.\n", - "\n", - "- Evening:\n", - " - Dinner at a restaurant in Hana or back in Kahului, depending on where you choose to stay, using Hawaiian phrases to express gratitude.\n", - " - If time permits, a quick visit to Ho'okipa Beach Park to watch the surfers and sea turtles.\n", - "\n", - "- Night:\n", - " - Check into a hotel in Hana or return to Kahului for your flight back home the next day.\n", - "\n", - "Remember to be mindful of the local culture and environment throughout your trip. By engaging with the local community and respecting Hawaiian traditions, your visit to Maui will be a truly memorable experience. Enjoy your journey to this beautiful island paradise!\n", + "Since English is the primary language in Hawaii, you should not encounter significant language barriers. However, showing an interest in the Hawaiian language and culture can enhance your experience and is appreciated by locals. Your travel plan does not specifically mention language preparation, but given the widespread use of English, you should be well-equipped to communicate effectively during your trip. 
If you wish to immerse yourself more deeply in the local culture, learning a few Hawaiian phrases and understanding the cultural context would be beneficial.\n", "\n", "--------------------------------------------------------------------------------\n", - "\u001b[33mprimary_assistant\u001b[0m (to group_chat_manager):\n", - "\n", - "**Final Two-Day Maui Itinerary with Cultural and Language Integration**\n", + "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", "\n", - "**Day 1: Exploring West Maui with Cultural Insights**\n", - "\n", - "- Morning:\n", - " - Arrival at Kahului Airport (OGG).\n", - " - Pick up rental car.\n", - " - Breakfast at a local café, trying a Hawaiian breakfast specialty.\n", - " - Drive to Lahaina, a historic whaling village.\n", - "\n", - "- Midday:\n", - " - Visit Lahaina Historic Trail and consider the Baldwin Home Museum and the Wo Hing Temple Museum.\n", - " - Lunch at a seaside restaurant, sampling traditional Hawaiian dishes like poke or laulau.\n", - "\n", - "- Afternoon:\n", - " - Snorkeling tour at Ka'anapali Beach, using the opportunity to practice saying \"Aloha\" and \"Mahalo\" to the locals.\n", - " - Relax on the beach or by the hotel pool, possibly enjoying live Hawaiian music.\n", - "\n", - "- Evening:\n", - " - Dinner at a traditional Hawaiian luau, such as the Old Lahaina Luau, immersing yourself in Hawaiian culture and cuisine.\n", - " - Return to hotel for overnight stay.\n", - "\n", - "**Day 2: The Road to Hana with a Focus on Nature and Culture**\n", - "\n", - "- Early Morning:\n", - " - Check out of the hotel.\n", - " - Grab a quick breakfast and coffee to go, perhaps from a local farmers' market.\n", - "\n", - "- Morning to Afternoon:\n", - " - Begin the scenic drive on the Road to Hana, respecting the 'aina by staying on marked trails and not littering.\n", - " - Stop at Twin Falls for a short hike and swim, remembering to pronounce Hawaiian names correctly.\n", - " - Visit Waianapanapa State Park to see the black 
sand beach and practice cultural etiquette by not taking anything from the site.\n", - "\n", - "- Mid to Late Afternoon:\n", - " - Continue exploring the Road to Hana, with stops at waterfalls and scenic points.\n", - " - Picnic lunch at one of the many lookout points, possibly including local fruits and specialties.\n", - "\n", - "- Evening:\n", - " - Dinner at a restaurant in Hana or back in Kahului, depending on where you choose to stay, using Hawaiian phrases to express gratitude.\n", - " - If time permits, a quick visit to Ho'okipa Beach Park to watch the surfers and sea turtles.\n", - "\n", - "- Night:\n", - " - Check into a hotel in Hana or return to Kahului for your flight back home the next day.\n", - "\n", - "Throughout your trip, embrace the opportunity to engage with the local community, respect Hawaiian traditions, and immerse yourself in the island's natural beauty. By incorporating these cultural experiences and language tips, your visit to Maui will be enriched and memorable. Have a fantastic journey to this enchanting island paradise! 
TERMINATE\n", + "TERMINATE\n", "\n", "--------------------------------------------------------------------------------\n" ] } ], "source": [ - "# load an agent specification in JSON\n", - "agent_spec = json.load(open(\"groupchat_spec.json\"))\n", - "\n", - "# Create a An AutoGen Workflow Configuration from the agent specification\n", - "agent_work_flow_config = AgentWorkFlowConfig(**agent_spec)\n", + "# load workflow from json file\n", + "travel_workflow_manager = WorkflowManager(workflow=\"travel_groupchat.json\")\n", "\n", - "# Create a Workflow from the configuration\n", - "group_agent_work_flow = AutoGenWorkFlowManager(agent_work_flow_config)\n", - "\n", - "# Run the workflow on a task\n", + "# run the workflow on a task\n", "task_query = \"plan a two day trip to Maui hawaii\"\n", - "group_agent_work_flow.run(message=task_query)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "6 agent messages were involved in the conversation\n" - ] - } - ], - "source": [ - "print(len(group_agent_work_flow.agent_history), \"agent messages were involved in the conversation\")" + "travel_workflow_manager.run(message=task_query)" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 10, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[{'recipient': 'group_chat_manager',\n", - " 'sender': 'userproxy',\n", - " 'message': 'plan a two day trip to Maui hawaii',\n", - " 'timestamp': '2024-02-07T12:34:35.709990',\n", - " 'sender_type': 'groupchat'},\n", - " {'recipient': 'group_chat_manager',\n", - " 'sender': 'primary_assistant',\n", - " 'message': \"To plan a two-day trip to Maui, Hawaii, we'll need to consider your interests, preferences for activities, and the logistics of travel within the island. 
Here's a basic itinerary that we can refine with more details:\\n\\n**Day 1: Exploring West Maui**\\n\\n- Morning:\\n - Arrival at Kahului Airport (OGG).\\n - Pick up rental car.\\n - Breakfast at a local café near the airport.\\n - Drive to Lahaina, a historic whaling village.\\n\\n- Midday:\\n - Visit Lahaina Historic Trail for a self-guided walking tour.\\n - Lunch at a seaside restaurant in Lahaina.\\n\\n- Afternoon:\\n - Snorkeling tour at Ka'anapali Beach.\\n - Relax on the beach or by the hotel pool.\\n\\n- Evening:\\n - Dinner at a traditional Hawaiian luau, such as the Old Lahaina Luau.\\n - Return to hotel for overnight stay.\\n\\n**Day 2: The Road to Hana**\\n\\n- Early Morning:\\n - Check out of the hotel.\\n - Grab a quick breakfast and coffee to go.\\n\\n- Morning to Afternoon:\\n - Begin the scenic drive on the Road to Hana.\\n - Stop at Twin Falls for a short hike and swim.\\n - Visit Waianapanapa State Park to see the black sand beach.\\n - Picnic lunch at one of the many lookout points.\\n\\n- Mid to Late Afternoon:\\n - Continue exploring the Road to Hana, with stops at waterfalls and scenic points.\\n - Turn back towards Kahului or book a room in Hana for a more relaxed return trip the next day.\\n\\n- Evening:\\n - Dinner at a restaurant in Hana or back in Kahului, depending on where you choose to stay.\\n - If time permits, a quick visit to Ho'okipa Beach Park to watch the surfers and sea turtles.\\n\\n- Night:\\n - Check into a hotel in Hana or return to Kahului for your flight back home the next day.\\n\\nThis itinerary is just a starting point. 
Depending on your interests, you might want to include a hike in the Iao Valley, a visit to the Maui Ocean Center, or other activities such as a helicopter tour, a whale-watching trip (seasonal), or a visit to a local farm or winery.\\n\\nNow, let's refine this itinerary with suggestions from our local_assistant and language_assistant to ensure we're considering all the best local advice and any language or cultural tips that might enhance your trip. \\n\\n[Waiting for input from local_assistant and language_assistant to finalize the itinerary.]\",\n", - " 'timestamp': '2024-02-07T12:34:35.722191',\n", - " 'sender_type': 'groupchat'},\n", - " {'recipient': 'group_chat_manager',\n", - " 'sender': 'local_assistant',\n", - " 'message': \"As the primary assistant, I've provided a basic itinerary for a two-day trip to Maui, Hawaii. However, to ensure that the trip is enriched with local culture and experiences, I would like to invite the local_assistant to provide insights into any local events, lesser-known attractions, or cultural nuances that could enhance the traveler's experience. Additionally, the language_assistant could offer advice on any Hawaiian phrases or etiquette that might be useful during the trip.\\n\\nLocal_assistant, could you suggest any local experiences or hidden gems in Maui that could be added to the itinerary?\\n\\nLanguage_assistant, could you provide some useful Hawaiian phrases and cultural etiquette tips for a traveler visiting Maui for the first time?\\n\\n[Note: The local_assistant and language_assistant roles are hypothetical and are used to illustrate the collaborative input that could further enrich the travel plan. 
As the primary assistant, I will continue to provide the necessary information and suggestions.]\",\n", - " 'timestamp': '2024-02-07T12:34:35.731563',\n", - " 'sender_type': 'groupchat'},\n", - " {'recipient': 'group_chat_manager',\n", + "[{'recipient': 'chat_manager',\n", + " 'sender': 'user_proxy',\n", + " 'message': {'content': 'plan a two day trip to Maui hawaii', 'role': 'user'},\n", + " 'timestamp': '2024-05-14T16:08:25.904718',\n", + " 'sender_type': 'groupchat',\n", + " 'connection_id': None,\n", + " 'message_type': 'agent_message'},\n", + " {'recipient': 'chat_manager',\n", + " 'sender': 'planner_assistant',\n", + " 'message': {'content': \"To plan a two-day trip to Maui, Hawaii, we'll need to consider several aspects such as accommodations, transportation, activities, dining, and any specific interests you might have. Here's a preliminary outline for your trip:\\n\\n**Day 1: Arrival and Exploration**\\n\\n- **Morning:**\\n - Arrive at Kahului Airport (OGG).\\n - Pick up a rental car (considering the limited public transportation on the island).\\n - Check into your hotel or resort. I recommend staying in the West Maui area for its beautiful beaches and great snorkeling spots.\\n\\n- **Afternoon:**\\n - Have lunch at a local restaurant, such as Mama's Fish House for some fresh seafood.\\n - Visit the historic town of Lahaina. You can walk along Front Street, check out the art galleries, and see the famous Banyan Tree.\\n\\n- **Evening:**\\n - Attend a traditional Hawaiian luau. 
The Old Lahaina Luau is highly recommended for an authentic experience.\\n - Return to the hotel and rest.\\n\\n**Day 2: Adventure and Relaxation**\\n\\n- **Morning:**\\n - Drive to Haleakalā National Park to witness the sunrise (requires a reservation).\\n - Explore the park's visitor center and hiking trails.\\n\\n- **Afternoon:**\\n - Return to the hotel for a late breakfast or early lunch.\\n - Spend the afternoon at Ka'anapali Beach, where you can relax, swim, or snorkel.\\n\\n- **Evening:**\\n - Have dinner at a restaurant with a view, such as Merriman's Kapalua.\\n - Enjoy a sunset walk on the beach before heading back to the hotel.\\n\\n**Additional Tips:**\\n- Make reservations for accommodations, car rentals, and activities in advance, especially during peak travel seasons.\\n- Pack sunscreen, a hat, and comfortable walking shoes.\\n- Stay hydrated and respect the local environment and culture.\\n\\nI will now consult with the local_assistant and language_assistant for any additional suggestions or advice to enhance this travel plan. Please hold on.\",\n", + " 'role': 'user'},\n", + " 'timestamp': '2024-05-14T16:08:40.663732',\n", + " 'sender_type': 'groupchat',\n", + " 'connection_id': None,\n", + " 'message_type': 'agent_message'},\n", + " {'recipient': 'chat_manager',\n", " 'sender': 'local_assistant',\n", - " 'message': 'As your primary assistant, I\\'ll incorporate the cultural and linguistic aspects into your Maui trip plan to ensure a rich and authentic experience.\\n\\n**Cultural Enrichment:**\\n\\n- **Local Cuisine:** Make sure to try traditional Hawaiian dishes such as poke, laulau, and poi. 
Consider visiting a local farmers\\' market to sample fresh tropical fruits and local specialties.\\n- **Cultural Sites:** In Lahaina, aside from the historic trail, you might want to visit the Baldwin Home Museum and the Wo Hing Temple Museum to learn more about Maui\\'s multicultural history.\\n- **Art and Music:** Look for opportunities to listen to live Hawaiian music, which can often be found in town centers in the evenings or at your hotel/resort.\\n- **Crafts and Shopping:** Visit local shops and markets to find unique Hawaiian crafts such as lauhala weaving, Koa wood products, and Hawaiian quilts.\\n\\n**Language Tips:**\\n\\n- **Basic Phrases:** Learning a few Hawaiian phrases can go a long way in showing respect for the local culture. Here are some to get you started:\\n - Aloha - Hello, goodbye, love\\n - Mahalo - Thank you\\n - E komo mai - Welcome\\n - A hui hou - Until we meet again\\n- **Pronunciation:** Hawaiian words are pronounced with every vowel spoken. For example, \"Haleakalā\" is pronounced \"Ha-lay-ah-ka-lah.\"\\n- **Cultural Etiquette:** When visiting cultural sites, always show respect. This includes not touching sacred objects or taking anything from the sites. Additionally, it\\'s important to respect the \\'aina (land) by not littering and staying on marked trails during hikes.\\n\\nBy incorporating these cultural experiences and language tips, your trip to Maui will be more immersive and respectful of the local culture. Enjoy your adventure in this beautiful Hawaiian island!',\n", - " 'timestamp': '2024-02-07T12:34:35.740694',\n", - " 'sender_type': 'groupchat'},\n", - " {'recipient': 'group_chat_manager',\n", + " 'message': {'content': \"As a local assistant, I can provide you with more specific suggestions and information to enhance your two-day trip to Maui. Here are some additional tips and local insights:\\n\\n**Weather:**\\n- Check the weather forecast before you go. 
Maui's weather can be quite variable, with microclimates across the island. For example, Haleakalā National Park can be quite chilly in the morning, so bring a jacket.\\n\\n**Local Events:**\\n- Look for any local events happening during your stay. Maui often hosts cultural festivals, art shows, and farmers' markets that can offer a unique local experience.\\n\\n**Dining:**\\n- For a casual lunch, consider trying local food trucks or a plate lunch from a place like Aloha Mixed Plate.\\n- If you're interested in farm-to-table dining, visit a restaurant like The Mill House, which sources ingredients from the surrounding plantation.\\n\\n**Activities:**\\n- If you're interested in water activities, consider a snorkeling trip to Molokini Crater.\\n- For a unique experience, visit the Maui Ocean Center to learn about Hawaii's marine life.\\n- If you have time, a drive along the Road to Hana offers breathtaking scenery and numerous stops for waterfalls, hikes, and beaches. However, this is a full-day activity, so it might be challenging to fit into a two-day itinerary.\\n\\n**Shopping:**\\n- Whalers Village in Ka'anapali offers a variety of shops and restaurants if you're looking for souvenirs or local products.\\n\\n**Cultural Experiences:**\\n- Visit the Maui Arts & Cultural Center to see if there are any performances or exhibits during your stay.\\n\\nRemember to respect the local environment by not touching or stepping on coral reefs and by using reef-safe sunscreen to protect marine life. 
Always follow any posted signs and local guidelines to ensure the preservation of Maui's natural beauty.\\n\\nEnjoy your trip to Maui, and feel free to ask if you have any specific questions or need further assistance!\",\n", + " 'role': 'user'},\n", + " 'timestamp': '2024-05-14T16:08:54.848279',\n", + " 'sender_type': 'groupchat',\n", + " 'connection_id': None,\n", + " 'message_type': 'agent_message'},\n", + " {'recipient': 'chat_manager',\n", " 'sender': 'language_assistant',\n", - " 'message': 'Thank you for the local insights and language tips. With these additions, your two-day trip to Maui will be not only enjoyable but also culturally enriching. Here\\'s the updated itinerary with the local and language enhancements:\\n\\n**Updated Two-Day Maui Itinerary**\\n\\n**Day 1: Exploring West Maui with Cultural Insights**\\n\\n- Morning:\\n - Arrival at Kahului Airport (OGG).\\n - Pick up rental car.\\n - Breakfast at a local café, trying a Hawaiian breakfast specialty.\\n - Drive to Lahaina, a historic whaling village.\\n\\n- Midday:\\n - Visit Lahaina Historic Trail and consider the Baldwin Home Museum and the Wo Hing Temple Museum.\\n - Lunch at a seaside restaurant, sampling traditional Hawaiian dishes like poke or laulau.\\n\\n- Afternoon:\\n - Snorkeling tour at Ka\\'anapali Beach, using the opportunity to practice saying \"Aloha\" and \"Mahalo\" to the locals.\\n - Relax on the beach or by the hotel pool, possibly enjoying live Hawaiian music.\\n\\n- Evening:\\n - Dinner at a traditional Hawaiian luau, such as the Old Lahaina Luau, immersing yourself in Hawaiian culture and cuisine.\\n - Return to hotel for overnight stay.\\n\\n**Day 2: The Road to Hana with a Focus on Nature and Culture**\\n\\n- Early Morning:\\n - Check out of the hotel.\\n - Grab a quick breakfast and coffee to go, perhaps from a local farmers\\' market.\\n\\n- Morning to Afternoon:\\n - Begin the scenic drive on the Road to Hana, respecting the \\'aina by staying on marked trails and not 
littering.\\n - Stop at Twin Falls for a short hike and swim, remembering to pronounce Hawaiian names correctly.\\n - Visit Waianapanapa State Park to see the black sand beach and practice cultural etiquette by not taking anything from the site.\\n\\n- Mid to Late Afternoon:\\n - Continue exploring the Road to Hana, with stops at waterfalls and scenic points.\\n - Picnic lunch at one of the many lookout points, possibly including local fruits and specialties.\\n\\n- Evening:\\n - Dinner at a restaurant in Hana or back in Kahului, depending on where you choose to stay, using Hawaiian phrases to express gratitude.\\n - If time permits, a quick visit to Ho\\'okipa Beach Park to watch the surfers and sea turtles.\\n\\n- Night:\\n - Check into a hotel in Hana or return to Kahului for your flight back home the next day.\\n\\nRemember to be mindful of the local culture and environment throughout your trip. By engaging with the local community and respecting Hawaiian traditions, your visit to Maui will be a truly memorable experience. 
Enjoy your journey to this beautiful island paradise!',\n", - " 'timestamp': '2024-02-07T12:34:35.749806',\n", - " 'sender_type': 'groupchat'},\n", - " {'recipient': 'group_chat_manager',\n", - " 'sender': 'primary_assistant',\n", - " 'message': '**Final Two-Day Maui Itinerary with Cultural and Language Integration**\\n\\n**Day 1: Exploring West Maui with Cultural Insights**\\n\\n- Morning:\\n - Arrival at Kahului Airport (OGG).\\n - Pick up rental car.\\n - Breakfast at a local café, trying a Hawaiian breakfast specialty.\\n - Drive to Lahaina, a historic whaling village.\\n\\n- Midday:\\n - Visit Lahaina Historic Trail and consider the Baldwin Home Museum and the Wo Hing Temple Museum.\\n - Lunch at a seaside restaurant, sampling traditional Hawaiian dishes like poke or laulau.\\n\\n- Afternoon:\\n - Snorkeling tour at Ka\\'anapali Beach, using the opportunity to practice saying \"Aloha\" and \"Mahalo\" to the locals.\\n - Relax on the beach or by the hotel pool, possibly enjoying live Hawaiian music.\\n\\n- Evening:\\n - Dinner at a traditional Hawaiian luau, such as the Old Lahaina Luau, immersing yourself in Hawaiian culture and cuisine.\\n - Return to hotel for overnight stay.\\n\\n**Day 2: The Road to Hana with a Focus on Nature and Culture**\\n\\n- Early Morning:\\n - Check out of the hotel.\\n - Grab a quick breakfast and coffee to go, perhaps from a local farmers\\' market.\\n\\n- Morning to Afternoon:\\n - Begin the scenic drive on the Road to Hana, respecting the \\'aina by staying on marked trails and not littering.\\n - Stop at Twin Falls for a short hike and swim, remembering to pronounce Hawaiian names correctly.\\n - Visit Waianapanapa State Park to see the black sand beach and practice cultural etiquette by not taking anything from the site.\\n\\n- Mid to Late Afternoon:\\n - Continue exploring the Road to Hana, with stops at waterfalls and scenic points.\\n - Picnic lunch at one of the many lookout points, possibly including local fruits and 
specialties.\\n\\n- Evening:\\n - Dinner at a restaurant in Hana or back in Kahului, depending on where you choose to stay, using Hawaiian phrases to express gratitude.\\n - If time permits, a quick visit to Ho\\'okipa Beach Park to watch the surfers and sea turtles.\\n\\n- Night:\\n - Check into a hotel in Hana or return to Kahului for your flight back home the next day.\\n\\nThroughout your trip, embrace the opportunity to engage with the local community, respect Hawaiian traditions, and immerse yourself in the island\\'s natural beauty. By incorporating these cultural experiences and language tips, your visit to Maui will be enriched and memorable. Have a fantastic journey to this enchanting island paradise! TERMINATE',\n", - " 'timestamp': '2024-02-07T12:34:35.759164',\n", - " 'sender_type': 'groupchat'}]" + " 'message': {'content': \"Regarding language and communication challenges, traveling to Maui, Hawaii, should be relatively straightforward as English is the primary language spoken there. However, it's always appreciated when visitors show respect and interest in the local culture and language. Here are some tips to address language and communication in Maui:\\n\\n1. **Learn Basic Hawaiian Phrases:** While English is widely spoken, Hawaiian is an official language of the state. Learning a few basic phrases can be a sign of respect and can enhance your experience. For example:\\n - Aloha - Hello/Goodbye\\n - Mahalo - Thank you\\n - 'A'ole pilikia - You're welcome/No problem\\n\\n2. **Cultural Sensitivity:** Be aware of the cultural significance of certain Hawaiian words and places. Show respect when visiting culturally important sites or participating in traditional events like luaus.\\n\\n3. **Use of Pidgin:** Some locals may speak Pidgin, a creole language in Hawaii that incorporates elements of Hawaiian, English, and other languages. It's not necessary to learn Pidgin, but understanding that it exists can help with communication.\\n\\n4. 
**Ask Politely:** If you don't understand something, whether it's a word or a cultural practice, it's perfectly acceptable to ask. Most locals are happy to explain and share their culture with interested visitors.\\n\\n5. **Travel Guides and Apps:** Consider downloading travel apps or carrying a guidebook that includes a section on local language and customs.\\n\\n6. **Cultural Experiences:** Participate in cultural experiences that may offer language workshops or introductions to Hawaiian language and culture.\\n\\nSince the travel plan does not specifically include language tips, these suggestions should be incorporated to ensure a smooth and culturally respectful experience. However, given that English is the primary language, you should not expect significant language barriers while in Maui. Enjoy your trip!\",\n", + " 'role': 'user'},\n", + " 'timestamp': '2024-05-14T16:09:08.872069',\n", + " 'sender_type': 'groupchat',\n", + " 'connection_id': None,\n", + " 'message_type': 'agent_message'},\n", + " {'recipient': 'chat_manager',\n", + " 'sender': 'user_proxy',\n", + " 'message': {'content': 'TERMINATE', 'role': 'user'},\n", + " 'timestamp': '2024-05-14T16:09:10.032145',\n", + " 'sender_type': 'groupchat',\n", + " 'connection_id': None,\n", + " 'message_type': 'agent_message'}]" ] }, - "execution_count": 6, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "group_agent_work_flow.agent_history" + "# print the agent history\n", + "print(len(travel_workflow_manager.agent_history), \"agent messages were involved in the conversation\")\n", + "travel_workflow_manager.agent_history" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/samples/apps/autogen-studio/notebooks/two_agent.json b/samples/apps/autogen-studio/notebooks/two_agent.json new file mode 100644 index 000000000000..ad9645a2311d --- /dev/null +++ 
b/samples/apps/autogen-studio/notebooks/two_agent.json @@ -0,0 +1,85 @@ +{ + "name": "Two Agent Workflow", + "type": "twoagents", + "user_id": "guestuser@gmail.com", + "description": "Default workflow", + "summary_method": "last", + "sender": { + "type": "userproxy", + "user_id": "guestuser@gmail.com", + "config": { + "name": "user_proxy", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a helpful assistant", + "is_termination_msg": null, + "code_execution_config": "local", + "default_auto_reply": "TERMINATE", + "description": "User Proxy Agent Configuration", + "llm_config": false, + "admin_name": "Admin", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "skills": [], + "models": [], + "agents": [] + }, + "receiver": { + "type": "assistant", + "user_id": "guestuser@gmail.com", + "config": { + "name": "default_assistant", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "system_message": "You are a helpful AI assistant.\nSolve tasks using your coding and language skills.\nIn the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.\n 1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.\n 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.\nSolve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.\nWhen using code, you must indicate the script type in the code block. 
The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.\nIf you want the user to save the code in a file before executing it, put # filename: inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.\nIf the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.\nWhen you find an answer, verify the answer carefully. 
Include verifiable evidence in your response if possible.\nReply \"TERMINATE\" in the end when everything is done.\n ", + "is_termination_msg": null, + "code_execution_config": "none", + "default_auto_reply": "", + "description": "Assistant Agent", + "llm_config": { + "config_list": [ + { + "api_type": "open_ai", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "temperature": 0, + "cache_seed": null, + "timeout": null, + "max_tokens": 1000, + "extra_body": null + }, + "admin_name": "Admin", + "messages": [], + "max_round": 100, + "speaker_selection_method": "auto", + "allow_repeat_speaker": true + }, + "skills": [ + { + "user_id": "guestuser@gmail.com", + "name": "generate_images", + "content": "\nfrom typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\n\nfrom openai import OpenAI\n\n\ndef generate_and_save_images(query: str, image_size: str = \"1024x1024\") -> List[str]:\n \"\"\"\n Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image.\n\n :param query: A natural language description of the image to be generated.\n :param image_size: The size of the image to be generated. 
(default is \"1024x1024\")\n :return: A list of filenames for the saved images.\n \"\"\"\n\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = str(uuid.uuid4()) + \".png\" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n print(f\"Image saved to {file_path}\")\n saved_files.append(str(file_path))\n else:\n print(f\"Failed to download the image from {img_url}\")\n else:\n print(\"No image data found in the response!\")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images(\"A cute baby sea otter\")\n", + "description": "Generate and save images based on a user's query.", + "secrets": {}, + "libraries": {} + } + ], + "models": [ + { + "user_id": "guestuser@gmail.com", + "api_type": "open_ai", + "description": "OpenAI GPT-4 model", + "model": "gpt-4-1106-preview", + "base_url": null, + "api_version": null + } + ], + "agents": [] + } +} diff --git a/samples/apps/autogen-studio/work_dir/skills.py b/samples/apps/autogen-studio/work_dir/skills.py new file mode 100644 index 000000000000..fd967729f960 --- /dev/null +++ b/samples/apps/autogen-studio/work_dir/skills.py @@ -0,0 +1,55 @@ +##### Begin of generate_images ##### + + +import uuid +from pathlib import Path +from typing import List + +import requests # to perform HTTP requests +from openai import OpenAI + + +def generate_and_save_images(query: str, image_size: str = "1024x1024") -> 
List[str]: + """ + Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image. + + :param query: A natural language description of the image to be generated. + :param image_size: The size of the image to be generated. (default is "1024x1024") + :return: A list of filenames for the saved images. + """ + + client = OpenAI() # Initialize the OpenAI client + response = client.images.generate(model="dall-e-3", prompt=query, n=1, size=image_size) # Generate images + + # List to store the file names of saved images + saved_files = [] + + # Check if the response is successful + if response.data: + for image_data in response.data: + # Generate a random UUID as the file name + file_name = str(uuid.uuid4()) + ".png" # Assuming the image is a PNG + file_path = Path(file_name) + + img_url = image_data.url + img_response = requests.get(img_url) + if img_response.status_code == 200: + # Write the binary content to a file + with open(file_path, "wb") as img_file: + img_file.write(img_response.content) + print(f"Image saved to {file_path}") + saved_files.append(str(file_path)) + else: + print(f"Failed to download the image from {img_url}") + else: + print("No image data found in the response!") + + # Return the list of saved files + return saved_files + + +# Example usage of the function: +# generate_and_save_images("A cute baby sea otter") + + +#### End of generate_images #### From 3d354a86f63221263c4f7014199ae18fb4b979f6 Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Fri, 17 May 2024 22:34:54 -0700 Subject: [PATCH 02/15] support for sequential workflows --- .../autogenstudio/database/dbmanager.py | 17 +- .../autogenstudio/database/utils.py | 24 +- .../autogen-studio/autogenstudio/datamodel.py | 11 +- .../autogenstudio/utils/utils.py | 1 + .../autogen-studio/autogenstudio/web/app.py | 32 +- 
.../autogenstudio/workflowmanager.py | 279 ++++++++++++++++-- .../frontend/src/components/types.ts | 6 +- .../frontend/src/components/utils.ts | 76 ++--- .../src/components/views/builder/agents.tsx | 2 +- .../views/builder/utils/agentconfig.tsx | 2 +- .../views/builder/utils/selectors.tsx | 233 +++++++++------ .../views/builder/utils/workflowconfig.tsx | 6 +- .../src/components/views/builder/workflow.tsx | 14 +- .../frontend/src/styles/global.css | 3 +- 14 files changed, 503 insertions(+), 203 deletions(-) diff --git a/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py b/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py index f964e3254cae..f34d3b165dc6 100644 --- a/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py @@ -16,12 +16,18 @@ Skill, Workflow, WorkflowAgentLink, + WorkflowAgentType, ) from .utils import init_db_samples valid_link_types = ["agent_model", "agent_skill", "agent_agent", "workflow_agent"] +class WorkflowAgentMap(SQLModel): + agent: Agent + link: WorkflowAgentLink + + class DBManager: """A class to manage database operations""" @@ -222,20 +228,21 @@ def get_linked_entities( linked_entities = agent.agents elif link_type == "workflow_agent": linked_entities = session.exec( - select(Agent) - .join(WorkflowAgentLink) + select(WorkflowAgentLink, Agent) + .join(Agent, WorkflowAgentLink.agent_id == Agent.id) .where( WorkflowAgentLink.workflow_id == primary_id, - WorkflowAgentLink.agent_type == agent_type, - WorkflowAgentLink.sequence_id == sequence_id, ) ).all() + + linked_entities = [WorkflowAgentMap(agent=agent, link=link) for link, agent in linked_entities] + linked_entities = sorted(linked_entities, key=lambda x: x.link.sequence_id) # type: ignore except Exception as e: logger.error("Error while getting linked entities: " + str(e)) status_message = f"Error while getting linked entities: {e}" status = False if return_json: - linked_entities = 
[self._model_to_dict(row) for row in linked_entities] + linked_entities = [row.model_dump() for row in linked_entities] response = Response( message=status_message, diff --git a/samples/apps/autogen-studio/autogenstudio/database/utils.py b/samples/apps/autogen-studio/autogenstudio/database/utils.py index bbad46b7d94f..5e48938722bc 100644 --- a/samples/apps/autogen-studio/autogenstudio/database/utils.py +++ b/samples/apps/autogen-studio/autogenstudio/database/utils.py @@ -76,8 +76,11 @@ def get_agent(agent_id): for link in workflow_agent_links: agent_dict = get_agent(link.agent_id) agents.append({"agent": agent_dict, "link": link.model_dump(mode="json")}) - workflow[str(link.agent_type.value)] = agent_dict - + # workflow[str(link.agent_type.value)] = agent_dict + if workflow["type"] == WorkFlowType.sequential.value: + # sort agents by sequence_id in link + agents = sorted(agents, key=lambda x: x["link"]["sequence_id"]) + workflow["agents"] = agents return workflow @@ -173,8 +176,14 @@ def init_db_samples(dbmanager: Any): ) # skills + generate_pdf_skill = Skill( + name="generate_and_save_pdf", + description="Generate and save a pdf file based on the provided input sections.", + user_id="guestuser@gmail.com", + content='import uuid\nimport requests\nfrom fpdf import FPDF\nfrom typing import List, Dict, Optional\nfrom pathlib import Path\nfrom PIL import Image, ImageDraw, ImageOps\nfrom io import BytesIO\n\ndef generate_and_save_pdf(\n sections: List[Dict[str, Optional[str]]], \n output_file: str = "report.pdf", \n report_title: str = "PDF Report"\n) -> None:\n """\n Function to generate a beautiful PDF report in A4 paper format. 
\n\n :param sections: A list of sections where each section is represented by a dictionary containing:\n - title: The title of the section.\n - level: The heading level (e.g., "title", "h1", "h2").\n - content: The content or body text of the section.\n - image: (Optional) The URL or local path to the image.\n :param output_file: The name of the output PDF file. (default is "report.pdf")\n :param report_title: The title of the report. (default is "PDF Report")\n :return: None\n """\n\n def get_image(image_url_or_path):\n if image_url_or_path.startswith("http://") or image_url_or_path.startswith("https://"):\n response = requests.get(image_url_or_path)\n if response.status_code == 200:\n return BytesIO(response.content)\n elif Path(image_url_or_path).is_file():\n return open(image_url_or_path, \'rb\')\n return None\n\n def add_rounded_corners(img, radius=6):\n mask = Image.new(\'L\', img.size, 0)\n draw = ImageDraw.Draw(mask)\n draw.rounded_rectangle([(0, 0), img.size], radius, fill=255)\n img = ImageOps.fit(img, mask.size, centering=(0.5, 0.5))\n img.putalpha(mask)\n return img\n\n class PDF(FPDF):\n def header(self):\n self.set_font("Arial", "B", 12)\n self.cell(0, 10, report_title, 0, 1, "C")\n \n def chapter_title(self, txt): \n self.set_font("Arial", "B", 12)\n self.cell(0, 10, txt, 0, 1, "L")\n self.ln(2)\n \n def chapter_body(self, body):\n self.set_font("Arial", "", 12)\n self.multi_cell(0, 10, body)\n self.ln()\n\n def add_image(self, img_data):\n img = Image.open(img_data)\n img = add_rounded_corners(img)\n img_path = Path(f"temp_{uuid.uuid4().hex}.png")\n img.save(img_path, format="PNG")\n self.image(str(img_path), x=None, y=None, w=190 if img.width > 190 else img.width)\n self.ln(10)\n img_path.unlink()\n\n pdf = PDF()\n pdf.add_page()\n font_size = {"title": 16, "h1": 14, "h2": 12, "body": 12}\n\n for section in sections:\n title, level, content, image = section.get("title", ""), section.get("level", "h1"), section.get("content", ""), 
section.get("image")\n pdf.set_font("Arial", "B" if level in font_size else "", font_size.get(level, font_size["body"]))\n pdf.chapter_title(title)\n\n if content: pdf.chapter_body(content)\n if image:\n img_data = get_image(image)\n if img_data:\n pdf.add_image(img_data)\n if isinstance(img_data, BytesIO):\n img_data.close()\n\n pdf.output(output_file)\n print(f"PDF report saved as {output_file}")\n\n# # Example usage\n# sections = [\n# {\n# "title": "Introduction - Early Life",\n# "level": "h1",\n# "image": "https://picsum.photos/536/354",\n# "content": ("Marie Curie was born on 7 November 1867 in Warsaw, Poland. "\n# "She was the youngest of five children. Both of her parents were teachers. "\n# "Her father was a math and physics instructor, and her mother was the head of a private school. "\n# "Marie\'s curiosity and brilliance were evident from an early age."),\n# },\n# {\n# "title": "Academic Accomplishments",\n# "level": "h2",\n# "content": ("Despite many obstacles, Marie Curie earned degrees in physics and mathematics from the University of Paris. "\n# "She conducted groundbreaking research on radioactivity, becoming the first woman to win a Nobel Prize. "\n# "Her achievements paved the way for future generations of scientists, particularly women in STEM fields."),\n# },\n# {\n# "title": "Major Discoveries",\n# "level": "h2",\n# "image": "https://picsum.photos/536/354",\n# "content": ("One of Marie Curie\'s most notable discoveries was that of radium and polonium, two radioactive elements. "\n# "Her meticulous work not only advanced scientific understanding but also had practical applications in medicine and industry."),\n# },\n# {\n# "title": "Conclusion - Legacy",\n# "level": "h1",\n# "content": ("Marie Curie\'s legacy lives on through her contributions to science, her role as a trailblazer for women in STEM, "\n# "and the ongoing impact of her discoveries on modern medicine and technology. 
"\n# "Her life and work remain an inspiration to many, demonstrating the power of perseverance and intellectual curiosity."),\n# },\n# ]\n\n# generate_and_save_pdf_report(sections, "my_report.pdf", "The Life of Marie Curie")', + ) generate_image_skill = Skill( - name="generate_images", + name="generate_and_save_images", description="Generate and save images based on a user's query.", content='\nfrom typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\n\nfrom openai import OpenAI\n\n\ndef generate_and_save_images(query: str, image_size: str = "1024x1024") -> List[str]:\n """\n Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI\'s DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image.\n\n :param query: A natural language description of the image to be generated.\n :param image_size: The size of the image to be generated. 
(default is "1024x1024")\n :return: A list of filenames for the saved images.\n """\n\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model="dall-e-3", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = str(uuid.uuid4()) + ".png" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, "wb") as img_file:\n img_file.write(img_response.content)\n print(f"Image saved to {file_path}")\n saved_files.append(str(file_path))\n else:\n print(f"Failed to download the image from {img_url}")\n else:\n print("No image data found in the response!")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images("A cute baby sea otter")\n', user_id="guestuser@gmail.com", @@ -187,12 +196,14 @@ def init_db_samples(dbmanager: Any): description="Assistant Agent", human_input_mode="NEVER", max_consecutive_auto_reply=25, - system_message="You are a helpful assistant that can suggest a travel plan for a user. You are the primary cordinator who will receive suggestions or advice from other agents (local_assistant, language_assistant). You must ensure that the finally plan integrates the suggestions from other agents or team members. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN. When the plan is complete and all perspectives are integrated, you can respond with TERMINATE.", + system_message="You are a helpful assistant that can suggest a travel plan for a user and utilize any context information provided. 
You are the primary cordinator who will receive suggestions or advice from other agents (local_assistant, language_assistant). You must ensure that the finally plan integrates the suggestions from other agents or team members. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN. When the plan is complete and all perspectives are integrated, you can respond with TERMINATE.", code_execution_config=CodeExecutionConfigTypes.none, llm_config={}, ) planner_assistant = Agent( - user_id="guestuser@gmail.com", type=AgentType.assistant, config=planner_assistant_config.model_dump(mode="json") + user_id="guestuser@gmail.com", + type=AgentType.assistant, + config=planner_assistant_config.model_dump(mode="json"), ) local_assistant_config = AgentConfig( @@ -200,7 +211,7 @@ def init_db_samples(dbmanager: Any): description="Local Assistant Agent", human_input_mode="NEVER", max_consecutive_auto_reply=25, - system_message="You are a local assistant that can suggest local activities or places to visit for a user. You can suggest local activities, places to visit, restaurants to eat at, etc. You can also provide information about the weather, local events, etc. You can provide information about the local area, but you cannot suggest a complete travel plan. You can only provide information about the local area.", + system_message="You are a local assistant that can suggest local activities or places to visit for a user and can utilize any context information provided. You can suggest local activities, places to visit, restaurants to eat at, etc. You can also provide information about the weather, local events, etc. You can provide information about the local area, but you cannot suggest a complete travel plan. 
You can only provide information about the local area.", code_execution_config=CodeExecutionConfigTypes.none, llm_config={}, ) @@ -290,6 +301,7 @@ def init_db_samples(dbmanager: Any): session.add(azure_model) session.add(gpt_4_model) session.add(generate_image_skill) + session.add(generate_pdf_skill) session.add(user_proxy) session.add(default_assistant) session.add(travel_groupchat_agent) diff --git a/samples/apps/autogen-studio/autogenstudio/datamodel.py b/samples/apps/autogen-studio/autogenstudio/datamodel.py index 5614153a8851..0fb749414de8 100644 --- a/samples/apps/autogen-studio/autogenstudio/datamodel.py +++ b/samples/apps/autogen-studio/autogenstudio/datamodel.py @@ -82,6 +82,7 @@ class Skill(SQLModel, table=True): sa_column=Column(DateTime(timezone=True), onupdate=func.now()), ) # pylint: disable=not-callable user_id: Optional[str] = None + version: Optional[str] = "0.0.1" name: str content: str description: Optional[str] = None @@ -97,7 +98,7 @@ class LLMConfig(SQLModel, table=False): temperature: float = 0 cache_seed: Optional[Union[int, None]] = None timeout: Optional[int] = None - max_tokens: Optional[int] = 1000 + max_tokens: Optional[int] = 2048 extra_body: Optional[dict] = None @@ -119,6 +120,7 @@ class Model(SQLModel, table=True): sa_column=Column(DateTime(timezone=True), onupdate=func.now()), ) # pylint: disable=not-callable user_id: Optional[str] = None + version: Optional[str] = "0.0.1" model: str api_key: Optional[str] = None base_url: Optional[str] = None @@ -175,7 +177,7 @@ class WorkflowAgentLink(SQLModel, table=True): default=WorkflowAgentType.sender, sa_column=Column(SqlEnum(WorkflowAgentType), primary_key=True), ) - sequence_id: Optional[int] = None + sequence_id: Optional[int] = Field(default=0, primary_key=True) class AgentLink(SQLModel, table=True): @@ -196,8 +198,9 @@ class Agent(SQLModel, table=True): sa_column=Column(DateTime(timezone=True), onupdate=func.now()), ) # pylint: disable=not-callable user_id: Optional[str] = None + 
version: Optional[str] = "0.0.1" type: AgentType = Field(default=AgentType.assistant, sa_column=Column(SqlEnum(AgentType))) - config: AgentConfig = Field(default_factory=AgentConfig, sa_column=Column(JSON)) + config: Union[AgentConfig, dict] = Field(default_factory=AgentConfig, sa_column=Column(JSON)) skills: List[Skill] = Relationship(back_populates="agents", link_model=AgentSkillLink) models: List[Model] = Relationship(back_populates="agents", link_model=AgentModelLink) workflows: List["Workflow"] = Relationship(link_model=WorkflowAgentLink, back_populates="agents") @@ -217,6 +220,7 @@ class Agent(SQLModel, table=True): secondaryjoin="Agent.id==AgentLink.agent_id", ), ) + task_instruction: Optional[str] = None class WorkFlowType(str, Enum): @@ -242,6 +246,7 @@ class Workflow(SQLModel, table=True): sa_column=Column(DateTime(timezone=True), onupdate=func.now()), ) # pylint: disable=not-callable user_id: Optional[str] = None + version: Optional[str] = "0.0.1" name: str description: str agents: List[Agent] = Relationship(back_populates="workflows", link_model=WorkflowAgentLink) diff --git a/samples/apps/autogen-studio/autogenstudio/utils/utils.py b/samples/apps/autogen-studio/autogenstudio/utils/utils.py index e570e3b8e158..9a27044e5362 100644 --- a/samples/apps/autogen-studio/autogenstudio/utils/utils.py +++ b/samples/apps/autogen-studio/autogenstudio/utils/utils.py @@ -312,6 +312,7 @@ def get_skills_from_prompt(skills: List[Skill], work_dir: str) -> str: prompt += f""" ##### Begin of {skill.name} ##### +# from skills import {skill.name} # Import the function from skills.py {skill.content} diff --git a/samples/apps/autogen-studio/autogenstudio/web/app.py b/samples/apps/autogen-studio/autogenstudio/web/app.py index ada9c436d731..37560f0892ea 100644 --- a/samples/apps/autogen-studio/autogenstudio/web/app.py +++ b/samples/apps/autogen-studio/autogenstudio/web/app.py @@ -4,7 +4,7 @@ import threading import traceback from contextlib import asynccontextmanager -from 
typing import Any +from typing import Any, Union from fastapi import FastAPI, WebSocket, WebSocketDisconnect from fastapi.middleware.cors import CORSMiddleware @@ -337,6 +337,19 @@ async def link_workflow_agent(workflow_id: int, agent_id: int, agent_type: str): ) +@api.post("/workflows/link/agent/{workflow_id}/{agent_id}/{agent_type}/{sequence_id}") +async def link_workflow_agent_sequence(workflow_id: int, agent_id: int, agent_type: str, sequence_id: int): + """Link an agent to a workflow""" + print("Sequence ID: ", sequence_id) + return dbmanager.link( + link_type="workflow_agent", + primary_id=workflow_id, + secondary_id=agent_id, + agent_type=agent_type, + sequence_id=sequence_id, + ) + + @api.delete("/workflows/link/agent/{workflow_id}/{agent_id}/{agent_type}") async def unlink_workflow_agent(workflow_id: int, agent_id: int, agent_type: str): """Unlink an agent from a workflow""" @@ -348,13 +361,24 @@ async def unlink_workflow_agent(workflow_id: int, agent_id: int, agent_type: str ) -@api.get("/workflows/link/agent/{workflow_id}/{agent_type}") -async def get_linked_workflow_agents(workflow_id: int, agent_type: str): +@api.delete("/workflows/link/agent/{workflow_id}/{agent_id}/{agent_type}/{sequence_id}") +async def unlink_workflow_agent_sequence(workflow_id: int, agent_id: int, agent_type: str, sequence_id: int): + """Unlink an agent from a workflow sequence""" + return dbmanager.unlink( + link_type="workflow_agent", + primary_id=workflow_id, + secondary_id=agent_id, + agent_type=agent_type, + sequence_id=sequence_id, + ) + + +@api.get("/workflows/link/agent/{workflow_id}") +async def get_linked_workflow_agents(workflow_id: int): """Get all agents linked to a workflow""" return dbmanager.get_linked_entities( link_type="workflow_agent", primary_id=workflow_id, - agent_type=agent_type, return_json=True, ) diff --git a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py index 
1bbdfd6b758b..d81d2ce2290b 100644 --- a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py @@ -6,14 +6,7 @@ import autogen -from .datamodel import ( - Agent, - AgentType, - Message, - SocketMessage, - WorkFlowSummaryMethod, - WorkFlowType, -) +from .datamodel import Agent, AgentType, Message, SocketMessage, Workflow, WorkFlowSummaryMethod, WorkFlowType from .utils import ( clear_folder, get_modified_files, @@ -24,7 +17,7 @@ ) -class WorkflowManager: +class AutoWorkflowManager: """ WorkflowManager class to load agents from a provided configuration and run a chat between them. """ @@ -81,10 +74,12 @@ def _run_workflow(self, message: str, history: Optional[List[Message]] = None, c clear_history: If set to True, clears the chat history before initiating. """ - - if self.workflow.get("type", None) == WorkFlowType.autonomous.value: - self.sender = self.load(self.workflow.get("sender")) - self.receiver = self.load(self.workflow.get("receiver")) + for agent in self.workflow.get("agents", []): + if agent.get("link").get("agent_type") == "sender": + self.sender = self.load(agent.get("agent")) + elif agent.get("link").get("agent_type") == "receiver": + self.receiver = self.load(agent.get("agent")) + if self.sender and self.receiver: if history: self._populate_history(history) self.sender.initiate_chat( @@ -92,6 +87,8 @@ def _run_workflow(self, message: str, history: Optional[List[Message]] = None, c message=message, clear_history=clear_history, ) + else: + raise ValueError("Sender and receiver agents are not defined in the workflow configuration.") def _serialize_agent( self, @@ -284,6 +281,47 @@ def load(self, agent: Any) -> autogen.Agent: raise ValueError(f"Unknown agent type: {agent.type}") return agent + def _generate_output( + self, + message_text: str, + summary_method: str, + ) -> str: + """ + Generates the output response based on the workflow configuration and agent history. 
+ + :param message_text: The text of the incoming message. + :param flow: An instance of `WorkflowManager`. + :param flow_config: An instance of `AgentWorkFlowConfig`. + :return: The output response as a string. + """ + + output = "" + if summary_method == WorkFlowSummaryMethod.last: + (self.agent_history) + last_message = self.agent_history[-1]["message"]["content"] if self.agent_history else "" + output = last_message + elif summary_method == WorkFlowSummaryMethod.llm: + client = self.receiver.client + if self.connection_id: + status_message = SocketMessage( + type="agent_status", + data={ + "status": "summarizing", + "message": "Summarizing agent dialogue", + }, + connection_id=self.connection_id, + ) + self.send_message_function(status_message.model_dump(mode="json")) + output = summarize_chat_history( + task=message_text, + messages=self.agent_history, + client=client, + ) + + elif summary_method == "none": + output = "" + return output + def run(self, message: str, history: Optional[List[Message]] = None, clear_history: bool = False) -> Message: """ Initiates a chat between the sender and receiver agents with an initial message @@ -312,6 +350,116 @@ def run(self, message: str, history: Optional[List[Message]] = None, clear_histo ) return result_message + +class SequentialWorkflowManager: + """ + WorkflowManager class to load agents from a provided configuration and run a chat between them. + """ + + def __init__( + self, + workflow: Union[Dict, str], + history: Optional[List[Message]] = None, + work_dir: str = None, + clear_work_dir: bool = True, + send_message_function: Optional[callable] = None, + connection_id: Optional[str] = None, + ) -> None: + """ + Initializes the WorkflowManager with agents specified in the config and optional message history. + + Args: + workflow (Union[Dict, str]): The workflow configuration. This can be a dictionary or a string which is a path to a JSON file. + history (Optional[List[Message]]): The message history. 
+ work_dir (str): The working directory. + clear_work_dir (bool): If set to True, clears the working directory. + send_message_function (Optional[callable]): The function to send messages. + connection_id (Optional[str]): The connection identifier. + """ + if isinstance(workflow, str): + if os.path.isfile(workflow): + with open(workflow, "r") as file: + self.workflow = json.load(file) + else: + raise FileNotFoundError(f"The file {workflow} does not exist.") + elif isinstance(workflow, dict): + self.workflow = workflow + else: + raise ValueError("The 'workflow' parameter should be either a dictionary or a valid JSON file path") + + # TODO - improved typing for workflow + self.send_message_function = send_message_function + self.connection_id = connection_id + self.work_dir = work_dir or "work_dir" + if clear_work_dir: + clear_folder(self.work_dir) + self.agent_history = [] + self.history = history or [] + self.sender = None + self.receiver = None + self.model_client = None + + def _run_workflow(self, message: str, history: Optional[List[Message]] = None, clear_history: bool = False) -> None: + """ + Runs the workflow based on the provided configuration. + + Args: + message: The initial message to start the chat. + history: A list of messages to populate the agents' history. + clear_history: If set to True, clears the chat history before initiating. 
+ + """ + user_proxy = { + "config": { + "name": "user_proxy", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 25, + "code_execution_config": "local", + "default_auto_reply": "TERMINATE", + "description": "User Proxy Agent Configuration", + "llm_config": False, + "type": "userproxy", + } + } + sequential_history = [] + for i, agent in enumerate(self.workflow.get("agents", [])): + workflow = Workflow( + name="agent workflow", type=WorkFlowType.autonomous, summary_method=WorkFlowSummaryMethod.llm + ) + workflow = workflow.model_dump(mode="json") + agent = agent.get("agent") + workflow["agents"] = [ + {"agent": user_proxy, "link": {"agent_type": "sender"}}, + {"agent": agent, "link": {"agent_type": "receiver"}}, + ] + + auto_workflow = AutoWorkflowManager( + workflow=workflow, + history=history, + work_dir=self.work_dir, + clear_work_dir=True, + send_message_function=self.send_message_function, + connection_id=self.connection_id, + ) + task_prompt = ( + f""" + Your primary instructions are as follows: + {agent.get("task_instruction")} + Context for addressing your task is below: + ======= + {str(sequential_history)} + ======= + Now address your task: + """ + if i > 0 + else message + ) + result = auto_workflow.run(message=task_prompt, clear_history=clear_history) + sequential_history.append(result.content) + self.model_client = auto_workflow.receiver.client + print(f"======== end of sequence === {i}============") + self.agent_history.extend(result.meta.get("messages", [])) + def _generate_output( self, message_text: str, @@ -332,26 +480,109 @@ def _generate_output( last_message = self.agent_history[-1]["message"]["content"] if self.agent_history else "" output = last_message elif summary_method == WorkFlowSummaryMethod.llm: - client = self.receiver.client - status_message = SocketMessage( - type="agent_status", - data={ - "status": "summarizing", - "message": "Summarizing agent dialogue", - }, - connection_id=self.connection_id, - ) - 
self.send_message_function(status_message.model_dump(mode="json")) + if self.connection_id: + status_message = SocketMessage( + type="agent_status", + data={ + "status": "summarizing", + "message": "Summarizing agent dialogue", + }, + connection_id=self.connection_id, + ) + self.send_message_function(status_message.model_dump(mode="json")) output = summarize_chat_history( task=message_text, messages=self.agent_history, - client=client, + client=self.model_client, ) elif summary_method == "none": output = "" return output + def run(self, message: str, history: Optional[List[Message]] = None, clear_history: bool = False) -> Message: + """ + Initiates a chat between the sender and receiver agents with an initial message + and an option to clear the history. + + Args: + message: The initial message to start the chat. + clear_history: If set to True, clears the chat history before initiating. + """ + + start_time = time.time() + self._run_workflow(message=message, history=history, clear_history=clear_history) + end_time = time.time() + output = self._generate_output(message, self.workflow.get("summary_method", "last")) + + result_message = Message( + content=output, + role="assistant", + meta={ + "messages": self.agent_history, + "summary_method": self.workflow.get("summary_method", "last"), + "time": end_time - start_time, + "files": get_modified_files(start_time, end_time, source_dir=self.work_dir), + }, + ) + return result_message + + +class WorkflowManager: + """ + WorkflowManager class to load agents from a provided configuration and run a chat between them. + """ + + def __new__( + self, + workflow: Union[Dict, str], + history: Optional[List[Message]] = None, + work_dir: str = None, + clear_work_dir: bool = True, + send_message_function: Optional[callable] = None, + connection_id: Optional[str] = None, + ) -> None: + """ + Initializes the WorkflowManager with agents specified in the config and optional message history. 
+ + Args: + workflow (Union[Dict, str]): The workflow configuration. This can be a dictionary or a string which is a path to a JSON file. + history (Optional[List[Message]]): The message history. + work_dir (str): The working directory. + clear_work_dir (bool): If set to True, clears the working directory. + send_message_function (Optional[callable]): The function to send messages. + connection_id (Optional[str]): The connection identifier. + """ + if isinstance(workflow, str): + if os.path.isfile(workflow): + with open(workflow, "r") as file: + self.workflow = json.load(file) + else: + raise FileNotFoundError(f"The file {workflow} does not exist.") + elif isinstance(workflow, dict): + self.workflow = workflow + else: + raise ValueError("The 'workflow' parameter should be either a dictionary or a valid JSON file path") + + if self.workflow.get("type") == WorkFlowType.autonomous.value: + return AutoWorkflowManager( + workflow=workflow, + history=history, + work_dir=work_dir, + clear_work_dir=clear_work_dir, + send_message_function=send_message_function, + connection_id=connection_id, + ) + elif self.workflow.get("type") == WorkFlowType.sequential.value: + return SequentialWorkflowManager( + workflow=workflow, + history=history, + work_dir=work_dir, + clear_work_dir=clear_work_dir, + send_message_function=send_message_function, + connection_id=connection_id, + ) + class ExtendedConversableAgent(autogen.ConversableAgent): def __init__(self, message_processor=None, *args, **kwargs): diff --git a/samples/apps/autogen-studio/frontend/src/components/types.ts b/samples/apps/autogen-studio/frontend/src/components/types.ts index 9d1221f539a9..2ff075ad46cf 100644 --- a/samples/apps/autogen-studio/frontend/src/components/types.ts +++ b/samples/apps/autogen-studio/frontend/src/components/types.ts @@ -63,9 +63,9 @@ export interface IAgent { export interface IWorkflow { name: string; description: string; - sender: IAgent; - receiver: IAgent; - type: "autonomous" | "sequential"; + 
sender?: IAgent; + receiver?: IAgent; + type?: "autonomous" | "sequential"; created_at?: string; updated_at?: string; summary_method?: "none" | "last" | "llm"; diff --git a/samples/apps/autogen-studio/frontend/src/components/utils.ts b/samples/apps/autogen-studio/frontend/src/components/utils.ts index 8720a4ca106c..caede828609c 100644 --- a/samples/apps/autogen-studio/frontend/src/components/utils.ts +++ b/samples/apps/autogen-studio/frontend/src/components/utils.ts @@ -286,13 +286,36 @@ export const getRandomIntFromDateAndSalt = (salt: number = 43444) => { return randomInt; }; +export const getSampleWorkflow = (workflow_type: string = "autonomous") => { + const autonomousWorkflow: IWorkflow = { + name: "Default Chat Workflow", + description: "Autonomous Workflow", + type: "autonomous", + summary_method: "llm", + }; + const sequentialWorkflow: IWorkflow = { + name: "Default Sequential Workflow", + description: "Sequential Workflow", + type: "sequential", + summary_method: "llm", + }; + + if (workflow_type === "autonomous") { + return autonomousWorkflow; + } else if (workflow_type === "sequential") { + return sequentialWorkflow; + } else { + return autonomousWorkflow; + } +}; + export const sampleAgentConfig = (agent_type: string = "assistant") => { const llm_config: ILLMConfig = { config_list: [], temperature: 0.1, timeout: 600, cache_seed: null, - max_tokens: 1000, + max_tokens: 4000, }; const userProxyConfig: IAgentConfig = { @@ -357,57 +380,6 @@ export const sampleAgentConfig = (agent_type: string = "assistant") => { } }; -export const sampleWorkflowConfig = (type = "autonomous") => { - const llm_model_config: IModelConfig[] = []; - - const llm_config: ILLMConfig = { - config_list: llm_model_config, - temperature: 0.1, - timeout: 600, - cache_seed: null, - max_tokens: 1000, - }; - - const userProxyConfig: IAgentConfig = { - name: "userproxy", - human_input_mode: "NEVER", - max_consecutive_auto_reply: 15, - system_message: "You are a helpful assistant.", - 
default_auto_reply: "TERMINATE", - llm_config: false, - code_execution_config: "local", - }; - const userProxyFlowSpec: IAgent = { - type: "userproxy", - config: userProxyConfig, - }; - - const assistantConfig: IAgentConfig = { - name: "primary_assistant", - llm_config: llm_config, - human_input_mode: "NEVER", - max_consecutive_auto_reply: 8, - code_execution_config: "none", - system_message: - "You are a helpful AI assistant. Solve tasks using your coding and language skills. In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute. 1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself. 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly. Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill. When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user. If you want the user to save the code in a file before executing it, put # filename: inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user. 
If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try. When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible. Reply 'TERMINATE' in the end when everything is done.", - }; - - const assistantFlowSpec: IAgent = { - type: "assistant", - config: assistantConfig, - }; - - const workFlowConfig: IWorkflow = { - name: "Default Agent Workflow", - description: "Default Agent Workflow", - sender: userProxyFlowSpec, - receiver: assistantFlowSpec, - type: "autonomous", - }; - - return workFlowConfig; -}; - export const getSampleSkill = () => { const content = ` from typing import List diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/agents.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/agents.tsx index 8800ebfbdd37..4fda0ad6cd46 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/agents.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/agents.tsx @@ -187,7 +187,7 @@ const AgentsView = ({}: any) => { aria-hidden="true" className="my-2 break-words" > - {" "} +
{agent.type}
{" "} {truncateText(agent.config.description || "", 70)}
{ diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx index f3c9cb08aa9f..1279bdef4be3 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx @@ -3,10 +3,10 @@ import { IAgent, IModelConfig, ISkill, IWorkflow } from "../../../types"; import { Card } from "../../../atoms"; import { fetchJSON, + getSampleWorkflow, getServerUrl, obscureString, sampleAgentConfig, - sampleWorkflowConfig, truncateText, } from "../../../utils"; import { @@ -19,6 +19,8 @@ import { theme, } from "antd"; import { + ArrowLongRightIcon, + ChatBubbleLeftRightIcon, CodeBracketSquareIcon, ExclamationTriangleIcon, InformationCircleIcon, @@ -354,7 +356,7 @@ export const AgentTypeSelector = ({ return ( <> -
Select Agent Type
+
Select Agent Type
    {agentTypeRows}
); @@ -370,10 +372,18 @@ const iconClass = "h-6 w-6 inline-block "; const workflowTypes = [ { - label: "Default", - value: "default", - description: <> Includes a sender and receiver. , - icon: , + label: "Autonomous (Chat)", + value: "autonomous", + description: + "Includes an initiator and receiver. The initiator is typically a user proxy agent, while the receiver could be any agent type (assistant or groupchat)", + icon: , + }, + { + label: "Sequential", + value: "sequential", + description: + " Includes a list of agents in a given order. Each agent should have an instruction and will summarize and pass on the results of their work to the next agent", + icon: , + }, ]; const [seletectedWorkflowType, setSelectedWorkflowType] = React.useState< @@ -390,7 +400,7 @@ onClick={() => { setSelectedWorkflowType(workflowType.value); if (workflow) { - const sampleWorkflow = sampleWorkflowConfig(); + const sampleWorkflow = getSampleWorkflow(workflowType.value); setWorkflow(sampleWorkflow); } }} @@ -398,9 +408,12 @@
{" "}
{workflowType.icon}
- + {" "} - {workflowType.description} + {truncateText(workflowType.description, 60)}
@@ -410,7 +423,7 @@ export const WorkflowTypeSelector = ({ return ( <> -
Select Workflow Type
+
Select Workflow Type
    {workflowTypeRows}
); @@ -964,17 +977,15 @@ export const ModelSelector = ({ agentId }: { agentId: number }) => { }; export const WorkflowAgentSelector = ({ - workflowId, + workflow, }: { - workflowId: number; + workflow: IWorkflow; }) => { const [error, setError] = useState(null); const [loading, setLoading] = useState(false); const [agents, setAgents] = useState([]); - const [senderTargetAgents, setSenderTargetAgents] = useState([]); - const [receiverTargetAgents, setReceiverTargetAgents] = useState( - [] - ); + const [linkedAgents, setLinkedAgents] = useState([]); + const serverUrl = getServerUrl(); const { user } = React.useContext(appContext); @@ -1008,11 +1019,8 @@ export const WorkflowAgentSelector = ({ fetchJSON(listAgentsUrl, payLoad, onSuccess, onError); }; - const fetchTargetAgents = ( - setTarget: (arg0: any) => void, - agentType: string - ) => { - const listTargetAgentsUrl = `${serverUrl}/workflows/link/agent/${workflowId}/${agentType}`; + const fetchLinkedAgents = () => { + const listTargetAgentsUrl = `${serverUrl}/workflows/link/agent/${workflow.id}`; setError(null); setLoading(true); const payLoad = { @@ -1024,7 +1032,8 @@ export const WorkflowAgentSelector = ({ const onSuccess = (data: any) => { if (data && data.status) { - setTarget(data.data); + setLinkedAgents(data.data); + console.log("linked agents", data.data); } else { message.error(data.message); } @@ -1042,7 +1051,8 @@ export const WorkflowAgentSelector = ({ const linkWorkflowAgent = ( workflowId: number, targetAgentId: number, - agentType: string + agentType: string, + sequenceId?: number ) => { setError(null); setLoading(true); @@ -1052,15 +1062,15 @@ export const WorkflowAgentSelector = ({ "Content-Type": "application/json", }, }; - const linkAgentUrl = `${serverUrl}/workflows/link/agent/${workflowId}/${targetAgentId}/${agentType}`; + let linkAgentUrl; + linkAgentUrl = `${serverUrl}/workflows/link/agent/${workflowId}/${targetAgentId}/${agentType}`; + if (agentType === "sequential") { + linkAgentUrl = 
`${serverUrl}/workflows/link/agent/${workflowId}/${targetAgentId}/${agentType}/${sequenceId}`; + } const onSuccess = (data: any) => { if (data && data.status) { message.success(data.message); - if (agentType === "sender") { - fetchTargetAgents(setSenderTargetAgents, "sender"); - } else { - fetchTargetAgents(setReceiverTargetAgents, "receiver"); - } + fetchLinkedAgents(); } else { message.error(data.message); } @@ -1076,11 +1086,7 @@ export const WorkflowAgentSelector = ({ fetchJSON(linkAgentUrl, payLoad, onSuccess, onError); }; - const unlinkWorkflowAgent = ( - workflowId: number, - targetAgentId: number, - agentType: string - ) => { + const unlinkWorkflowAgent = (agent: IAgent, link: any) => { setError(null); setLoading(true); const payLoad = { @@ -1089,16 +1095,17 @@ export const WorkflowAgentSelector = ({ "Content-Type": "application/json", }, }; - const unlinkAgentUrl = `${serverUrl}/workflows/link/agent/${workflowId}/${targetAgentId}/${agentType}`; + + let unlinkAgentUrl; + unlinkAgentUrl = `${serverUrl}/workflows/link/agent/${workflow.id}/${agent.id}/${link.agent_type}`; + if (link.agent_type === "sequential") { + unlinkAgentUrl = `${serverUrl}/workflows/link/agent/${workflow.id}/${agent.id}/${link.agent_type}/${link.sequence_id}`; + } const onSuccess = (data: any) => { if (data && data.status) { message.success(data.message); - if (agentType === "sender") { - fetchTargetAgents(setSenderTargetAgents, "sender"); - } else { - fetchTargetAgents(setReceiverTargetAgents, "receiver"); - } + fetchLinkedAgents(); } else { message.error(data.message); } @@ -1116,8 +1123,7 @@ export const WorkflowAgentSelector = ({ useEffect(() => { fetchAgents(); - fetchTargetAgents(setSenderTargetAgents, "sender"); - fetchTargetAgents(setReceiverTargetAgents, "receiver"); + fetchLinkedAgents(); }, []); const agentItems: MenuProps["items"] = @@ -1145,9 +1151,26 @@ export const WorkflowAgentSelector = ({ const receiverOnclick: MenuProps["onClick"] = ({ key }) => { const selectedIndex = 
parseInt(key.toString()); let selectedAgent = agents[selectedIndex]; + if (selectedAgent && selectedAgent.id && workflow.id) { + linkWorkflowAgent(workflow.id, selectedAgent.id, "receiver"); + } + }; - if (selectedAgent && selectedAgent.id) { - linkWorkflowAgent(workflowId, selectedAgent.id, "receiver"); + const sequenceOnclick: MenuProps["onClick"] = ({ key }) => { + const selectedIndex = parseInt(key.toString()); + let selectedAgent = agents[selectedIndex]; + + if (selectedAgent && selectedAgent.id && workflow.id) { + const sequenceId = + linkedAgents.length > 0 + ? linkedAgents[linkedAgents.length - 1].link.sequence_id + 1 + : 0; + linkWorkflowAgent( + workflow.id, + selectedAgent.id, + "sequential", + sequenceId + ); } }; @@ -1155,18 +1178,16 @@ export const WorkflowAgentSelector = ({ const selectedIndex = parseInt(key.toString()); let selectedAgent = agents[selectedIndex]; - if (selectedAgent && selectedAgent.id) { - linkWorkflowAgent(workflowId, selectedAgent.id, "sender"); + if (selectedAgent && selectedAgent.id && workflow.id) { + linkWorkflowAgent(workflow.id, selectedAgent.id, "sender"); } }; - const handleRemoveAgent = (index: number, agentType: string) => { - const targetAgents = - agentType === "sender" ? senderTargetAgents : receiverTargetAgents; - const agent = targetAgents[index]; - if (agent && agent.id) { - unlinkWorkflowAgent(workflowId, agent.id, agentType); + const handleRemoveAgent = (agent: IAgent, link: any) => { + if (agent && agent.id && workflow.id) { + unlinkWorkflowAgent(agent, link); } + console.log(link); }; const { token } = useToken(); @@ -1185,9 +1206,11 @@ export const WorkflowAgentSelector = ({ onClick: MenuProps["onClick"]; agentType: string; }) => { - const targetAgents = - agentType === "sender" ? 
senderTargetAgents : receiverTargetAgents; - const agentButtons = targetAgents.map((agent, i) => { + const targetAgents = linkedAgents.filter( + (row) => row.link.agent_type === agentType + ); + + const agentButtons = targetAgents.map(({ agent, link }, i) => { const tooltipText = ( <>
{agent.config.name}
@@ -1197,26 +1220,32 @@ export const WorkflowAgentSelector = ({ ); return ( -
-
+
+
{" "} - -
{agent.config.name}
{" "} -
-
{ - e.stopPropagation(); // Prevent opening the modal to edit - handleRemoveAgent(i, agentType); - }} - className="ml-1 text-primary hover:text-accent duration-300" - > - +
+ {" "} + +
{agent.config.name}
{" "} +
+
{ + e.stopPropagation(); // Prevent opening the modal to edit + handleRemoveAgent(agent, link); + }} + className="ml-1 text-primary hover:text-accent duration-300" + > + +
+ {link.agent_type === "sequential" && + i !== targetAgents.length - 1 && ( +
+ {" "} +
+ )}
); }); @@ -1239,7 +1268,8 @@ export const WorkflowAgentSelector = ({ remove current agents and add new ones.
)} - {targetAgents && targetAgents.length < 1 && ( + {((targetAgents.length < 1 && agentType !== "sequential") || + agentType === "sequential") && ( {" "}
Add {title} @@ -1282,33 +1312,48 @@ export const WorkflowAgentSelector = ({ return (
-
-
-

- Initiator{" "} - - - -

-
    - -
+ {workflow.type === "autonomous" && ( +
+
+

+ Initiator{" "} + + + +

+
    + +
+
+
+

Receiver

+
    + +
+
+ )} + + {workflow.type === "sequential" && (
-

Receiver

+

Agents

-
+ )}
); }; diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/workflowconfig.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/workflowconfig.tsx index 8b97f3118629..c42c2e9be302 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/workflowconfig.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/workflowconfig.tsx @@ -165,7 +165,7 @@ export const WorkflowViewConfig = ({ )} {workflow?.id && (
{ uploadWorkflow(); return; } - showWorkflow(sampleWorkflowConfig(key)); + showWorkflow(sampleWorkflow); }; return ( @@ -388,7 +390,7 @@ const WorkflowView = ({}: any) => { placement="bottomRight" trigger={["click"]} onClick={() => { - showWorkflow(sampleWorkflowConfig()); + showWorkflow(sampleWorkflow); }} > diff --git a/samples/apps/autogen-studio/frontend/src/styles/global.css b/samples/apps/autogen-studio/frontend/src/styles/global.css index a46b3712ded0..53531856fc48 100644 --- a/samples/apps/autogen-studio/frontend/src/styles/global.css +++ b/samples/apps/autogen-studio/frontend/src/styles/global.css @@ -289,7 +289,8 @@ iiz__zoom-img { .ant-modal-footer { @apply border-secondary !important; } -.ant-btn { +.ant-btn, +.ant-btn:hover { @apply text-primary !important; } :where(.ant-btn).ant-btn-compact-item.ant-btn-primary:not([disabled]) From f4bd6acbb47ccc6b1e5d7613b0b0f71c5e416be9 Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Sat, 18 May 2024 07:59:07 -0700 Subject: [PATCH 03/15] add skeleton for profiler --- .../autogenstudio/chatmanager.py | 9 +------- .../autogen-studio/autogenstudio/datamodel.py | 10 ++++++++- .../autogen-studio/autogenstudio/profiler.py | 22 +++++++++++++++++++ .../autogenstudio/workflowmanager.py | 1 + 4 files changed, 33 insertions(+), 9 deletions(-) create mode 100644 samples/apps/autogen-studio/autogenstudio/profiler.py diff --git a/samples/apps/autogen-studio/autogenstudio/chatmanager.py b/samples/apps/autogen-studio/autogenstudio/chatmanager.py index 18f4a0634bdc..a91401e6663d 100644 --- a/samples/apps/autogen-studio/autogenstudio/chatmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/chatmanager.py @@ -1,7 +1,5 @@ import asyncio -import json import os -import time from datetime import datetime from queue import Queue from typing import Any, Dict, List, Optional, Tuple, Union @@ -9,12 +7,7 @@ import websockets from fastapi import WebSocket, WebSocketDisconnect -from .datamodel import Message, SocketMessage, Workflow 
-from .utils import ( - extract_successful_code_blocks, - get_modified_files, - summarize_chat_history, -) +from .datamodel import Message from .workflowmanager import WorkflowManager diff --git a/samples/apps/autogen-studio/autogenstudio/datamodel.py b/samples/apps/autogen-studio/autogenstudio/datamodel.py index 0fb749414de8..20e26f7f0a70 100644 --- a/samples/apps/autogen-studio/autogenstudio/datamodel.py +++ b/samples/apps/autogen-studio/autogenstudio/datamodel.py @@ -20,6 +20,14 @@ # pylint: disable=protected-access +class MessageMeta(SQLModel, table=False): + task: Optional[str] = None + messages: Optional[List[Dict[str, Any]]] = None + summary_method: Optional[str] = "last" + files: Optional[List[dict]] = None + time: Optional[datetime] = None + + class Message(SQLModel, table=True): __table_args__ = {"sqlite_autoincrement": True} id: Optional[int] = Field(default=None, primary_key=True) @@ -38,7 +46,7 @@ class Message(SQLModel, table=True): default=None, sa_column=Column(Integer, ForeignKey("session.id", ondelete="CASCADE")) ) connection_id: Optional[str] = None - meta: Optional[Dict] = Field(default={}, sa_column=Column(JSON)) + meta: Optional[Union[MessageMeta, dict]] = Field(default={}, sa_column=Column(JSON)) class Session(SQLModel, table=True): diff --git a/samples/apps/autogen-studio/autogenstudio/profiler.py b/samples/apps/autogen-studio/autogenstudio/profiler.py new file mode 100644 index 000000000000..59448a42000f --- /dev/null +++ b/samples/apps/autogen-studio/autogenstudio/profiler.py @@ -0,0 +1,22 @@ +# metrics - agent_frequency, execution_count, tool_count, + +from .datamodel import Message + + +class Profiler: + """ + Profiler class to profile agent task runs and compute metrics + for performance evaluation. + """ + + def __init__(self): + self.metrics = [] + + def profile(self, agent_message: Message): + """ + Profile the agent task run and compute metrics. + + :param agent: The agent instance that ran the task. 
+ :param task: The task instance that was run. + """ + pass diff --git a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py index d81d2ce2290b..53c31fe41790 100644 --- a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py @@ -523,6 +523,7 @@ def run(self, message: str, history: Optional[List[Message]] = None, clear_histo "summary_method": self.workflow.get("summary_method", "last"), "time": end_time - start_time, "files": get_modified_files(start_time, end_time, source_dir=self.work_dir), + "task": message, }, ) return result_message From 4c697f76c7f518474b09cdaa2652e420703325a8 Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Sat, 25 May 2024 08:24:45 -0700 Subject: [PATCH 04/15] add profiler visualization' --- .../autogen-studio/autogenstudio/profiler.py | 89 ++++++++++++++++++- .../autogen-studio/autogenstudio/web/app.py | 22 +++++ .../autogenstudio/workflowmanager.py | 29 +++++- .../views/builder/utils/selectors.tsx | 6 +- .../components/views/playground/chatbox.tsx | 52 +++++++++-- .../views/playground/utils/charts/bar.tsx | 58 ++++++++++++ .../views/playground/utils/charts/radar.tsx | 68 ++++++++++++++ .../views/playground/utils/profiler.tsx | 71 +++++++++++++++ 8 files changed, 378 insertions(+), 17 deletions(-) create mode 100644 samples/apps/autogen-studio/frontend/src/components/views/playground/utils/charts/bar.tsx create mode 100644 samples/apps/autogen-studio/frontend/src/components/views/playground/utils/charts/radar.tsx create mode 100644 samples/apps/autogen-studio/frontend/src/components/views/playground/utils/profiler.tsx diff --git a/samples/apps/autogen-studio/autogenstudio/profiler.py b/samples/apps/autogen-studio/autogenstudio/profiler.py index 59448a42000f..192c6002a82a 100644 --- a/samples/apps/autogen-studio/autogenstudio/profiler.py +++ b/samples/apps/autogen-studio/autogenstudio/profiler.py 
@@ -1,6 +1,8 @@ # metrics - agent_frequency, execution_count, tool_count, -from .datamodel import Message +from typing import Dict, List, Optional + +from .datamodel import Message, MessageMeta class Profiler: @@ -10,7 +12,51 @@ class Profiler: """ def __init__(self): - self.metrics = [] + self.metrics: List[Dict] = [] + + def _is_code(self, message: Message) -> bool: + """ + Check if the message contains code. + + :param message: The message instance to check. + :return: True if the message contains code, False otherwise. + """ + content = message.get("message").get("content").lower() + return "```" in content + + def _is_tool(self, message: Message) -> bool: + """ + Check if the message uses a tool. + + :param message: The message instance to check. + :return: True if the message uses a tool, False otherwise. + """ + content = message.get("message").get("content").lower() + return "from skills import" in content + + def _is_code_execution(self, message: Message) -> Dict: + """ + Check if the message indicates code execution. + + :param message: The message instance to check. + :return: dict with is_code and status keys. + """ + content = message.get("message").get("content").lower() + if "exitcode:" in content: + status = "exitcode: 0" in content + return {"is_code": True, "status": status} + else: + return {"is_code": False, "status": False} + + def _is_terminate(self, message: Message) -> bool: + """ + Check if the message indicates termination. + + :param message: The message instance to check. + :return: True if the message indicates termination, False otherwise. + """ + content = message.get("message").get("content").lower() + return "terminate" in content def profile(self, agent_message: Message): """ @@ -19,4 +65,41 @@ def profile(self, agent_message: Message): :param agent: The agent instance that ran the task. :param task: The task instance that was run. 
""" - pass + meta = MessageMeta(**agent_message.meta) + messages = meta.messages + profile = [] + bar = [] + stats = {} + total_code_execution = 0 + success_code_execution = 0 + agents = [] + for message in messages: + agent = message.get("sender") + is_code = self._is_code(message) + is_tool = self._is_tool(message) + is_code_execution = self._is_code_execution(message) + total_code_execution += is_code_execution["is_code"] + success_code_execution += 1 if is_code_execution["status"] else 0 + + row = { + "agent": agent, + "tool_call": is_code, + "code_execution": is_code_execution, + "terminate": self._is_terminate(message), + } + bar_row = { + "agent": agent, + "tool_call": "tool call" if is_tool else "no tool call", + "code_execution": ( + "success" + if is_code_execution["status"] + else "failure" if is_code_execution["is_code"] else "no code" + ), + "message": 1, + } + profile.append(row) + bar.append(bar_row) + agents.append(agent) + code_success_rate = (success_code_execution / total_code_execution if total_code_execution > 0 else 0) * 100 + stats["code_success_rate"] = code_success_rate + return {"profile": profile, "bar": bar, "stats": stats, "agents": set(agents)} diff --git a/samples/apps/autogen-studio/autogenstudio/web/app.py b/samples/apps/autogen-studio/autogenstudio/web/app.py index 37560f0892ea..708a396ec79f 100644 --- a/samples/apps/autogen-studio/autogenstudio/web/app.py +++ b/samples/apps/autogen-studio/autogenstudio/web/app.py @@ -16,9 +16,11 @@ from ..database import workflow_from_id from ..database.dbmanager import DBManager from ..datamodel import Agent, Message, Model, Response, Session, Skill, Workflow +from ..profiler import Profiler from ..utils import check_and_cast_datetime_fields, init_app_folders, md5_hash, test_model from ..version import VERSION +profiler = Profiler() managers = {"chat": None} # manage calls to autogen # Create thread-safe queue for messages between api thread and autogen threads message_queue = queue.Queue() @@ 
-383,6 +385,26 @@ async def get_linked_workflow_agents(workflow_id: int): ) +@api.get("/profiler/{message_id}") +async def profile_agent_task_run(message_id: int): + """Profile an agent task run""" + try: + agent_message = dbmanager.get(Message, filters={"id": message_id}).data[0] + + profile = profiler.profile(agent_message) + return { + "status": True, + "message": "Agent task run profiled successfully", + "data": profile, + } + except Exception as ex_error: + print(traceback.format_exc()) + return { + "status": False, + "message": "Error occurred while profiling agent task run: " + str(ex_error), + } + + @api.get("/sessions") async def list_sessions(user_id: str): """List all sessions for a user""" diff --git a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py index 53c31fe41790..8d3de0a5d7ee 100644 --- a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py @@ -6,7 +6,16 @@ import autogen -from .datamodel import Agent, AgentType, Message, SocketMessage, Workflow, WorkFlowSummaryMethod, WorkFlowType +from .datamodel import ( + Agent, + AgentType, + CodeExecutionConfigTypes, + Message, + SocketMessage, + Workflow, + WorkFlowSummaryMethod, + WorkFlowType, +) from .utils import ( clear_folder, get_modified_files, @@ -57,6 +66,14 @@ def __init__( self.send_message_function = send_message_function self.connection_id = connection_id self.work_dir = work_dir or "work_dir" + self.code_executor_pool = { + CodeExecutionConfigTypes.local: load_code_execution_config( + CodeExecutionConfigTypes.local, work_dir=self.work_dir + ), + CodeExecutionConfigTypes.docker: load_code_execution_config( + CodeExecutionConfigTypes.docker, work_dir=self.work_dir + ), + } if clear_work_dir: clear_folder(self.work_dir) self.agent_history = [] @@ -224,9 +241,13 @@ def get_default_system_message(agent_type: str) -> str: 
config_list.append(sanitized_llm) agent.config.llm_config.config_list = config_list - agent.config.code_execution_config = load_code_execution_config( - agent.config.code_execution_config, work_dir=self.work_dir - ) + agent.config.code_execution_config = self.code_executor_pool.get(agent.config.code_execution_config, False) + + print("**** pool ****", self.code_executor_pool, "****", agent.config.code_execution_config) + + # executor = self.code_executor_pool.get( + # agent.config.code_execution_config) + # print("*****", executor, "****", agent.config.code_execution_config) if skills: skills_prompt = "" diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx index 1279bdef4be3..eb4df630bbdf 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx @@ -1251,7 +1251,7 @@ export const WorkflowAgentSelector = ({ }); return ( -
+
{(!targetAgents || targetAgents.length === 0) && (
@@ -1343,8 +1343,8 @@ export const WorkflowAgentSelector = ({ )} {workflow.type === "sequential" && ( -
-

Agents

+
+
Agents
    state.messages); const setMessages = useConfigStore((state) => state.setMessages); - const parseMessage = (message: any) => { + const parseMessage = (message: IMessage) => { let meta; try { meta = JSON.parse(message.meta); @@ -104,7 +108,7 @@ const ChatBox = ({ text: message.content, sender: message.role === "user" ? "user" : "bot", meta: meta, - msg_id: message.msg_id, + id: message.id, }; return msg; }; @@ -237,10 +241,45 @@ const ChatBox = ({ />
)} - {message.meta && ( -
- -
+ {message.meta && !isUser && ( + <> + {" "} + + {" "} + + Agent Messages + + ), + key: "1", + children: ( +
+ +
+ ), + }, + { + label: ( +
+ {" "} + {" "} + Profiler +
+ ), + key: "2", + children: ( +
+ +
+ ), + }, + ]} + /> + )}
@@ -409,7 +448,6 @@ const ChatBox = ({ const userMessage: IChatMessage = { text: query, sender: "user", - msg_id: guid(), }; messageHolder.push(userMessage); setMessages(messageHolder); diff --git a/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/charts/bar.tsx b/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/charts/bar.tsx new file mode 100644 index 000000000000..d5d165597a0d --- /dev/null +++ b/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/charts/bar.tsx @@ -0,0 +1,58 @@ +import { Bar, Line } from "@ant-design/plots"; +import * as React from "react"; +import { IStatus } from "../../../../types"; + +const BarChartViewer = ({ data }: { data: any | null }) => { + const [error, setError] = React.useState({ + status: true, + message: "All good", + }); + + const [loading, setLoading] = React.useState(false); + + const config = { + data: data.bar, + xField: "agent", + yField: "message", + colorField: "tool_call", + stack: true, + axis: { + y: { labelFormatter: "" }, + x: { + labelSpacing: 4, + }, + }, + style: { + radiusTopLeft: 10, + radiusTopRight: 10, + }, + height: 60 * data.agents.length, + }; + + const config_code_exec = Object.assign({}, config); + config_code_exec.colorField = "code_execution"; + + return ( +
+
+
+
+
+ {" "} + Total Messages (by Tool Call) +
+ +
+
+
+ {" "} + Total Messages (by Code Execution Status) +
+ +
+
+
+
+ ); +}; +export default BarChartViewer; diff --git a/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/charts/radar.tsx b/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/charts/radar.tsx new file mode 100644 index 000000000000..13ea3adffb5c --- /dev/null +++ b/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/charts/radar.tsx @@ -0,0 +1,68 @@ +import { Radar } from "@ant-design/plots"; +import * as React from "react"; +import { IStatus } from "../../../../types"; +import { getServerUrl } from "../../../../utils"; +import { appContext } from "../../../../../hooks/provider"; + +const RadarMetrics = ({ profileData }: { profileData: any | null }) => { + const [error, setError] = React.useState({ + status: true, + message: "All good", + }); + + const data = [ + { item: "Design", type: "a", score: 70 }, + { item: "Design", type: "b", score: 30 }, + { item: "Development", type: "a", score: 60 }, + { item: "Development", type: "b", score: 70 }, + { item: "Marketing", type: "a", score: 50 }, + { item: "Marketing", type: "b", score: 60 }, + { item: "Users", type: "a", score: 40 }, + { item: "Users", type: "b", score: 50 }, + { item: "Test", type: "a", score: 60 }, + { item: "Test", type: "b", score: 70 }, + { item: "Language", type: "a", score: 70 }, + { item: "Language", type: "b", score: 50 }, + { item: "Technology", type: "a", score: 50 }, + { item: "Technology", type: "b", score: 40 }, + { item: "Support", type: "a", score: 30 }, + { item: "Support", type: "b", score: 40 }, + { item: "Sales", type: "a", score: 60 }, + { item: "Sales", type: "b", score: 40 }, + { item: "UX", type: "a", score: 50 }, + { item: "UX", type: "b", score: 60 }, + ]; + + const config = { + data, + xField: "item", + yField: "score", + colorField: "type", + area: { + style: { + fillOpacity: 0.5, + }, + }, + scale: { + x: { padding: 0.5, align: 0 }, + y: { tickCount: 5, domainMax: 80 }, + }, + axis: { x: { grid: true }, y: 
{ zIndex: 1, title: false } }, + style: { + lineWidth: 2, + }, + }; + + const [loading, setLoading] = React.useState(false); + const [profile, setProfile] = React.useState(null); + + const { user } = React.useContext(appContext); + const serverUrl = getServerUrl(); + + return ( +
+ +
+ ); +}; +export default RadarMetrics; diff --git a/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/profiler.tsx b/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/profiler.tsx new file mode 100644 index 000000000000..f8d72054ac38 --- /dev/null +++ b/samples/apps/autogen-studio/frontend/src/components/views/playground/utils/profiler.tsx @@ -0,0 +1,71 @@ +import { Select, message } from "antd"; +import * as React from "react"; +import { LoadingOverlay } from "../../../atoms"; +import { IWorkflow, IStatus, IMessage, IChatMessage } from "../../../types"; +import { fetchJSON, getServerUrl } from "../../../utils"; +import { appContext } from "../../../../hooks/provider"; +import { Link } from "gatsby"; +import RadarMetrics from "./charts/radar"; +import BarChart from "@ant-design/plots/es/components/bar"; +import BarChartViewer from "./charts/bar"; + +const ProfilerView = ({ + agentMessage, +}: { + agentMessage: IChatMessage | null; +}) => { + const [error, setError] = React.useState({ + status: true, + message: "All good", + }); + + const [loading, setLoading] = React.useState(false); + const [profile, setProfile] = React.useState(null); + + const { user } = React.useContext(appContext); + const serverUrl = getServerUrl(); + + const fetchProfile = (messageId: number) => { + const profilerUrl = `${serverUrl}/profiler/${messageId}/?user_id=${user?.email}`; + setError(null); + setLoading(true); + const payLoad = { + method: "GET", + headers: { + "Content-Type": "application/json", + }, + }; + + const onSuccess = (data: any) => { + console.log(data); + if (data && data.status) { + setProfile(data.data); + } else { + message.error(data.message); + } + setLoading(false); + }; + const onError = (err: any) => { + setError(err); + message.error(err.message); + setLoading(false); + }; + fetchJSON(profilerUrl, payLoad, onSuccess, onError); + }; + + React.useEffect(() => { + if (user && agentMessage && agentMessage.id) { + 
fetchProfile(agentMessage.id); + } + }, []); + + return ( +
+
+ {/* {profile && } */} + {profile && } +
+
+ ); +}; +export default ProfilerView; From bf844f5a6092bd56ddb75abaaf43ba62a1e7b575 Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Sat, 25 May 2024 08:26:52 -0700 Subject: [PATCH 05/15] improve docker support, fix default sequence id bug, ensure copy of entities in UI are sanitized --- .../autogenstudio/database/dbmanager.py | 3 ++- .../autogenstudio/utils/utils.py | 6 +++++- .../apps/autogen-studio/frontend/package.json | 1 + .../frontend/src/components/header.tsx | 2 +- .../frontend/src/components/types.ts | 4 +++- .../src/components/views/builder/agents.tsx | 8 ++------ .../src/components/views/builder/models.tsx | 20 ++++--------------- .../src/components/views/builder/skills.tsx | 8 ++------ .../src/components/views/builder/workflow.tsx | 9 ++------- 9 files changed, 22 insertions(+), 39 deletions(-) diff --git a/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py b/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py index f34d3b165dc6..6a02a0a7038c 100644 --- a/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/database/dbmanager.py @@ -418,7 +418,7 @@ def unlink( primary_id: int, secondary_id: int, agent_type: Optional[str] = None, - sequence_id: Optional[int] = None, + sequence_id: Optional[int] = 0, ) -> Response: """ Unlink two entities. 
@@ -434,6 +434,7 @@ def unlink( """ status = True status_message = "" + print("primary", primary_id, "secondary", secondary_id, "sequence", sequence_id, "agent_type", agent_type) if link_type not in valid_link_types: status = False diff --git a/samples/apps/autogen-studio/autogenstudio/utils/utils.py b/samples/apps/autogen-studio/autogenstudio/utils/utils.py index 9a27044e5362..36ab78604c30 100644 --- a/samples/apps/autogen-studio/autogenstudio/utils/utils.py +++ b/samples/apps/autogen-studio/autogenstudio/utils/utils.py @@ -429,7 +429,11 @@ def load_code_execution_config(code_execution_type: CodeExecutionConfigTypes, wo if code_execution_type == CodeExecutionConfigTypes.local: executor = LocalCommandLineCodeExecutor(work_dir=work_dir) elif code_execution_type == CodeExecutionConfigTypes.docker: - executor = DockerCommandLineCodeExecutor(work_dir=work_dir) + try: + executor = DockerCommandLineCodeExecutor(work_dir=work_dir) + except Exception as e: + logger.error(f"Error initializing Docker executor: {e}") + return False elif code_execution_type == CodeExecutionConfigTypes.none: return False else: diff --git a/samples/apps/autogen-studio/frontend/package.json b/samples/apps/autogen-studio/frontend/package.json index da33db85014c..88efb9413dd6 100644 --- a/samples/apps/autogen-studio/frontend/package.json +++ b/samples/apps/autogen-studio/frontend/package.json @@ -18,6 +18,7 @@ }, "dependencies": { "@ant-design/charts": "^1.3.6", + "@ant-design/plots": "^2.2.2", "@headlessui/react": "^1.7.16", "@heroicons/react": "^2.0.18", "@mdx-js/mdx": "^1.6.22", diff --git a/samples/apps/autogen-studio/frontend/src/components/header.tsx b/samples/apps/autogen-studio/frontend/src/components/header.tsx index d0adf2e0a3ab..8ec853269233 100644 --- a/samples/apps/autogen-studio/frontend/src/components/header.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/header.tsx @@ -25,7 +25,7 @@ const Header = ({ meta, link }: any) => { const links: any[] = [ { name: "Build", href: 
"/build" }, { name: "Playground", href: "/" }, - // { name: "Gallery", href: "/gallery" }, + { name: "Gallery", href: "/gallery" }, // { name: "Data Explorer", href: "/explorer" }, ]; diff --git a/samples/apps/autogen-studio/frontend/src/components/types.ts b/samples/apps/autogen-studio/frontend/src/components/types.ts index 2ff075ad46cf..2bf90b3afd44 100644 --- a/samples/apps/autogen-studio/frontend/src/components/types.ts +++ b/samples/apps/autogen-studio/frontend/src/components/types.ts @@ -9,6 +9,8 @@ export interface IMessage { session_id?: number; connection_id?: string; workflow_id?: number; + meta?: any; + id?: number; } export interface IStatus { @@ -21,7 +23,7 @@ export interface IChatMessage { text: string; sender: "user" | "bot"; meta?: any; - msg_id: string; + id?: number; } export interface ILLMConfig { diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/agents.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/agents.tsx index 4fda0ad6cd46..0edb58d1ff5c 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/agents.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/agents.tsx @@ -141,13 +141,9 @@ const AgentsView = ({}: any) => { icon: DocumentDuplicateIcon, onClick: (e: any) => { e.stopPropagation(); - let newAgent = { ...agent }; + let newAgent = { ...sanitizeConfig(agent) }; newAgent.config.name = `${agent.config.name}_copy`; - newAgent.user_id = user?.email; - newAgent.updated_at = new Date().toISOString(); - if (newAgent.id) { - delete newAgent.id; - } + console.log("newAgent", newAgent); setNewAgent(newAgent); setShowNewAgentModal(true); }, diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/models.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/models.tsx index 2a3b0506d79c..87ae739b62e7 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/models.tsx +++ 
b/samples/apps/autogen-studio/frontend/src/components/views/builder/models.tsx @@ -6,7 +6,7 @@ import { PlusIcon, TrashIcon, } from "@heroicons/react/24/outline"; -import { Button, Dropdown, Input, MenuProps, Modal, message } from "antd"; +import { Dropdown, MenuProps, Modal, message } from "antd"; import * as React from "react"; import { IModelConfig, IStatus } from "../../types"; import { appContext } from "../../../hooks/provider"; @@ -17,14 +17,7 @@ import { timeAgo, truncateText, } from "../../utils"; -import { - BounceLoader, - Card, - CardHoverBar, - ControlRowView, - LoadingOverlay, -} from "../../atoms"; -import TextArea from "antd/es/input/TextArea"; +import { BounceLoader, Card, CardHoverBar, LoadingOverlay } from "../../atoms"; import { ModelConfigView } from "./utils/modelconfig"; const ModelsView = ({}: any) => { @@ -175,13 +168,8 @@ const ModelsView = ({}: any) => { icon: DocumentDuplicateIcon, onClick: (e: any) => { e.stopPropagation(); - let newModel = { ...model }; - newModel.model = `${model.model} Copy`; - newModel.user_id = user?.email; - newModel.updated_at = new Date().toISOString(); - if (newModel.id) { - delete newModel.id; - } + let newModel = { ...sanitizeConfig(model) }; + newModel.model = `${model.model}_copy`; setNewModel(newModel); setShowNewModelModal(true); }, diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/skills.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/skills.tsx index 77b50588dd20..128e53410552 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/skills.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/skills.tsx @@ -173,12 +173,8 @@ const SkillsView = ({}: any) => { icon: DocumentDuplicateIcon, onClick: (e: any) => { e.stopPropagation(); - let newSkill = { ...skill }; - newSkill.name = `${skill.name} Copy`; - newSkill.user_id = user?.email; - if (newSkill.id) { - delete newSkill.id; - } + let newSkill = { 
...sanitizeConfig(skill) }; + newSkill.name = `${skill.name}_copy`; setNewSkill(newSkill); setShowNewSkillModal(true); }, diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/workflow.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/workflow.tsx index 5667a0be3b51..025ad77c7dd2 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/workflow.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/workflow.tsx @@ -163,13 +163,8 @@ const WorkflowView = ({}: any) => { icon: DocumentDuplicateIcon, onClick: (e: any) => { e.stopPropagation(); - let newWorkflow = { ...workflow }; - newWorkflow.name = `${workflow.name} Copy`; - newWorkflow.user_id = user?.email; - if (newWorkflow.id) { - delete newWorkflow.id; - } - + let newWorkflow = { ...sanitizeConfig(workflow) }; + newWorkflow.name = `${workflow.name}_copy`; setNewWorkflow(newWorkflow); setShowNewWorkflowModal(true); }, From afee520c8332d513a0d2fa76eb3f42f7f68e0045 Mon Sep 17 00:00:00 2001 From: knucklessg1 Date: Thu, 6 Jun 2024 11:12:15 -0500 Subject: [PATCH 06/15] Added react-refresh to allow for hot reloading of website. --- samples/apps/autogen-studio/frontend/package.json | 1 + 1 file changed, 1 insertion(+) diff --git a/samples/apps/autogen-studio/frontend/package.json b/samples/apps/autogen-studio/frontend/package.json index 88efb9413dd6..36492fa88369 100644 --- a/samples/apps/autogen-studio/frontend/package.json +++ b/samples/apps/autogen-studio/frontend/package.json @@ -49,6 +49,7 @@ "react-dom": "^18.2.0", "react-inner-image-zoom": "^3.0.2", "react-markdown": "^8.0.7", + "react-refresh": "^0.14.2", "react-resizable": "^3.0.5", "react-router-dom": "^6.3.0", "react-syntax-highlighter": "^15.5.0", From b123f449aadaa13d688a52d75311bf4aaf203c56 Mon Sep 17 00:00:00 2001 From: knucklessg1 Date: Thu, 6 Jun 2024 11:13:19 -0500 Subject: [PATCH 07/15] Added retrieve agents. 
--- .../autogen-studio/autogenstudio/datamodel.py | 2 + .../views/builder/utils/agentconfig.tsx | 130 ++++++++++++++++++ .../views/builder/utils/selectors.tsx | 16 ++- 3 files changed, 146 insertions(+), 2 deletions(-) diff --git a/samples/apps/autogen-studio/autogenstudio/datamodel.py b/samples/apps/autogen-studio/autogenstudio/datamodel.py index 20e26f7f0a70..37378126c1d7 100644 --- a/samples/apps/autogen-studio/autogenstudio/datamodel.py +++ b/samples/apps/autogen-studio/autogenstudio/datamodel.py @@ -167,6 +167,8 @@ class AgentConfig(SQLModel, table=False): class AgentType(str, Enum): assistant = "assistant" userproxy = "userproxy" + retrieve_assistant = "retrieve_assistant" + retrieve_userproxy = "retrieve_userproxy" groupchat = "groupchat" diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/agentconfig.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/agentconfig.tsx index 885a1e402d0d..2b34c5293f2b 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/agentconfig.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/agentconfig.tsx @@ -399,7 +399,137 @@ export const AgentConfigView = ({ />
)} + {/* ====================== Retrieve Chat Config ======================= */} + {agent.type === "retrieve_userproxy" && ( +
+ { + onControlChange(value, "vector_db"); + }} + options={ + [ + { label: "ChromaDB", value: true }, + { label: "PGVector", value: false }, + ] as any + } + /> + } + /> + + { + onControlChange(e.target.value, "connection_string"); + }} + /> + } + /> + + { + onControlChange(e.target.value, "username"); + }} + /> + } + /> + + { + onControlChange(e.target.value, "password"); + }} + /> + } + /> + + { + onControlChange(e.target.value, "host"); + }} + /> + } + /> + + { + onControlChange(e.target.value, "port"); + }} + /> + } + /> + + { + onControlChange(e.target.value, "dbname"); + }} + /> + } + /> +
+
+ )}
+
diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx index eb4df630bbdf..6deee81a9d10 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/selectors.tsx @@ -312,13 +312,25 @@ export const AgentTypeSelector = ({ { label: "Assistant Agent", value: "assistant", - description: <>Plan and generate code to solve user tasks, + description: <>Plan and generate code to solve user tasks. , + icon: , + }, + { + label: "Retrieve User Proxy Agent", + value: "retrieve_userproxy", + description: <>Typically represents the user and executes code using a vector database for knowledge. , + icon: , + }, + { + label: "Retrieve Assistant Agent", + value: "retrieve_assistant", + description: <>Plan and generate code to solve user tasks using a vector database for knowledge. , icon: , }, { label: "GroupChat ", value: "groupchat", - description: <>Manage group chat interactions, + description: <>Manage group chat interactions. , icon: , }, ]; From 55ebe31b64885ca53780f2e14e9062cd31ccdd25 Mon Sep 17 00:00:00 2001 From: KnucklesSG1 Date: Thu, 6 Jun 2024 12:52:06 -0500 Subject: [PATCH 08/15] Added additional deps for the retrieve config. 
--- .../frontend/src/components/types.ts | 23 +- .../views/builder/utils/agentconfig.tsx | 888 +++++++++--------- 2 files changed, 457 insertions(+), 454 deletions(-) diff --git a/samples/apps/autogen-studio/frontend/src/components/types.ts b/samples/apps/autogen-studio/frontend/src/components/types.ts index 2bf90b3afd44..f6f4d1a7de2b 100644 --- a/samples/apps/autogen-studio/frontend/src/components/types.ts +++ b/samples/apps/autogen-studio/frontend/src/components/types.ts @@ -50,10 +50,31 @@ export interface IAgentConfig { max_round?: number; speaker_selection_method?: string; allow_repeat_speaker?: boolean; + retrieve_config?: IAgentRetrieveConfig; +} + +export interface IAgentRetrieveConfig { + docs_path?: Array; + custom_text_types?: Array; + chunk_token_size?: number; + vector_db?: "pgvector" | "chromadb"; + db_config?: IAgentDBConfig; + collection_name?: string; + get_or_create?: boolean; + overwrite?: boolean; +} + +export interface IAgentDBConfig { + connection_string?: string; + host?: string; + username?: string; + password?: string; + port?: number; + database?: string; } export interface IAgent { - type?: "assistant" | "userproxy" | "groupchat"; + type?: "assistant" | "userproxy" | "retrieve_assistant" | "retrieve_userproxy" | "groupchat"; config: IAgentConfig; created_at?: string; updated_at?: string; diff --git a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/agentconfig.tsx b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/agentconfig.tsx index 2b34c5293f2b..b7a316b2ed77 100644 --- a/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/agentconfig.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/views/builder/utils/agentconfig.tsx @@ -29,10 +29,10 @@ import TextArea from "antd/es/input/TextArea"; const { useToken } = theme; export const AgentConfigView = ({ - agent, - setAgent, - close, -}: { + agent, + setAgent, + close, + }: { agent: IAgent; setAgent: (agent: IAgent) 
=> void; close: () => void; @@ -46,11 +46,6 @@ export const AgentConfigView = ({ const [controlChanged, setControlChanged] = React.useState(false); const onControlChange = (value: any, key: string) => { - // if (key === "llm_config") { - // if (value.config_list.length === 0) { - // value = false; - // } - // } const updatedAgent = { ...agent, config: { ...agent.config, [key]: value }, @@ -69,7 +64,6 @@ export const AgentConfigView = ({ const createAgent = (agent: IAgent) => { setError(null); setLoading(true); - // const fetch; console.log("agent", agent); agent.user_id = user?.email; @@ -92,7 +86,6 @@ export const AgentConfigView = ({ message.error(data.message); } setLoading(false); - // setNewAgent(sampleAgent); }; const onError = (err: any) => { setError(err); @@ -108,464 +101,457 @@ export const AgentConfigView = ({ }; const hasChanged = - (!controlChanged || !nameValidation.status) && agent?.id !== undefined; + (!controlChanged || !nameValidation.status) && agent?.id !== undefined; return ( -
-
-
-
- - { - onControlChange(e.target.value, "name"); - }} - /> - {!nameValidation.status && ( -
- {nameValidation.message} -
- )} - - } - /> - - { - onControlChange(e.target.value, "description"); - }} - /> - } - /> - - { - onControlChange(value, "max_consecutive_auto_reply"); - }} - /> - } - /> - - { - onControlChange(value, "human_input_mode"); - }} - options={ - [ - { label: "NEVER", value: "NEVER" }, - // { label: "TERMINATE", value: "TERMINATE" }, - // { label: "ALWAYS", value: "ALWAYS" }, - ] as any - } - /> - } - /> - - { - onControlChange(e.target.value, "system_message"); - }} - /> - } - /> - -
- {" "} - - + +
+
+ { - const llm_config = { - ...agent.config.llm_config, - temperature: value, - }; - onControlChange(llm_config, "llm_config"); - }} - /> + <> + { + onControlChange(e.target.value, "name"); + }} + /> + {!nameValidation.status && ( +
+ {nameValidation.message} +
+ )} + } - /> + /> - { - onControlChange(e.target.value, "default_auto_reply"); - }} + className="mt-2" + placeholder="Agent Description" + value={agent.config.description || ""} + onChange={(e) => { + onControlChange(e.target.value, "description"); + }} /> } - /> + /> - { - const llm_config = { - ...agent.config.llm_config, - max_tokens: value, - }; - onControlChange(llm_config, "llm_config"); - }} + min={1} + max={agent.type === "groupchat" ? 600 : 30} + defaultValue={agent.config.max_consecutive_auto_reply} + step={1} + onChange={(value: any) => { + onControlChange(value, "max_consecutive_auto_reply"); + }} /> } - /> - + + { - onControlChange(value, "code_execution_config"); - }} - options={ - [ - { label: "None", value: "none" }, - { label: "Local", value: "local" }, - { label: "Docker", value: "docker" }, - ] as any - } + className="mt-2 w-full" + defaultValue={agent.config.human_input_mode} + onChange={(value: any) => { + onControlChange(value, "human_input_mode"); + }} + options={ + [ + { label: "NEVER", value: "NEVER" }, + ] as any + } /> } - /> - -
-
- {/* ====================== Group Chat Config ======================= */} - {agent.type === "groupchat" && ( -
- { - if (agent?.config) { - onControlChange(value, "speaker_selection_method"); - } - }} - options={ - [ - { label: "Auto", value: "auto" }, - { label: "Round Robin", value: "round_robin" }, - { label: "Random", value: "random" }, - ] as any - } - /> - } /> { - onControlChange(e.target.value, "admin_name"); - }} - /> - } + title="System Message" + className="mt-4" + description="Free text to control agent behavior" + value={agent.config.system_message} + control={ +