diff --git a/samples/apps/autogen-studio/MANIFEST.in b/samples/apps/autogen-studio/MANIFEST.in index 6406a9240a6b..8882713fa2a6 100644 --- a/samples/apps/autogen-studio/MANIFEST.in +++ b/samples/apps/autogen-studio/MANIFEST.in @@ -1,5 +1,7 @@ recursive-include autogenstudio/web/ui * +include autogenstudio/web/database.sqlite recursive-exclude notebooks * + recursive-exclude frontend * recursive-exclude docs * recursive-exclude tests * diff --git a/samples/apps/autogen-studio/README.md b/samples/apps/autogen-studio/README.md index 6eb2a1acd877..0f007731d1fa 100644 --- a/samples/apps/autogen-studio/README.md +++ b/samples/apps/autogen-studio/README.md @@ -28,6 +28,8 @@ Project Structure: ### Installation +There are two ways to install AutoGen Studio: from PyPI or from source. We **recommend installing from PyPI** unless you plan to modify the source code. + 1. **Install from PyPI** We recommend using a virtual environment (e.g., conda) to avoid conflicts with existing Python packages. With Python 3.10 or newer active in your virtual environment, use pip to install AutoGen Studio: @@ -108,6 +110,18 @@ The agent workflow responds by _writing and executing code_ to create a python p +## Contribution Guide + +We welcome contributions to AutoGen Studio. We recommend the following general steps to contribute to the project: + +- Review the overall AutoGen project [contribution guide](https://github.com/microsoft/autogen?tab=readme-ov-file#contributing). +- Review the AutoGen Studio [roadmap](https://github.com/microsoft/autogen/issues/737) to get a sense of the project's current priorities. Help is especially appreciated with Studio issues tagged `help-wanted`. +- Initiate a discussion on the roadmap issue, or open a new issue, to discuss your proposed contribution. +- Use the AutoGen Studio [dev branch](https://github.com/microsoft/autogen/tree/autogenstudio) as the base for your contribution, so that it stays aligned with the latest changes in the project. +- Submit a pull request with your contribution! +- If you are modifying AutoGen Studio, note that it has its own devcontainer; see the instructions in `.devcontainer/README.md` to use it. +- Use the tag `studio` for any issues, questions, and PRs related to Studio. + ## FAQ **Q: Where can I adjust the default skills, agent and workflow configurations?** @@ -119,6 +133,10 @@ A: To reset your conversation history, you can delete the `database.sqlite` file **Q: Is it possible to view the output and messages generated by the agents during interactions?** A: Yes, you can view the generated messages in the debug console of the web UI, providing insights into the agent interactions. Alternatively, you can inspect the `database.sqlite` file for a comprehensive record of messages. +**Q: Can I use other models with AutoGen Studio?** +A: Yes. AutoGen standardizes on the OpenAI model API format, and you can use any API server that offers an OpenAI-compliant endpoint. In the AutoGen Studio UI, each agent has an `llm_config` field where you can input your model endpoint details, including `model name`, `api key`, `base url`, `model type` and `api version`. For Azure OpenAI models, you can find these details in the Azure portal. Note that for Azure OpenAI, the `model name` is the deployment ID or engine, and the `model type` is "azure". +For other OSS models, we recommend using a server such as vLLM to instantiate an OpenAI-compliant endpoint.
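For illustration, the fields described in this FAQ answer map onto an AutoGen `config_list` roughly as sketched below. This is a hedged sketch, not code from this change: every key, URL, and deployment name is a placeholder, and the local entry assumes a vLLM server on its default port.

```python
# Sketch of an llm_config covering the three cases described above.
# All API keys, base URLs, and deployment names are placeholders.
llm_config = {
    "config_list": [
        {
            # OpenAI-hosted model: the model name and an API key suffice.
            "model": "gpt-4-1106-preview",
            "api_key": "sk-...",  # placeholder
        },
        {
            # Azure OpenAI: "model" is the deployment ID (engine), api_type is "azure".
            "model": "my-gpt4-deployment",  # placeholder deployment ID
            "api_key": "<azure-api-key>",  # placeholder
            "base_url": "https://<resource>.openai.azure.com",  # placeholder
            "api_type": "azure",
            "api_version": "2023-07-01-preview",  # placeholder
        },
        {
            # OSS model served behind an OpenAI-compliant endpoint such as vLLM.
            "model": "TheBloke/zephyr-7B-alpha-AWQ",
            "api_key": "EMPTY",
            "base_url": "http://localhost:8000/v1",  # placeholder endpoint
        },
    ],
    "temperature": 0,
}
```

These are the same fields persisted by the `models` defaults added to `dbdefaults.json` later in this diff.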
+ ## Acknowledgements AutoGen Studio is based on the [AutoGen](https://microsoft.github.io/autogen) project. It was adapted from a research prototype built in October 2023 (original credits: Gagan Bansal, Adam Fourney, Victor Dibia, Piali Choudhury, Saleema Amershi, Ahmed Awadallah, Chi Wang). diff --git a/samples/apps/autogen-studio/autogenstudio/chatmanager.py b/samples/apps/autogen-studio/autogenstudio/chatmanager.py index 52dd37c169f0..034a139e9542 100644 --- a/samples/apps/autogen-studio/autogenstudio/chatmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/chatmanager.py @@ -19,7 +19,6 @@ def chat(self, message: Message, history: List, flow_config: AgentWorkFlowConfig if flow_config is None: flow_config = get_default_agent_config(scratch_dir) - # print("Flow config: ", flow_config) flow = AutoGenWorkFlowManager(config=flow_config, history=history, work_dir=scratch_dir) message_text = message.content.strip() diff --git a/samples/apps/autogen-studio/autogenstudio/cli.py b/samples/apps/autogen-studio/autogenstudio/cli.py index 22692700ff06..ed4e89f2a3ef 100644 --- a/samples/apps/autogen-studio/autogenstudio/cli.py +++ b/samples/apps/autogen-studio/autogenstudio/cli.py @@ -4,6 +4,7 @@ import uvicorn from .version import VERSION +from .utils.dbutils import DBManager app = typer.Typer() @@ -15,12 +16,23 @@ def ui( workers: int = 1, reload: Annotated[bool, typer.Option("--reload")] = False, docs: bool = False, + appdir: str = None, ): """ - Launch the AutoGen Studio UI CLI .Pass in parameters host, port, workers, and reload to override the default values. + Run the AutoGen Studio UI. + + Args: + host (str, optional): Host to run the UI on. Defaults to 127.0.0.1 (localhost). + port (int, optional): Port to run the UI on. Defaults to 8081. + workers (int, optional): Number of workers to run the UI with. Defaults to 1. + reload (bool, optional): Whether to reload the UI on code changes. Defaults to False. + docs (bool, optional): Whether to generate API docs. Defaults to False. + appdir (str, optional): Path to the AutoGen Studio app directory. Defaults to None. """ - os.environ["AUTOGENUI_API_DOCS"] = str(docs) + os.environ["AUTOGENSTUDIO_API_DOCS"] = str(docs) + if appdir: + os.environ["AUTOGENSTUDIO_APPDIR"] = appdir uvicorn.run( "autogenstudio.web.app:app", @@ -37,7 +49,7 @@ def version(): Print the version of the AutoGen Studio UI CLI. 
""" - typer.echo(f"AutoGen Studio UI CLI version: {VERSION}") + typer.echo(f"AutoGen Studio CLI version: {VERSION}") def run(): diff --git a/samples/apps/autogen-studio/autogenstudio/datamodel.py b/samples/apps/autogen-studio/autogenstudio/datamodel.py index be23dbdb086b..283cd2610d59 100644 --- a/samples/apps/autogen-studio/autogenstudio/datamodel.py +++ b/samples/apps/autogen-studio/autogenstudio/datamodel.py @@ -58,7 +58,7 @@ def dict(self): # autogenflow data models @dataclass -class ModelConfig: +class Model: """Data model for Model Config item in LLMConfig for AutoGen""" model: str @@ -66,17 +66,38 @@ class ModelConfig: base_url: Optional[str] = None api_type: Optional[str] = None api_version: Optional[str] = None + id: Optional[str] = None + timestamp: Optional[str] = None + user_id: Optional[str] = None + description: Optional[str] = None + + def dict(self): + result = asdict(self) + return result + + def __post_init__(self): + if self.id is None: + self.id = str(uuid.uuid4()) + if self.timestamp is None: + self.timestamp = datetime.now().isoformat() + if self.user_id is None: + self.user_id = "default" @dataclass class LLMConfig: """Data model for LLM Config for AutoGen""" - config_list: List[Any] = field(default_factory=List) + config_list: List[Any] = field(default_factory=list) temperature: float = 0 cache_seed: Optional[Union[int, None]] = None timeout: Optional[int] = None + def dict(self): + result = asdict(self) + result["config_list"] = [c.dict() for c in self.config_list] + return result + @dataclass class AgentConfig: @@ -101,8 +122,8 @@ def dict(self): class AgentFlowSpec: """Data model to help flow load agents from config""" - type: Literal["assistant", "userproxy", "groupchat"] - config: AgentConfig = field(default_factory=AgentConfig) + type: Literal["assistant", "userproxy"] + config: AgentConfig id: Optional[str] = None timestamp: Optional[str] = None user_id: Optional[str] = None @@ -122,6 +143,51 @@ def dict(self): return result +@dataclass +class GroupChatConfig: + """Data model for GroupChat Config for AutoGen""" + + agents: List[AgentFlowSpec] = field(default_factory=list) + admin_name: str = "Admin" + messages: List[Dict] = field(default_factory=list) + max_round: Optional[int] = 10 + admin_name: Optional[str] = "Admin" + speaker_selection_method: Optional[str] = "auto" + allow_repeat_speaker: Optional[Union[bool, List[AgentConfig]]] = True + + def dict(self): + result = asdict(self) + result["agents"] = [a.dict() for a in self.agents] + return result + + +@dataclass +class GroupChatFlowSpec: + """Data model to help flow load agents from config""" + + type: Literal["groupchat"] + config: AgentConfig = field(default_factory=AgentConfig) + groupchat_config: Optional[GroupChatConfig] = field(default_factory=GroupChatConfig) + id: Optional[str] = None + timestamp: Optional[str] = None + user_id: Optional[str] = None + description: Optional[str] = None + + def __post_init__(self): + if self.timestamp is None: + self.timestamp = datetime.now().isoformat() + if self.id is None: + self.id = str(uuid.uuid4()) + if self.user_id is None: + self.user_id = "default" + + def dict(self): + result = asdict(self) + # result["config"] = self.config.dict() + # result["groupchat_config"] = self.groupchat_config.dict() + return result + + @dataclass class AgentWorkFlowConfig: """Data model for Flow Config for AutoGen""" @@ -129,17 +195,28 @@ class AgentWorkFlowConfig: name: str description: str sender: AgentFlowSpec - receiver: Union[AgentFlowSpec, List[AgentFlowSpec]] - type: 
Literal["default", "groupchat"] = "default" + receiver: Union[AgentFlowSpec, GroupChatFlowSpec] + type: Literal["twoagents", "groupchat"] = "twoagents" id: Optional[str] = None user_id: Optional[str] = None timestamp: Optional[str] = None # how the agent message summary is generated. last: only last message is used, none: no summary, llm: use llm to generate summary summary_method: Optional[Literal["last", "none", "llm"]] = "last" + def init_spec(self, spec: Dict): + """initialize the agent spec""" + if not isinstance(spec, dict): + spec = spec.dict() + if spec["type"] == "groupchat": + return GroupChatFlowSpec(**spec) + else: + return AgentFlowSpec(**spec) + def __post_init__(self): if self.id is None: self.id = str(uuid.uuid4()) + self.sender = self.init_spec(self.sender) + self.receiver = self.init_spec(self.receiver) if self.user_id is None: self.user_id = "default" if self.timestamp is None: @@ -148,10 +225,7 @@ def __post_init__(self): def dict(self): result = asdict(self) result["sender"] = self.sender.dict() - if isinstance(self.receiver, list): - result["receiver"] = [r.dict() for r in self.receiver] - else: - result["receiver"] = self.receiver.dict() + result["receiver"] = self.receiver.dict() return result @@ -221,3 +295,4 @@ class DBWebRequestModel(object): tags: Optional[List[str]] = None agent: Optional[AgentFlowSpec] = None workflow: Optional[AgentWorkFlowConfig] = None + model: Optional[Model] = None diff --git a/samples/apps/autogen-studio/autogenstudio/utils/dbdefaults.json b/samples/apps/autogen-studio/autogenstudio/utils/dbdefaults.json index 570323652594..e3fd806970ed 100644 --- a/samples/apps/autogen-studio/autogenstudio/utils/dbdefaults.json +++ b/samples/apps/autogen-studio/autogenstudio/utils/dbdefaults.json @@ -1,4 +1,24 @@ { + "models": [ + { + "model": "gpt-4", + "api_key": "Your Azure API key here", + "base_url": "Your Azure base URL here", + "api_type": "azure", + "api_version": "Your Azure API version here", + "description": "Azure Open AI model configuration" + }, + { + "model": "gpt-4-1106-preview", + "description": "OpenAI model configuration" + }, + { + "model": "TheBloke/zephyr-7B-alpha-AWQ", + "api_key": "EMPTY", + "base_url": "Your Model Endpoint", + "description": "Zephyr (local model configuration)" + } + ], "agents": [ { "type": "userproxy", @@ -79,14 +99,15 @@ ], "workflows": [ { - "name": "General Agent Workflow", - "description": "This workflow is used for general purpose tasks.", + "name": "Visualization Agent Workflow", + "description": "This workflow is used for visualization tasks.", "sender": { "type": "userproxy", + "description": "User proxy agent to execute code", "config": { "name": "userproxy", "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 10, + "max_consecutive_auto_reply": 5, "system_message": "", "llm_config": false, "code_execution_config": { @@ -97,23 +118,9 @@ }, "receiver": { "type": "assistant", - "description": "Default assistant to generate plans and write code to solve tasks.", - "skills": [ - { - "title": "find_papers_arxiv", - "description": "This skill finds relevant papers on arXiv given a query.", - "content": "import os\nimport re\nimport json\nimport hashlib\n\n\ndef search_arxiv(query, max_results=10):\n \"\"\"\n Searches arXiv for the given query using the arXiv API, then returns the search results. This is a helper function. 
In most cases, callers will want to use 'find_relevant_papers( query, max_results )' instead.\n\n Args:\n query (str): The search query.\n max_results (int, optional): The maximum number of search results to return. Defaults to 10.\n\n Returns:\n jresults (list): A list of dictionaries. Each dictionary contains fields such as 'title', 'authors', 'summary', and 'pdf_url'\n\n Example:\n >>> results = search_arxiv(\"attention is all you need\")\n >>> print(results)\n \"\"\"\n\n import arxiv\n\n key = hashlib.md5((\"search_arxiv(\" + str(max_results) + \")\" + query).encode(\"utf-8\")).hexdigest()\n # Create the cache if it doesn't exist\n cache_dir = \".cache\"\n if not os.path.isdir(cache_dir):\n os.mkdir(cache_dir)\n\n fname = os.path.join(cache_dir, key + \".cache\")\n\n # Cache hit\n if os.path.isfile(fname):\n fh = open(fname, \"r\", encoding=\"utf-8\")\n data = json.loads(fh.read())\n fh.close()\n return data\n\n # Normalize the query, removing operator keywords\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s(and|or|not)\\s\", \" \", \" \" + query + \" \")\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s+\", \" \", query).strip()\n\n search = arxiv.Search(query=query, max_results=max_results, sort_by=arxiv.SortCriterion.Relevance)\n\n jresults = list()\n for result in search.results():\n r = dict()\n r[\"entry_id\"] = result.entry_id\n r[\"updated\"] = str(result.updated)\n r[\"published\"] = str(result.published)\n r[\"title\"] = result.title\n r[\"authors\"] = [str(a) for a in result.authors]\n r[\"summary\"] = result.summary\n r[\"comment\"] = result.comment\n r[\"journal_ref\"] = result.journal_ref\n r[\"doi\"] = result.doi\n r[\"primary_category\"] = result.primary_category\n r[\"categories\"] = result.categories\n r[\"links\"] = [str(link) for link in result.links]\n r[\"pdf_url\"] = result.pdf_url\n jresults.append(r)\n\n if len(jresults) > max_results:\n jresults = jresults[0:max_results]\n\n # Save to cache\n fh = open(fname, \"w\")\n fh.write(json.dumps(jresults))\n fh.close()\n return jresults\n", - "file_name": "find_papers_arxiv.py" - }, - { - "title": "generate_images", - "description": "This skill generates images from a given query using OpenAI's DALL-E model and saves them to disk.", - "content": "from typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\n\nfrom openai import OpenAI\n\n\ndef generate_and_save_images(query: str, image_size: str = \"1024x1024\") -> List[str]:\n \"\"\"\n Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image.\n\n :param query: A natural language description of the image to be generated.\n :param image_size: The size of the image to be generated. 
(default is \"1024x1024\")\n :return: A list of filenames for the saved images.\n \"\"\"\n\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = str(uuid.uuid4()) + \".png\" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n print(f\"Image saved to {file_path}\")\n saved_files.append(str(file_path))\n else:\n print(f\"Failed to download the image from {img_url}\")\n else:\n print(\"No image data found in the response!\")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images(\"A cute baby sea otter\")\n", - "file_name": "generate_images.py" - } - ], + "description": "Visualization assistant to create plans and write code to generate visualizations", "config": { - "name": "primary_assistant", + "name": "visualization_assistant", "llm_config": { "config_list": [ { @@ -132,18 +139,18 @@ "cache_seed": null }, "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 15, - "system_message": "You are a helpful assistant that can use available functions when needed to solve problems. At each point, do your best to determine if the user's request has been addressed. IF THE REQUEST HAS NOT BEEN ADDRESSED, RESPOND WITH CODE TO ADDRESS IT. IF A FAILURE OCCURRED (e.g., due to a missing library) AND SOME ADDITIONAL CODE WAS WRITTEN (e.g. code to install the library), ENSURE THAT THE ORIGINAL CODE TO ADDRESS THE TASK STILL GETS EXECUTED. If the request HAS been addressed, respond with a summary of the result. The summary must be written as a coherent helpful response to the user request e.g. 'Sure, here is result to your request ' or 'The tallest mountain in Africa is ..' etc. The summary MUST end with the word TERMINATE. If the user request is pleasantry or greeting, you should respond with a pleasantry or greeting and TERMINATE." + "max_consecutive_auto_reply": 4, + "system_message": "Your task is to ensure you generate a high quality visualization for the user. Your visualizations must follow best practices and you must articulate your reasoning for your choices. The visualization must not have grid or outline box. The visualization should have an APPROPRIATE ASPECT RATIO e..g rectangular for time series data. The title must be bold. Importantly, if THE CHART IS A LINE CHART, you MUST ADD A LINE OF BEST FIT and ADD TEXT ON THE SLOPE OF EACH LINE. Note that today's date is 12/10/2023. At each point, do your best to determine if the user's request has been addressed and if so, respond with a summary. The summary must be written as a coherent helpful response to the user request e.g. 'Sure, here is result to your request '. The summary MUST end with the word TERMINATE. If the user request is pleasantry or greeting, you should respond with a pleasantry or greeting and TERMINATE." 
} }, - "type": "default" + "type": "twoagents" }, { - "name": "Visualization Agent Workflow", - "description": "This workflow is used for visualization tasks.", + "name": "Travel Agent Group Chat Workflow", + "description": "A group chat workflow", + "type": "groupchat", "sender": { "type": "userproxy", - "description": "User proxy agent to execute code", "config": { "name": "userproxy", "human_input_mode": "NEVER", @@ -156,11 +163,127 @@ } } }, + "receiver": { + "type": "groupchat", + "config": { + "name": "group_chat_manager", + "llm_config": { + "config_list": [ + { + "model": "gpt-4-1106-preview" + } + ], + "temperature": 0.1, + "timeout": 600, + "cache_seed": 42 + }, + "human_input_mode": "NEVER", + "system_message": "Group chat manager" + }, + "groupchat_config": { + "admin_name": "Admin", + "max_round": 10, + "speaker_selection_method": "auto", + "agents": [ + { + "type": "assistant", + "config": { + "name": "primary_assistant", + "llm_config": { + "config_list": [ + { + "model": "gpt-4-1106-preview" + } + ], + "temperature": 0.1, + "timeout": 600, + "cache_seed": 42 + }, + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 8, + "system_message": "You are a helpful assistant that can suggest a travel itinerary for a user. You are the primary coordinator who will receive suggestions or advice from other agents (local_assistant, language_assistant). You must ensure that the final plan integrates the suggestions from other agents or team members. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN that ends with the word TERMINATE." + } + }, + { + "type": "assistant", + "config": { + "name": "local_assistant", + "llm_config": { + "config_list": [ + { + "model": "gpt-4-1106-preview" + } + ], + "temperature": 0.1, + "timeout": 600, + "cache_seed": 42 + }, + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 8, + "system_message": "You are a helpful assistant that can review travel plans, providing critical feedback on how the trip can be enriched for the enjoyment of the local culture. If the plan already includes local experiences, you can mention that the plan is satisfactory, with rationale." + } + }, + { + "type": "assistant", + "config": { + "name": "language_assistant", + "llm_config": { + "config_list": [ + { + "model": "gpt-4-1106-preview" + } + ], + "temperature": 0.1, + "timeout": 600, + "cache_seed": 42 + }, + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 8, + "system_message": "You are a helpful assistant that can review travel plans, providing feedback on important/critical tips about how best to address language or communication challenges for the given destination. If the plan already includes language tips, you can mention that the plan is satisfactory, with rationale." 
+ } + } + ] + } + } + }, + { + "name": "General Agent Workflow", + "description": "This workflow is used for general purpose tasks.", + "sender": { + "type": "userproxy", + "description": "A user proxy agent that executes code.", + "config": { + "name": "userproxy", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 10, + "system_message": "", + "llm_config": false, + "code_execution_config": { + "work_dir": null, + "use_docker": false + } + } + }, "receiver": { "type": "assistant", - "description": "Visualization assistant to create plans and write code to generate visualizations", + "description": "Default assistant to generate plans and write code to solve tasks.", + "skills": [ + { + "title": "find_papers_arxiv", + "description": "This skill finds relevant papers on arXiv given a query.", + "content": "import os\nimport re\nimport json\nimport hashlib\n\n\ndef search_arxiv(query, max_results=10):\n \"\"\"\n Searches arXiv for the given query using the arXiv API, then returns the search results. This is a helper function. In most cases, callers will want to use 'find_relevant_papers( query, max_results )' instead.\n\n Args:\n query (str): The search query.\n max_results (int, optional): The maximum number of search results to return. Defaults to 10.\n\n Returns:\n jresults (list): A list of dictionaries. Each dictionary contains fields such as 'title', 'authors', 'summary', and 'pdf_url'\n\n Example:\n >>> results = search_arxiv(\"attention is all you need\")\n >>> print(results)\n \"\"\"\n\n import arxiv\n\n key = hashlib.md5((\"search_arxiv(\" + str(max_results) + \")\" + query).encode(\"utf-8\")).hexdigest()\n # Create the cache if it doesn't exist\n cache_dir = \".cache\"\n if not os.path.isdir(cache_dir):\n os.mkdir(cache_dir)\n\n fname = os.path.join(cache_dir, key + \".cache\")\n\n # Cache hit\n if os.path.isfile(fname):\n fh = open(fname, \"r\", encoding=\"utf-8\")\n data = json.loads(fh.read())\n fh.close()\n return data\n\n # Normalize the query, removing operator keywords\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s(and|or|not)\\s\", \" \", \" \" + query + \" \")\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s+\", \" \", query).strip()\n\n search = arxiv.Search(query=query, max_results=max_results, sort_by=arxiv.SortCriterion.Relevance)\n\n jresults = list()\n for result in search.results():\n r = dict()\n r[\"entry_id\"] = result.entry_id\n r[\"updated\"] = str(result.updated)\n r[\"published\"] = str(result.published)\n r[\"title\"] = result.title\n r[\"authors\"] = [str(a) for a in result.authors]\n r[\"summary\"] = result.summary\n r[\"comment\"] = result.comment\n r[\"journal_ref\"] = result.journal_ref\n r[\"doi\"] = result.doi\n r[\"primary_category\"] = result.primary_category\n r[\"categories\"] = result.categories\n r[\"links\"] = [str(link) for link in result.links]\n r[\"pdf_url\"] = result.pdf_url\n jresults.append(r)\n\n if len(jresults) > max_results:\n jresults = jresults[0:max_results]\n\n # Save to cache\n fh = open(fname, \"w\")\n fh.write(json.dumps(jresults))\n fh.close()\n return jresults\n", + "file_name": "find_papers_arxiv.py" + }, + { + "title": "generate_images", + "description": "This skill generates images from a given query using OpenAI's DALL-E model and saves them to disk.", + "content": "from typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\n\nfrom openai import OpenAI\n\n\ndef generate_and_save_images(query: str, 
image_size: str = \"1024x1024\") -> List[str]:\n \"\"\"\n Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image.\n\n :param query: A natural language description of the image to be generated.\n :param image_size: The size of the image to be generated. (default is \"1024x1024\")\n :return: A list of filenames for the saved images.\n \"\"\"\n\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = str(uuid.uuid4()) + \".png\" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n print(f\"Image saved to {file_path}\")\n saved_files.append(str(file_path))\n else:\n print(f\"Failed to download the image from {img_url}\")\n else:\n print(\"No image data found in the response!\")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images(\"A cute baby sea otter\")\n", + "file_name": "generate_images.py" + } + ], "config": { - "name": "visualization_assistant", + "name": "primary_assistant", "llm_config": { "config_list": [ { @@ -179,11 +302,11 @@ "cache_seed": null }, "human_input_mode": "NEVER", - "max_consecutive_auto_reply": 4, - "system_message": "Your task is to ensure you generate a high quality visualization for the user. Your visualizations must follow best practices and you must articulate your reasoning for your choices. The visualization must not have grid or outline box. The visualization should have an APPROPRIATE ASPECT RATIO e..g rectangular for time series data. The title must be bold. Importantly, if THE CHART IS A LINE CHART, you MUST ADD A LINE OF BEST FIT and ADD TEXT ON THE SLOPE OF EACH LINE. Note that today's date is 12/10/2023. At each point, do your best to determine if the user's request has been addressed and if so, respond with a summary. The summary must be written as a coherent helpful response to the user request e.g. 'Sure, here is result to your request '. The summary MUST end with the word TERMINATE. If the user request is pleasantry or greeting, you should respond with a pleasantry or greeting and TERMINATE." + "max_consecutive_auto_reply": 15, + "system_message": "You are a helpful assistant that can use available functions when needed to solve problems. At each point, do your best to determine if the user's request has been addressed. IF THE REQUEST HAS NOT BEEN ADDRESSED, RESPOND WITH CODE TO ADDRESS IT. IF A FAILURE OCCURRED (e.g., due to a missing library) AND SOME ADDITIONAL CODE WAS WRITTEN (e.g. code to install the library), ENSURE THAT THE ORIGINAL CODE TO ADDRESS THE TASK STILL GETS EXECUTED. If the request HAS been addressed, respond with a summary of the result. The summary must be written as a coherent helpful response to the user request e.g. 'Sure, here is result to your request ' or 'The tallest mountain in Africa is ..' etc. 
The summary MUST end with the word TERMINATE. If the user request is a pleasantry or greeting, you should respond with a pleasantry or greeting and TERMINATE." } }, - "type": "default" + "type": "twoagents" } ] } diff --git a/samples/apps/autogen-studio/autogenstudio/utils/dbutils.py b/samples/apps/autogen-studio/autogenstudio/utils/dbutils.py index 17d05f73744a..d1891081b3c5 100644 --- a/samples/apps/autogen-studio/autogenstudio/utils/dbutils.py +++ b/samples/apps/autogen-studio/autogenstudio/utils/dbutils.py @@ -4,7 +4,23 @@ import threading import os from typing import Any, List, Dict, Optional, Tuple -from ..datamodel import AgentFlowSpec, AgentWorkFlowConfig, Gallery, Message, Session, Skill +from ..datamodel import AgentFlowSpec, AgentWorkFlowConfig, Gallery, Message, Model, Session, Skill + + +MODELS_TABLE_SQL = """ + CREATE TABLE IF NOT EXISTS models ( + id TEXT NOT NULL, + user_id TEXT NOT NULL, + timestamp DATETIME NOT NULL, + model TEXT, + api_key TEXT, + base_url TEXT, + api_type TEXT, + api_version TEXT, + description TEXT, + UNIQUE (id, user_id) + ) + """ MESSAGES_TABLE_SQL = """ @@ -136,7 +152,10 @@ def init_db(self, path: str = "database.sqlite", **kwargs: Any) -> None: self.conn = sqlite3.connect(path, check_same_thread=False, **kwargs) self.cursor = self.conn.cursor() - # Create the table with the specified columns, appropriate data types, and a UNIQUE constraint on (root_msg_id, msg_id) + # Create the models table + self.cursor.execute(MODELS_TABLE_SQL) + + # Create the messages table self.cursor.execute(MESSAGES_TABLE_SQL) # Create a sessions table @@ -160,6 +179,24 @@ def init_db(self, path: str = "database.sqlite", **kwargs: Any) -> None: data = json.load(json_file) skills = data["skills"] agents = data["agents"] + models = data["models"] + for model in models: + model = Model(**model) + self.cursor.execute( + "INSERT INTO models (id, user_id, timestamp, model, api_key, base_url, api_type, api_version, description) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", + ( + model.id, + "default", + model.timestamp, + model.model, + model.api_key, + model.base_url, + model.api_type, + model.api_version, + model.description, + ), + ) + for skill in skills: skill = Skill(**skill) @@ -229,7 +266,7 @@ def query(self, query: str, args: Tuple = (), return_json: bool = False) -> List def commit(self) -> None: """ Commits the current transaction to the database. """ self.conn.commit() @@ -240,6 +277,96 @@ def close(self) -> None: self.conn.close() +def get_models(user_id: str, dbmanager: DBManager) -> List[dict]: + """ + Get all models for a given user from the database. + + Args: + user_id: The user id to get models for + dbmanager: The DBManager instance to interact with the database + + Returns: + A list of model configurations + """ + query = "SELECT * FROM models WHERE user_id = ? OR user_id = ?" + args = (user_id, "default") + results = dbmanager.query(query, args, return_json=True) + return results + + +def upsert_model(model: Model, dbmanager: DBManager) -> List[dict]: + """ + Insert or update a model configuration in the database. 
+ + Args: + model: The Model object containing model configuration data + dbmanager: The DBManager instance to interact with the database + + Returns: + A list of model configurations + """ + + # Check if the model config with the provided id already exists in the database + existing_model = get_item_by_field("models", "id", model.id, dbmanager) + + if existing_model: + # If the model config exists, update it with the new data + updated_data = { + "model": model.model, + "api_key": model.api_key, + "base_url": model.base_url, + "api_type": model.api_type, + "api_version": model.api_version, + "user_id": model.user_id, + "timestamp": model.timestamp, + "description": model.description, + } + update_item("models", model.id, updated_data, dbmanager) + else: + # If the model config does not exist, insert a new one + query = """ + INSERT INTO models (id, user_id, timestamp, model, api_key, base_url, api_type, api_version, description) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) + """ + args = ( + model.id, + model.user_id, + model.timestamp, + model.model, + model.api_key, + model.base_url, + model.api_type, + model.api_version, + model.description, + ) + dbmanager.query(query=query, args=args) + + # Return the inserted or updated model config + models = get_models(model.user_id, dbmanager) + return models + + +def delete_model(model: Model, dbmanager: DBManager) -> List[dict]: + """ + Delete a model configuration from the database where id = model.id and user_id = model.user_id. + + Args: + model: The Model object containing model configuration data + dbmanager: The DBManager instance to interact with the database + + Returns: + A list of the remaining model configurations + """ + + query = "DELETE FROM models WHERE id = ? AND user_id = ?" + args = (model.id, model.user_id) + dbmanager.query(query=query, args=args) + + # Return the remaining model configs + models = get_models(model.user_id, dbmanager) + return models + + def create_message(message: Message, dbmanager: DBManager) -> None: """ Save a message in the database using the provided database manager. @@ -615,6 +742,8 @@ def upsert_workflow(workflow: AgentWorkFlowConfig, dbmanager: DBManager) -> List """ existing_workflow = get_item_by_field("workflows", "id", workflow.id, dbmanager) if existing_workflow: updated_data = { "user_id": workflow.user_id, diff --git a/samples/apps/autogen-studio/autogenstudio/utils/utils.py b/samples/apps/autogen-studio/autogenstudio/utils/utils.py index 4306c897cea5..ce2778f09f8e 100644 --- a/samples/apps/autogen-studio/autogenstudio/utils/utils.py +++ b/samples/apps/autogen-studio/autogenstudio/utils/utils.py @@ -4,6 +4,7 @@ from typing import List, Dict, Tuple, Union import os import shutil +from pathlib import Path import re import autogen from ..datamodel import AgentConfig, AgentFlowSpec, AgentWorkFlowConfig, LLMConfig, Skill @@ -25,6 +26,9 @@ def clear_folder(folder_path: str) -> None: :param folder_path: The path to the folder to clear. 
""" + # exit if the folder does not exist + if not os.path.exists(folder_path): + return for file in os.listdir(folder_path): file_path = os.path.join(folder_path, file) if os.path.isfile(file_path): @@ -181,7 +185,10 @@ def get_modified_files( shutil.copy2(file_path, dest_file_path) # Extract user id from the dest_dir and file path - uid = dest_dir.split("/")[-1] + + dest_dir_as_path = Path(dest_dir) + uid = dest_dir_as_path.name + relative_file_path = os.path.relpath(dest_file_path, start=dest_dir) file_type = get_file_type(dest_file_path) file_dict = { @@ -253,14 +260,9 @@ def get_skills_from_prompt(skills: List[Skill], work_dir: str) -> str: if not os.path.exists(work_dir): os.makedirs(work_dir) - # check if skills.py exist. if exists, append to the file, else create a new file and write to it - - if os.path.exists(os.path.join(work_dir, "skills.py")): - with open(os.path.join(work_dir, "skills.py"), "a", encoding="utf-8") as f: - f.write(prompt) - else: - with open(os.path.join(work_dir, "skills.py"), "w", encoding="utf-8") as f: - f.write(prompt) + # overwrite skills.py in work_dir + with open(os.path.join(work_dir, "skills.py"), "w", encoding="utf-8") as f: + f.write(prompt) return instruction + prompt diff --git a/samples/apps/autogen-studio/autogenstudio/version.py b/samples/apps/autogen-studio/autogenstudio/version.py index 39d2c6a40f65..51feb3765672 100644 --- a/samples/apps/autogen-studio/autogenstudio/version.py +++ b/samples/apps/autogen-studio/autogenstudio/version.py @@ -1,3 +1,4 @@ -VERSION = "0.0.18a" +VERSION = "0.0.33a" __version__ = VERSION +__db_version__ = "0.0.1" APP_NAME = "autogenstudio" diff --git a/samples/apps/autogen-studio/autogenstudio/web/app.py b/samples/apps/autogen-studio/autogenstudio/web/app.py index 161aae91aa3f..6934c7e8aaf0 100644 --- a/samples/apps/autogen-studio/autogenstudio/web/app.py +++ b/samples/apps/autogen-studio/autogenstudio/web/app.py @@ -5,7 +5,7 @@ from fastapi.middleware.cors import CORSMiddleware from fastapi.staticfiles import StaticFiles from fastapi import HTTPException - +from ..version import VERSION from ..datamodel import ( ChatWebRequestModel, @@ -37,15 +37,16 @@ ) -root_file_path = os.path.dirname(os.path.abspath(__file__)) +root_file_path = os.environ.get("AUTOGENSTUDIO_APPDIR") or os.path.dirname(os.path.abspath(__file__)) # init folders skills, workdir, static, files etc folders = init_webserver_folders(root_file_path) +ui_folder_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "ui") api = FastAPI(root_path="/api") # mount an api route such that the main route serves the ui and the /api app.mount("/api", api) -app.mount("/", StaticFiles(directory=folders["static_folder_root"], html=True), name="ui") +app.mount("/", StaticFiles(directory=ui_folder_path, html=True), name="ui") api.mount("/files", StaticFiles(directory=folders["files_static_root"], html=True), name="files") @@ -345,6 +346,66 @@ async def delete_user_agent(req: DBWebRequestModel): } +@api.get("/models") +async def get_user_models(user_id: str): + try: + models = dbutils.get_models(user_id, dbmanager=dbmanager) + + return { + "status": True, + "message": "Models retrieved successfully", + "data": models, + } + except Exception as ex_error: + print(ex_error) + return { + "status": False, + "message": "Error occurred while retrieving models: " + str(ex_error), + } + + +@api.post("/models") +async def create_user_models(req: DBWebRequestModel): + """Create a new model for a user""" + + try: + models = dbutils.upsert_model(model=req.model, 
dbmanager=dbmanager) + + return { + "status": True, + "message": "Model created successfully", + "data": models, + } + + except Exception as ex_error: + print(traceback.format_exc()) + return { + "status": False, + "message": "Error occurred while creating model: " + str(ex_error), + } + + +@api.delete("/models/delete") +async def delete_user_model(req: DBWebRequestModel): + """Delete a model for a user""" + + try: + models = dbutils.delete_model(model=req.model, dbmanager=dbmanager) + + return { + "status": True, + "message": "Model deleted successfully", + "data": models, + } + + except Exception as ex_error: + print(traceback.format_exc()) + return { + "status": False, + "message": "Error occurred while deleting model: " + str(ex_error), + } + + @api.get("/workflows") async def get_user_workflows(user_id: str): try: @@ -401,3 +462,12 @@ async def delete_user_workflow(req: DBWebRequestModel): "status": False, "message": "Error occurred while deleting workflow: " + str(ex_error), } + + +@api.get("/version") +async def get_version(): + return { + "status": True, + "message": "Version retrieved successfully", + "data": {"version": VERSION}, + } diff --git a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py index a28bc9a7dd34..62c3b977c1d6 100644 --- a/samples/apps/autogen-studio/autogenstudio/workflowmanager.py +++ b/samples/apps/autogen-studio/autogenstudio/workflowmanager.py @@ -1,7 +1,7 @@ from typing import List, Optional from dataclasses import asdict import autogen -from .datamodel import AgentFlowSpec, AgentWorkFlowConfig, Message +from .datamodel import AgentConfig, AgentFlowSpec, AgentWorkFlowConfig, GroupChatConfig, Message from .utils import get_skills_from_prompt, clear_folder from datetime import datetime @@ -31,7 +31,9 @@ def __init__( if clear_work_dir: clear_folder(self.work_dir) + # given the sender config, create an AutoGen agent object self.sender = self.load(config.sender) + # given the receiver config, create an AutoGen agent object self.receiver = self.load(config.receiver) self.agent_history = [] @@ -42,10 +44,17 @@ def process_reply(self, recipient, messages, sender, config): if "callback" in config and config["callback"] is not None: callback = config["callback"] callback(sender, recipient, messages[-1]) + last_message = messages[-1] + + sender = sender.name + recipient = recipient.name + if "name" in last_message: + sender = last_message["name"] + iteration = { - "sender": sender.name, - "recipient": recipient.name, - "message": messages[-1], + "recipient": recipient, + "sender": sender, + "message": last_message, "timestamp": datetime.now().isoformat(), } self.agent_history.append(iteration) @@ -102,7 +111,7 @@ def sanitize_agent_spec(self, agent_spec: AgentFlowSpec) -> AgentFlowSpec: """ agent_spec.config.is_termination_msg = agent_spec.config.is_termination_msg or ( - lambda x: "TERMINATE" in x.get("content", "").rstrip() + lambda x: "TERMINATE" in x.get("content", "").rstrip()[-20:] ) skills_prompt = "" if agent_spec.skills: @@ -135,16 +144,41 @@ def load(self, agent_spec: AgentFlowSpec) -> autogen.Agent: Returns: An instance of the loaded agent. 
""" - agent: autogen.Agent - agent_spec = self.sanitize_agent_spec(agent_spec) - if agent_spec.type == "assistant": - agent = autogen.AssistantAgent(**asdict(agent_spec.config)) + + if agent_spec.type == "groupchat": + agents = [ + self.load(self.sanitize_agent_spec(agent_config)) for agent_config in agent_spec.groupchat_config.agents + ] + group_chat_config = agent_spec.groupchat_config.dict() + group_chat_config["agents"] = agents + groupchat = autogen.GroupChat(**group_chat_config) + manager = autogen.GroupChatManager(groupchat=groupchat, **agent_spec.config.dict()) + return manager + + else: + agent_spec = self.sanitize_agent_spec(agent_spec) + agent = self.load_agent_config(agent_spec.config, agent_spec.type) + return agent + + def load_agent_config(self, agent_config: AgentConfig, agent_type: str) -> autogen.Agent: + """ + Loads an agent based on the provided agent configuration. + + Args: + agent_config: The configuration of the agent to be loaded. + agent_type: The type of the agent to be loaded. + + Returns: + An instance of the loaded agent. + """ + if agent_type == "assistant": + agent = autogen.AssistantAgent(**agent_config.dict()) agent.register_reply([autogen.Agent, None], reply_func=self.process_reply, config={"callback": None}) - elif agent_spec.type == "userproxy": - agent = autogen.UserProxyAgent(**asdict(agent_spec.config)) + elif agent_type == "userproxy": + agent = autogen.UserProxyAgent(**agent_config.dict()) agent.register_reply([autogen.Agent, None], reply_func=self.process_reply, config={"callback": None}) else: - raise ValueError(f"Unknown agent type: {agent_spec.type}") + raise ValueError(f"Unknown agent type: {agent_type}") return agent def run(self, message: str, clear_history: bool = False) -> None: diff --git a/samples/apps/autogen-studio/frontend/gatsby-config.ts b/samples/apps/autogen-studio/frontend/gatsby-config.ts index 047412b52470..923e531f5155 100644 --- a/samples/apps/autogen-studio/frontend/gatsby-config.ts +++ b/samples/apps/autogen-studio/frontend/gatsby-config.ts @@ -7,7 +7,7 @@ require("dotenv").config({ const config: GatsbyConfig = { pathPrefix: `${process.env.PREFIX_PATH_VALUE}`, siteMetadata: { - title: `AutoGen Studio`, + title: `AutoGen Studio [Beta]`, description: `Build Multi-Agent Apps`, siteUrl: `http://tbd.place`, }, diff --git a/samples/apps/autogen-studio/frontend/src/components/atoms.tsx b/samples/apps/autogen-studio/frontend/src/components/atoms.tsx index 8cb0dd0ea9de..dfdc3b4bc63d 100644 --- a/samples/apps/autogen-studio/frontend/src/components/atoms.tsx +++ b/samples/apps/autogen-studio/frontend/src/components/atoms.tsx @@ -7,10 +7,23 @@ import { PlusIcon, ArrowPathIcon, ArrowDownRightIcon, + PencilIcon, + UserGroupIcon, + UsersIcon, } from "@heroicons/react/24/outline"; import React, { ReactNode, useEffect, useRef, useState } from "react"; import Icon from "./icons"; -import { Button, Input, Modal, Select, Slider, Tooltip, message } from "antd"; +import { + Button, + Dropdown, + Input, + MenuProps, + Modal, + Select, + Slider, + Tooltip, + message, +} from "antd"; import remarkGfm from "remark-gfm"; import ReactMarkdown from "react-markdown"; import { atomDark } from "react-syntax-highlighter/dist/esm/styles/prism"; @@ -19,6 +32,7 @@ import { fetchJSON, getServerUrl, truncateText } from "./utils"; import { IAgentFlowSpec, IFlowConfig, + IGroupChatFlowSpec, IModelConfig, ISkill, IStatus, @@ -27,6 +41,7 @@ import { ResizableBox } from "react-resizable"; import debounce from "lodash.debounce"; import TextArea from 
"antd/es/input/TextArea"; import { appContext } from "../hooks/provider"; +import Item from "antd/es/list/Item"; interface CodeProps { node?: any; @@ -603,17 +618,27 @@ export const ModelSelector = ({ null ); const [editIndex, setEditIndex] = useState(null); + const [loading, setLoading] = useState(false); + const [error, setError] = useState(null); + + const [models, setModels] = useState([]); + const serverUrl = getServerUrl(); + + const { user } = React.useContext(appContext); + const listModelsUrl = `${serverUrl}/models?user_id=${user?.email}`; const sanitizeModelConfig = (config: IModelConfig) => { const sanitizedConfig: IModelConfig = { model: config.model }; if (config.api_key) sanitizedConfig.api_key = config.api_key; if (config.base_url) sanitizedConfig.base_url = config.base_url; if (config.api_type) sanitizedConfig.api_type = config.api_type; + if (config.api_version) sanitizedConfig.api_version = config.api_version; return sanitizedConfig; }; const handleRemoveConfig = (index: number) => { const updatedConfigs = configs.filter((_, i) => i !== index); + setConfigs(updatedConfigs); }; @@ -623,6 +648,78 @@ export const ModelSelector = ({ setIsModalVisible(true); }; + const fetchModels = () => { + setError(null); + setLoading(true); + // const fetch; + const payLoad = { + method: "GET", + headers: { + "Content-Type": "application/json", + }, + }; + + const onSuccess = (data: any) => { + if (data && data.status) { + // message.success(data.message); + setModels(data.data); + } else { + message.error(data.message); + } + setLoading(false); + }; + const onError = (err: any) => { + setError(err); + message.error(err.message); + setLoading(false); + }; + fetchJSON(listModelsUrl, payLoad, onSuccess, onError); + }; + + useEffect(() => { + fetchModels(); + }, []); + + const modelItems: MenuProps["items"] = + models.length > 0 + ? models.map((model: IModelConfig, index: number) => ({ + key: index, + label: model.model, + value: index, + })) + : [ + { + key: -1, + label: "No models found", + value: 0, + }, + ]; + + const modelOnClick: MenuProps["onClick"] = ({ key }) => { + const selectedIndex = parseInt(key.toString()); + let selectedModel = models[selectedIndex]; + selectedModel = sanitizeModelConfig(selectedModel); + const updatedConfigs = [...configs, selectedModel]; + setConfigs(updatedConfigs); + }; + + const AddModelsDropDown = () => { + return ( + +
+ add +
+
+ ); + }; + const handleOk = () => { if (newModelConfig?.model.trim()) { const sanitizedConfig = sanitizeModelConfig(newModelConfig); @@ -665,6 +762,7 @@ export const ModelSelector = ({ return (
showModal(config, i)} > @@ -692,18 +790,7 @@ export const ModelSelector = ({
{modelButtons} -
- showModal( - { model: "", api_key: "", base_url: "", api_type: "" }, - null - ) - } - > - add -
+
{ + if (key === "llm_config") { + if (value.config_list.length === 0) { + value = false; + } + } const updatedFlowSpec = { ...localFlowSpec, config: { ...localFlowSpec.config, [key]: value }, }; setLocalFlowSpec(updatedFlowSpec); setFlowSpec(updatedFlowSpec); }; - const onDebouncedControlChange = React.useCallback( - debounce((value: any, key: string) => { - onControlChange(value, key); - }, 3000), - [onControlChange] - ); const llm_config = localFlowSpec.config.llm_config || { config_list: [] }; return ( <> - 
{title}
- +
{title}
+ { + onChange={(value: any) => { onControlChange(value, "max_consecutive_auto_reply"); }} /> @@ -971,8 +1057,8 @@ export const AgentFlowSpecView = ({ options={ [ { label: "NEVER", value: "NEVER" }, - { label: "TERMINATE", value: "TERMINATE" }, - { label: "ALWAYS", value: "ALWAYS" }, + // { label: "TERMINATE", value: "TERMINATE" }, + // { label: "ALWAYS", value: "ALWAYS" }, ] as any } /> @@ -991,7 +1077,8 @@ export const AgentFlowSpecView = ({ value={flowSpec.config.system_message} rows={3} onChange={(e) => { - onDebouncedControlChange(e.target.value, "system_message"); + // onDebouncedControlChange(e.target.value, "system_message"); + onControlChange(e.target.value, "system_message"); }} /> } @@ -1262,6 +1349,341 @@ export const SkillLoader = ({ ); }; +const GroupChatFlowSpecView = ({ + flowSpec, + setFlowSpec, + flowSpecs, +}: { + flowSpec: IGroupChatFlowSpec | null; + setFlowSpec: (flowSpec: IGroupChatFlowSpec | null) => void; + flowSpecs: IAgentFlowSpec[]; +}) => { + const [showAgentModal, setShowAgentModal] = React.useState(false); + const [selectedAgent, setSelectedAgent] = React.useState(null); + + const handleRemoveAgent = (index: number) => { + const updatedAgents = flowSpec?.groupchat_config.agents.filter( + (_, i) => i !== index + ); + if (flowSpec?.groupchat_config && updatedAgents) { + setFlowSpec({ + ...flowSpec, + groupchat_config: { + ...flowSpec?.groupchat_config, + agents: updatedAgents, + }, + }); + } + }; + + const handleAddAgent = (agent: IAgentFlowSpec) => { + if (flowSpec?.groupchat_config && flowSpec?.groupchat_config.agents) { + const updatedAgents = [...flowSpec?.groupchat_config.agents, agent]; + if (flowSpec?.groupchat_config) { + setFlowSpec({ + ...flowSpec, + groupchat_config: { + ...flowSpec?.groupchat_config, + agents: updatedAgents, + }, + }); + } + } + }; + + const handleAgentUpdate = (updatedAgent: IAgentFlowSpec, index: number) => { + const updatedAgents = flowSpec?.groupchat_config.agents.map((agent, i) => { + if (i === index) { + return updatedAgent; + } + return agent; + }); + if (flowSpec?.groupchat_config && updatedAgents) { + setFlowSpec({ + ...flowSpec, + groupchat_config: { + ...flowSpec?.groupchat_config, + agents: updatedAgents, + }, + }); + } + }; + + const agentItems: MenuProps["items"] = flowSpecs.map( + (flowSpec: IAgentFlowSpec, index: number) => ({ + key: index, + label: flowSpec.config.name, + value: index, + }) + ); + + const agentOnClick: MenuProps["onClick"] = ({ key }) => { + const selectedIndex = parseInt(key.toString()); + const selectedAgent = flowSpecs[selectedIndex]; + handleAddAgent(selectedAgent); + }; + + const AgentDropDown = () => { + return ( + +
+ add +
+
+ ); + }; + + const agentsView = flowSpec?.groupchat_config.agents.map( + (flowSpec: IAgentFlowSpec, index: number) => { + const tooltipText = `Agent: ${flowSpec?.config.name}`; + return ( +
{ + setSelectedAgent(index); + }} + > + 
+ {" "} + +
{flowSpec.config.name}
{" "} +
+
{ + e.stopPropagation(); + handleRemoveAgent(index); + }} + className="ml-1 text-primary hover:text-accent duration-300" + > + +
+
+
+ ); + } + ); + + useEffect(() => { + if (selectedAgent !== null) { + setShowAgentModal(true); + } + }, [selectedAgent]); + + return ( + 
+ {showAgentModal && + selectedAgent !== null && + flowSpec?.groupchat_config && ( + { + if (agent) { + handleAgentUpdate(agent, selectedAgent); + } + setSelectedAgent(null); + }} + /> + )} + 
+ {agentsView} + +
+
+
+ ); +}; + +const AgentModal = ({ + agent, + showAgentModal, + setShowAgentModal, + handler, +}: { + agent: IAgentFlowSpec | null; + showAgentModal: boolean; + setShowAgentModal: (show: boolean) => void; + handler?: (agent: IAgentFlowSpec | null) => void; +}) => { + const [localAgent, setLocalAgent] = React.useState( + agent + ); + const [selectedFlowSpec, setSelectedFlowSpec] = useState(0); + + const serverUrl = getServerUrl(); + const { user } = React.useContext(appContext); + const listAgentsUrl = `${serverUrl}/agents?user_id=${user?.email}`; + + const [flowSpecs, setFlowSpecs] = useState([]); + useEffect(() => { + fetchAgents(); + }, []); + + const fetchAgents = () => { + const onSuccess = (data: any) => { + if (data && data.status) { + setFlowSpecs(data.data); + } + }; + const onError = (err: any) => { + console.error(err); + }; + const payLoad = { + method: "GET", + headers: { + "Content-Type": "application/json", + }, + }; + fetchJSON(listAgentsUrl, payLoad, onSuccess, onError); + }; + + const handleAgentChange = (value: any) => { + setSelectedFlowSpec(value); + setLocalAgent(flowSpecs[value]); + }; + + return ( + + Agent Specification{" "} + {agent?.config.name}{" "} + + } + width={800} + open={showAgentModal} + onOk={() => { + if (handler) { + handler(localAgent); + } + setShowAgentModal(false); + }} + onCancel={() => { + setShowAgentModal(false); + }} + > + {agent && ( + <> + {" "} +
+ Modify current agent{" "} +
+ {localAgent && localAgent.type === "groupchat" && ( +
+ {" "} + Group Chat + +
+ )} + {localAgent && ( + + )} + + )} + + {agent && agent.type !== "groupchat" && ( +
+ {" "} +
+
+ Or replace with an existing agent{" "} +
+
+ { + setLocalModel({ ...localModel, model: e.target.value }); + }} + /> + { + if (localModel) { + setLocalModel({ ...localModel, api_key: e.target.value }); + } + }} + /> + { + if (localModel) { + setLocalModel({ ...localModel, base_url: e.target.value }); + } + }} + /> + { + if (localModel) { + setLocalModel({ ...localModel, api_type: e.target.value }); + } + }} + /> + { + if (localModel) { + setLocalModel({ ...localModel, api_version: e.target.value }); + } + }} + /> +