diff --git a/autogen/agentchat/assistant_agent.py b/autogen/agentchat/assistant_agent.py index 55230708f67a..ce7836da1666 100644 --- a/autogen/agentchat/assistant_agent.py +++ b/autogen/agentchat/assistant_agent.py @@ -27,6 +27,8 @@ class AssistantAgent(ConversableAgent): Reply "TERMINATE" in the end when everything is done. """ + DEFAULT_DESCRIPTION = "A helpful and general-purpose AI assistant that has strong language skills, Python skills, and Linux command line skills." + def __init__( self, name: str, @@ -36,6 +38,7 @@ def __init__( max_consecutive_auto_reply: Optional[int] = None, human_input_mode: Optional[str] = "NEVER", code_execution_config: Optional[Union[Dict, Literal[False]]] = False, + description: Optional[str] = None, **kwargs, ): """ @@ -63,5 +66,12 @@ def __init__( human_input_mode, code_execution_config=code_execution_config, llm_config=llm_config, + description=description, **kwargs, ) + + # If the provided description is None and we are using the default system_message, + # then use the default description. + if description is None: + if system_message == self.DEFAULT_SYSTEM_MESSAGE: + self.description = self.DEFAULT_DESCRIPTION diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 7a268823a78c..baa79645b1ab 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -54,6 +54,7 @@ def __init__( code_execution_config: Optional[Union[Dict, Literal[False]]] = None, llm_config: Optional[Union[Dict, Literal[False]]] = None, default_auto_reply: Optional[Union[str, Dict, None]] = "", + description: Optional[str] = None, ): """ Args: @@ -95,11 +96,14 @@ def __init__( for available options. To disable llm-based auto reply, set to False. default_auto_reply (str or dict or None): default auto reply when no code execution or llm-based reply is generated. + description (str): a short description of the agent. This description is used by other agents + (e.g. 
the GroupChatManager) to decide when to call upon this agent. (Default: system_message) """ super().__init__(name) # a dictionary of conversations, default value is list self._oai_messages = defaultdict(list) self._oai_system_message = [{"content": system_message, "role": "system"}] + self.description = description if description is not None else system_message self._is_termination_msg = ( is_termination_msg if is_termination_msg is not None diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py index 295954ac8028..1326a7e4f8ce 100644 --- a/autogen/agentchat/groupchat.py +++ b/autogen/agentchat/groupchat.py @@ -80,13 +80,17 @@ def next_agent(self, agent: Agent, agents: List[Agent]) -> Agent: return self.agents[(offset + i) % len(self.agents)] def select_speaker_msg(self, agents: List[Agent]) -> str: - """Return the message for selecting the next speaker.""" + """Return the system message for selecting the next speaker. This is always the *first* message in the context.""" return f"""You are in a role play game. The following roles are available: {self._participant_roles(agents)}. Read the following conversation. Then select the next role from {[agent.name for agent in agents]} to play. Only return the role.""" + def select_speaker_prompt(self, agents: List[Agent]) -> str: + """Return the floating system prompt selecting the next speaker. This is always the *last* message in the context.""" + return f"Read the above conversation. Then select the next role from {[agent.name for agent in agents]} to play. Only return the role." 
+ def manual_select_speaker(self, agents: List[Agent]) -> Union[Agent, None]: """Manually select the next speaker.""" @@ -170,15 +174,9 @@ def select_speaker(self, last_speaker: Agent, selector: ConversableAgent): # auto speaker selection selector.update_system_message(self.select_speaker_msg(agents)) - final, name = selector.generate_oai_reply( - self.messages - + [ - { - "role": "system", - "content": f"Read the above conversation. Then select the next role from {[agent.name for agent in agents]} to play. Only return the role.", - } - ] - ) + context = self.messages + [{"role": "system", "content": self.select_speaker_prompt(agents)}] + final, name = selector.generate_oai_reply(context) + if not final: # the LLM client is None, thus no reply is generated. Use round robin instead. return self.next_agent(last_speaker, agents) @@ -205,11 +203,11 @@ def _participant_roles(self, agents: List[Agent] = None) -> str: roles = [] for agent in agents: - if content_str(agent.system_message).strip() == "": + if agent.description.strip() == "": logger.warning( - f"The agent '{agent.name}' has an empty system_message, and may not work well with GroupChat." + f"The agent '{agent.name}' has an empty description, and may not work well with GroupChat." ) - roles.append(f"{agent.name}: {agent.system_message}") + roles.append(f"{agent.name}: {agent.description}".strip()) return "\n".join(roles) def _mentioned_agents(self, message_content: Union[str, List], agents: List[Agent]) -> Dict: diff --git a/autogen/agentchat/user_proxy_agent.py b/autogen/agentchat/user_proxy_agent.py index b658aed4b39f..4d053f67d913 100644 --- a/autogen/agentchat/user_proxy_agent.py +++ b/autogen/agentchat/user_proxy_agent.py @@ -16,6 +16,13 @@ class UserProxyAgent(ConversableAgent): To customize the initial message when a conversation starts, override `generate_init_message` method. 
""" + # Default UserProxyAgent.description values, based on human_input_mode + DEFAULT_USER_PROXY_AGENT_DESCRIPTIONS = { + "ALWAYS": "An attentive HUMAN user who can answer questions about the task, and can perform tasks such as running Python code or inputting command line commands at a Linux terminal and reporting back the execution results.", + "TERMINATE": "A user that can run Python code or input command line commands at a Linux terminal and report back the execution results.", + "NEVER": "A user that can run Python code or input command line commands at a Linux terminal and report back the execution results.", + } + def __init__( self, name: str, @@ -27,6 +34,7 @@ def __init__( default_auto_reply: Optional[Union[str, Dict, None]] = "", llm_config: Optional[Union[Dict, Literal[False]]] = False, system_message: Optional[Union[str, List]] = "", + description: Optional[str] = None, ): """ Args: @@ -69,15 +77,20 @@ def __init__( Default to false, which disables llm-based auto reply. system_message (str or List): system message for ChatCompletion inference. Only used when llm_config is not False. Use it to reprogram the agent. + description (str): a short description of the agent. This description is used by other agents + (e.g. the GroupChatManager) to decide when to call upon this agent. 
(Default: system_message) """ super().__init__( - name, - system_message, - is_termination_msg, - max_consecutive_auto_reply, - human_input_mode, - function_map, - code_execution_config, - llm_config, - default_auto_reply, + name=name, + system_message=system_message, + is_termination_msg=is_termination_msg, + max_consecutive_auto_reply=max_consecutive_auto_reply, + human_input_mode=human_input_mode, + function_map=function_map, + code_execution_config=code_execution_config, + llm_config=llm_config, + default_auto_reply=default_auto_reply, + description=description + if description is not None + else self.DEFAULT_USER_PROXY_AGENT_DESCRIPTIONS[human_input_mode], ) diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py index d38193338f4c..839a598b2dae 100644 --- a/test/agentchat/test_conversable_agent.py +++ b/test/agentchat/test_conversable_agent.py @@ -275,6 +275,27 @@ def test_conversable_agent(): with pytest.raises(KeyError): dummy_agent_1.last_message(dummy_agent_3) + # Check the description field + assert dummy_agent_1.description != dummy_agent_1.system_message + assert dummy_agent_2.description == dummy_agent_2.system_message + + dummy_agent_4 = ConversableAgent( + name="dummy_agent_4", + system_message="The fourth dummy agent used for testing.", + llm_config=False, + human_input_mode="TERMINATE", + ) + assert dummy_agent_4.description == "The fourth dummy agent used for testing." # Same as system message + + dummy_agent_5 = ConversableAgent( + name="dummy_agent_5", + system_message="", + description="The fifth dummy agent used for testing.", + llm_config=False, + human_input_mode="TERMINATE", + ) + assert dummy_agent_5.description == "The fifth dummy agent used for testing." # Set by the description parameter, overriding the system message + def test_generate_reply(): def add_num(num_to_be_added):