Added an agent description field distinct from the system_message. #736

Merged · 22 commits · Dec 9, 2023
Changes from 12 commits

10 changes: 10 additions & 0 deletions autogen/agentchat/assistant_agent.py
@@ -26,6 +26,8 @@ class AssistantAgent(ConversableAgent):
Reply "TERMINATE" in the end when everything is done.
"""

DEFAULT_DESCRIPTION = "A helpful and general-purpose AI assistant that has strong language skills, Python skills, and Linux command line skills."

def __init__(
self,
name: str,
@@ -35,6 +37,7 @@ def __init__(
max_consecutive_auto_reply: Optional[int] = None,
human_input_mode: Optional[str] = "NEVER",
code_execution_config: Optional[Union[Dict, Literal[False]]] = False,
description: Optional[str] = None,
**kwargs,
):
"""
@@ -62,5 +65,12 @@ def __init__(
human_input_mode,
code_execution_config=code_execution_config,
llm_config=llm_config,
description=description,
**kwargs,
)

# If no description was provided and the default system_message is in use,
# fall back to the default description.
if description is None:
if system_message == self.DEFAULT_SYSTEM_MESSAGE:
self.description = self.DEFAULT_DESCRIPTION
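
A minimal usage sketch of the new AssistantAgent parameter, assuming the constructor shown above; the llm_config contents are placeholders:

```python
from autogen import AssistantAgent

# Placeholder model configuration; substitute real settings.
llm_config = {"config_list": [{"model": "gpt-4", "api_key": "sk-..."}]}

# No description and the default system_message: the agent picks up
# AssistantAgent.DEFAULT_DESCRIPTION.
coder = AssistantAgent(name="coder", llm_config=llm_config)

# An explicit description is passed through to ConversableAgent and kept as-is.
reviewer = AssistantAgent(
    name="reviewer",
    llm_config=llm_config,
    description="Reviews Python code for correctness and style.",
)
```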
4 changes: 4 additions & 0 deletions autogen/agentchat/conversable_agent.py
@@ -58,11 +58,14 @@ def __init__(
code_execution_config: Optional[Union[Dict, Literal[False]]] = None,
llm_config: Optional[Union[Dict, Literal[False]]] = None,
default_auto_reply: Optional[Union[str, Dict, None]] = "",
description: Optional[str] = None,
):
"""
Args:
name (str): name of the agent.
system_message (str): system message for the ChatCompletion inference.
description (str): a short description of the agent. This description is used by other agents
(e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
is_termination_msg (function): a function that takes a message in the form of a dictionary
and returns a boolean value indicating if this received message is a termination message.
The dict can contain the following keys: "content", "role", "name", "function_call".
@@ -104,6 +107,7 @@ def __init__(
# a dictionary of conversations, default value is list
self._oai_messages = defaultdict(list)
self._oai_system_message = [{"content": system_message, "role": "system"}]
self.description = description if description is not None else system_message
self._is_termination_msg = (
is_termination_msg if is_termination_msg is not None else (lambda x: x.get("content") == "TERMINATE")
)
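
A short sketch of the default introduced here, mirroring the tests further down: when no description is given, it falls back to the system_message.

```python
from autogen import ConversableAgent

# No description: it defaults to the system_message.
planner = ConversableAgent(
    name="planner",
    system_message="You break tasks into small, verifiable steps.",
    llm_config=False,
)
assert planner.description == planner.system_message

# An explicit description is stored unchanged.
critic = ConversableAgent(
    name="critic",
    system_message="You critique plans and point out missing steps.",
    description="A critic that reviews plans for gaps and risks.",
    llm_config=False,
)
assert critic.description == "A critic that reviews plans for gaps and risks."
```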
22 changes: 15 additions & 7 deletions autogen/agentchat/groupchat.py
@@ -66,11 +66,14 @@ def next_agent(self, agent: Agent, agents: List[Agent]) -> Agent:

def select_speaker_msg(self, agents: List[Agent]):
"""Return the message for selecting the next speaker."""
return f"""You are in a role play game. The following roles are available:
{self._participant_roles(agents)}.
return f"""You are moderating a conversation between the following participants:

Read the following conversation.
Then select the next role from {[agent.name for agent in agents]} to play. Only return the role."""
{self._participant_roles(agents)}

Read the following conversation, then carefully consider who should speak next based on whose input would be most valued in this moment (e.g., to make the most progress on the task). Speakers do not need equal speaking time. You may even ignore non-relevant participants. Your focus is on efficiently driving progress toward task completion.

You must select only one speaker to go next, and you must only return their name (i.e., from the set {[agent.name for agent in agents]})
"""

def manual_select_speaker(self, agents: List[Agent]) -> Agent:
"""Manually select the next speaker."""
@@ -155,15 +158,20 @@ def select_speaker(self, last_speaker: Agent, selector: ConversableAgent):

# auto speaker selection
selector.update_system_message(self.select_speaker_msg(agents))
# logger.warning("GroupChat selection prompt:\n" + selector.system_message)

final, name = selector.generate_oai_reply(
self.messages
+ [
{
"role": "system",
"content": f"Read the above conversation. Then select the next role from {[agent.name for agent in agents]} to play. Only return the role.",
"content": f"Read the above conversation, then carefully consider who should speak next based on who's input would be most valued in this moment to make progress on the task. Select the next speaker from {[agent.name for agent in agents]}. Only return their name.",
}
]
)

# logger.warning("GroupChat selection result: " + name)

if not final:
# the LLM client is None, thus no reply is generated. Use round robin instead.
return self.next_agent(last_speaker, agents)
@@ -192,9 +200,9 @@ def _participant_roles(self, agents: List[Agent] = None) -> str:
for agent in agents:
if agent.system_message.strip() == "":
logger.warning(
f"The agent '{agent.name}' has an empty system_message, and may not work well with GroupChat."
f"The agent '{agent.name}' has an empty description, and may not work well with GroupChat."
)
roles.append(f"{agent.name}: {agent.system_message}")
roles.append(f"{agent.name}: {agent.description}".strip())
return "\n".join(roles)

def _mentioned_agents(self, message_content: str, agents: List[Agent]) -> Dict:
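
A sketch of how the descriptions reach speaker selection, assuming the GroupChat/GroupChatManager API of this release; the llm_config is a placeholder. `_participant_roles` now lists each agent's description rather than its full system_message, so the prompt built by `select_speaker_msg` stays short.

```python
from autogen import AssistantAgent, UserProxyAgent, GroupChat, GroupChatManager

llm_config = {"config_list": [{"model": "gpt-4", "api_key": "sk-..."}]}  # placeholder

coder = AssistantAgent(
    name="coder",
    llm_config=llm_config,
    description="Writes Python code to solve the task.",
)
user_proxy = UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    code_execution_config=False,  # keep the sketch self-contained
)

groupchat = GroupChat(agents=[user_proxy, coder], messages=[], max_round=8)
manager = GroupChatManager(groupchat=groupchat, llm_config=llm_config)

# The speaker-selection prompt now lists, roughly:
#   user_proxy: A user that can run Python code or input command line commands ...
#   coder: Writes Python code to solve the task.
# instead of embedding each agent's full system_message.
```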
29 changes: 20 additions & 9 deletions autogen/agentchat/user_proxy_agent.py
@@ -15,6 +15,13 @@ class UserProxyAgent(ConversableAgent):
To customize the initial message when a conversation starts, override `generate_init_message` method.
"""

# Default UserProxyAgent.description values, based on human_input_mode
DEFAULT_USER_PROXY_AGENT_DESCRIPTIONS = {
"ALWAYS": "An attentive HUMAN user who can answer questions about the task, and can perform tasks such as running Python code or inputting command line commands at a Linux terminal and reporting back the execution results.",
"TERMINATE": "A user that can run Python code or input command line commands at a Linux terminal and report back the execution results.",
"NEVER": "A user that can run Python code or input command line commands at a Linux terminal and report back the execution results.",
}

def __init__(
self,
name: str,
@@ -26,6 +33,7 @@ def __init__(
default_auto_reply: Optional[Union[str, Dict, None]] = "",
llm_config: Optional[Union[Dict, Literal[False]]] = False,
system_message: Optional[str] = "",
description: Optional[str] = None,
):
"""
Args:
@@ -70,13 +78,16 @@ def __init__(
Only used when llm_config is not False. Use it to reprogram the agent.
"""
super().__init__(
name,
system_message,
is_termination_msg,
max_consecutive_auto_reply,
human_input_mode,
function_map,
code_execution_config,
llm_config,
default_auto_reply,
name=name,
system_message=system_message,
is_termination_msg=is_termination_msg,
max_consecutive_auto_reply=max_consecutive_auto_reply,
human_input_mode=human_input_mode,
function_map=function_map,
code_execution_config=code_execution_config,
llm_config=llm_config,
default_auto_reply=default_auto_reply,
description=description
if description is not None
else self.DEFAULT_USER_PROXY_AGENT_DESCRIPTIONS[human_input_mode],
)
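
A sketch of the per-mode defaults, assuming the constructor shown above: with no explicit description, UserProxyAgent picks the entry matching its human_input_mode; an explicit description always wins.

```python
from autogen import UserProxyAgent

# Default description follows human_input_mode ("ALWAYS" -> the HUMAN variant).
human = UserProxyAgent(name="human", human_input_mode="ALWAYS", code_execution_config=False)
print(human.description)     # "An attentive HUMAN user who can answer questions about the task, ..."

executor = UserProxyAgent(name="executor", human_input_mode="NEVER", code_execution_config=False)
print(executor.description)  # "A user that can run Python code or input command line commands ..."

# An explicit description overrides the per-mode default.
operator = UserProxyAgent(
    name="operator",
    human_input_mode="NEVER",
    code_execution_config=False,
    description="Executes shell commands in a sandbox and reports results.",
)
```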
21 changes: 21 additions & 0 deletions test/agentchat/test_conversable_agent.py
@@ -275,6 +275,27 @@ def test_conversable_agent():
with pytest.raises(KeyError):
dummy_agent_1.last_message(dummy_agent_3)

# Check the description field
assert dummy_agent_1.description != dummy_agent_1.system_message
assert dummy_agent_2.description == dummy_agent_2.system_message

dummy_agent_4 = ConversableAgent(
name="dummy_agent_4",
system_message="The fourth dummy agent used for testing.",
llm_config=False,
human_input_mode="TERMINATE",
)
assert dummy_agent_4.description == "The fourth dummy agent used for testing." # Same as system message

dummy_agent_5 = ConversableAgent(
name="dummy_agent_5",
system_message="",
description="The fifth dummy agent used for testing.",
llm_config=False,
human_input_mode="TERMINATE",
)
assert dummy_agent_5.description == "The fifth dummy agent used for testing." # Same as the provided description


def test_generate_reply():
def add_num(num_to_be_added):