Added an agent description field distinct from the system_message. #736

Merged · 22 commits · Dec 9, 2023
10 changes: 10 additions & 0 deletions autogen/agentchat/assistant_agent.py
@@ -27,6 +27,8 @@ class AssistantAgent(ConversableAgent):
Reply "TERMINATE" in the end when everything is done.
"""

DEFAULT_DESCRIPTION = "A helpful and general-purpose AI assistant that has strong language skills, Python skills, and Linux command line skills."

def __init__(
self,
name: str,
@@ -36,6 +38,7 @@ def __init__(
max_consecutive_auto_reply: Optional[int] = None,
human_input_mode: Optional[str] = "NEVER",
code_execution_config: Optional[Union[Dict, Literal[False]]] = False,
description: Optional[str] = None,
**kwargs,
):
"""
@@ -63,5 +66,12 @@ def __init__(
human_input_mode,
code_execution_config=code_execution_config,
llm_config=llm_config,
description=description,
**kwargs,
)

# If no description was provided and the default system_message is in use,
# fall back to the default description.
if description is None:
if system_message == self.DEFAULT_SYSTEM_MESSAGE:
self.description = self.DEFAULT_DESCRIPTION
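
Not part of the diff — a minimal sketch of how the fallback added above behaves, assuming the `AssistantAgent` constructor shown in this PR (agent names and messages below are illustrative):

```python
from autogen import AssistantAgent

# Default system_message -> description falls back to DEFAULT_DESCRIPTION.
helper = AssistantAgent(name="helper", llm_config=False)
assert helper.description == AssistantAgent.DEFAULT_DESCRIPTION

# Custom system_message, no description -> description mirrors the system_message
# (handled by ConversableAgent, see the next file).
coder = AssistantAgent(
    name="coder",
    system_message="You write short, well-tested Python functions.",
    llm_config=False,
)
assert coder.description == "You write short, well-tested Python functions."

# An explicit description always wins.
reviewer = AssistantAgent(
    name="reviewer",
    system_message="You review code for correctness and style.",
    description="Reviews code written by the coder agent.",
    llm_config=False,
)
assert reviewer.description == "Reviews code written by the coder agent."
```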
4 changes: 4 additions & 0 deletions autogen/agentchat/conversable_agent.py
@@ -54,6 +54,7 @@ def __init__(
code_execution_config: Optional[Union[Dict, Literal[False]]] = None,
llm_config: Optional[Union[Dict, Literal[False]]] = None,
default_auto_reply: Optional[Union[str, Dict, None]] = "",
description: Optional[str] = None,
):
"""
Args:
@@ -95,11 +96,14 @@
for available options.
To disable llm-based auto reply, set to False.
default_auto_reply (str or dict or None): default auto reply when no code execution or llm-based reply is generated.
description (str): a short description of the agent. This description is used by other agents
(e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
"""
super().__init__(name)
# a dictionary of conversations, default value is list
self._oai_messages = defaultdict(list)
self._oai_system_message = [{"content": system_message, "role": "system"}]
self.description = description if description is not None else system_message
self._is_termination_msg = (
is_termination_msg
if is_termination_msg is not None
24 changes: 11 additions & 13 deletions autogen/agentchat/groupchat.py
@@ -80,13 +80,17 @@ def next_agent(self, agent: Agent, agents: List[Agent]) -> Agent:
return self.agents[(offset + i) % len(self.agents)]

def select_speaker_msg(self, agents: List[Agent]) -> str:
"""Return the message for selecting the next speaker."""
"""Return the system message for selecting the next speaker. This is always the *first* message in the context."""
return f"""You are in a role play game. The following roles are available:
{self._participant_roles(agents)}.

Read the following conversation.
Then select the next role from {[agent.name for agent in agents]} to play. Only return the role."""

def select_speaker_prompt(self, agents: List[Agent]) -> str:
"""Return the floating system prompt selecting the next speaker. This is always the *last* message in the context."""
return f"Read the above conversation. Then select the next role from {[agent.name for agent in agents]} to play. Only return the role."

def manual_select_speaker(self, agents: List[Agent]) -> Union[Agent, None]:
"""Manually select the next speaker."""

@@ -170,15 +174,9 @@ def select_speaker(self, last_speaker: Agent, selector: ConversableAgent):

# auto speaker selection
selector.update_system_message(self.select_speaker_msg(agents))
final, name = selector.generate_oai_reply(
self.messages
+ [
{
"role": "system",
"content": f"Read the above conversation. Then select the next role from {[agent.name for agent in agents]} to play. Only return the role.",
}
]
)
context = self.messages + [{"role": "system", "content": self.select_speaker_prompt(agents)}]
final, name = selector.generate_oai_reply(context)

if not final:
# the LLM client is None, thus no reply is generated. Use round robin instead.
return self.next_agent(last_speaker, agents)
@@ -205,11 +203,11 @@ def _participant_roles(self, agents: List[Agent] = None) -> str:

roles = []
for agent in agents:
if content_str(agent.system_message).strip() == "":
if agent.description.strip() == "":
logger.warning(
f"The agent '{agent.name}' has an empty system_message, and may not work well with GroupChat."
f"The agent '{agent.name}' has an empty description, and may not work well with GroupChat."
)
roles.append(f"{agent.name}: {agent.system_message}")
roles.append(f"{agent.name}: {agent.description}".strip())
return "\n".join(roles)

def _mentioned_agents(self, message_content: Union[str, List], agents: List[Agent]) -> Dict:
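
Not part of the diff — a small sketch of how the selection context is assembled after this change, with `select_speaker_msg` (built from agent descriptions) opening the context and `select_speaker_prompt` closing it; the agents and descriptions below are illustrative:

```python
from autogen import AssistantAgent, GroupChat, UserProxyAgent

coder = AssistantAgent(
    name="coder",
    system_message="You are a careful Python developer. Always explain your code.",
    description="Writes and debugs Python code.",
    llm_config=False,
)
user = UserProxyAgent(
    name="user",
    human_input_mode="NEVER",
    code_execution_config=False,
)
groupchat = GroupChat(agents=[user, coder], messages=[], max_round=5)

# First message in the selector's context: the role-play framing,
# now listing each agent's description instead of its full system_message.
print(groupchat.select_speaker_msg(groupchat.agents))

# Last message in the selector's context: the floating prompt.
print(groupchat.select_speaker_prompt(groupchat.agents))
```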
31 changes: 22 additions & 9 deletions autogen/agentchat/user_proxy_agent.py
@@ -16,6 +16,13 @@ class UserProxyAgent(ConversableAgent):
To customize the initial message when a conversation starts, override `generate_init_message` method.
"""

# Default UserProxyAgent.description values, based on human_input_mode
DEFAULT_USER_PROXY_AGENT_DESCRIPTIONS = {
"ALWAYS": "An attentive HUMAN user who can answer questions about the task, and can perform tasks such as running Python code or inputting command line commands at a Linux terminal and reporting back the execution results.",
"TERMINATE": "A user that can run Python code or input command line commands at a Linux terminal and report back the execution results.",
"NEVER": "A user that can run Python code or input command line commands at a Linux terminal and report back the execution results.",
}

def __init__(
self,
name: str,
@@ -27,6 +34,7 @@ def __init__(
default_auto_reply: Optional[Union[str, Dict, None]] = "",
llm_config: Optional[Union[Dict, Literal[False]]] = False,
system_message: Optional[Union[str, List]] = "",
description: Optional[str] = None,
):
"""
Args:
@@ -69,15 +77,20 @@ def __init__(
Default to false, which disables llm-based auto reply.
system_message (str or List): system message for ChatCompletion inference.
Only used when llm_config is not False. Use it to reprogram the agent.
description (str): a short description of the agent. This description is used by other agents
(e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
"""
super().__init__(
name,
system_message,
is_termination_msg,
max_consecutive_auto_reply,
human_input_mode,
function_map,
code_execution_config,
llm_config,
default_auto_reply,
name=name,
system_message=system_message,
is_termination_msg=is_termination_msg,
max_consecutive_auto_reply=max_consecutive_auto_reply,
human_input_mode=human_input_mode,
function_map=function_map,
code_execution_config=code_execution_config,
llm_config=llm_config,
default_auto_reply=default_auto_reply,
description=description
if description is not None
else self.DEFAULT_USER_PROXY_AGENT_DESCRIPTIONS[human_input_mode],
)
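
Not part of the diff — a quick sketch of the new defaults, assuming the mapping introduced above: unless a description is passed explicitly, a `UserProxyAgent` picks the entry keyed by its `human_input_mode`:

```python
from autogen import UserProxyAgent

auto_user = UserProxyAgent(
    name="auto_user",
    human_input_mode="NEVER",
    code_execution_config=False,
)
# Defaults to DEFAULT_USER_PROXY_AGENT_DESCRIPTIONS["NEVER"].
print(auto_user.description)

human_user = UserProxyAgent(
    name="human_user",
    human_input_mode="ALWAYS",
    code_execution_config=False,
    description="The product owner; ask them about requirements.",  # illustrative
)
# An explicit description overrides the human_input_mode default.
print(human_user.description)
```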
21 changes: 21 additions & 0 deletions test/agentchat/test_conversable_agent.py
@@ -275,6 +275,27 @@ def test_conversable_agent():
with pytest.raises(KeyError):
dummy_agent_1.last_message(dummy_agent_3)

# Check the description field
assert dummy_agent_1.description != dummy_agent_1.system_message
assert dummy_agent_2.description == dummy_agent_2.system_message

dummy_agent_4 = ConversableAgent(
name="dummy_agent_4",
system_message="The fourth dummy agent used for testing.",
llm_config=False,
human_input_mode="TERMINATE",
)
assert dummy_agent_4.description == "The fourth dummy agent used for testing." # Same as system message

dummy_agent_5 = ConversableAgent(
name="dummy_agent_5",
system_message="",
description="The fifth dummy agent used for testing.",
llm_config=False,
human_input_mode="TERMINATE",
)
assert dummy_agent_5.description == "The fifth dummy agent used for testing."  # Set explicitly; overrides the empty system_message


def test_generate_reply():
def add_num(num_to_be_added):