Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added an agent description field distinct from the system_message. #736

Merged
merged 22 commits into from
Dec 9, 2023
Merged
Show file tree
Hide file tree
Changes from 17 commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions autogen/agentchat/assistant_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,8 @@ class AssistantAgent(ConversableAgent):
Reply "TERMINATE" in the end when everything is done.
"""

DEFAULT_DESCRIPTION = "A helpful and general-purpose AI assistant that has strong language skills, Python skills, and Linux command line skills."

def __init__(
self,
name: str,
Expand All @@ -35,6 +37,7 @@ def __init__(
max_consecutive_auto_reply: Optional[int] = None,
human_input_mode: Optional[str] = "NEVER",
code_execution_config: Optional[Union[Dict, Literal[False]]] = False,
description: Optional[str] = None,
**kwargs,
):
"""
Expand Down Expand Up @@ -62,5 +65,12 @@ def __init__(
human_input_mode,
code_execution_config=code_execution_config,
llm_config=llm_config,
description=description,
**kwargs,
)

# If no description was provided and we are using the default system_message,
# then fall back to the default description.
if description is None:
if system_message == self.DEFAULT_SYSTEM_MESSAGE:
self.description = self.DEFAULT_DESCRIPTION
49 changes: 49 additions & 0 deletions autogen/agentchat/contrib/group_chat_moderator.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
from typing import Callable, Dict, Optional, Union, Tuple, List, Any
from autogen import GroupChat, Agent
import logging

logger = logging.getLogger(__name__)


class GroupChatModerator(GroupChat):
    """(Experimental) A variation of the standard GroupChat class, but with an alternate prompting strategy
    that focuses on conversation moderation rather than role play. A drop-in replacement for GroupChat."""

    def __init__(
        self,
        agents: List[Agent],
        messages: List[Dict],
        max_round: int = 10,
        admin_name: str = "Admin",
        func_call_filter: bool = True,
        speaker_selection_method: str = "auto",
        allow_repeat_speaker: bool = True,
    ) -> None:
        """
        GroupChatModerator uses the same initialization arguments as GroupChat.
        Please refer to the GroupChat constructor for more information.
        """
        # All arguments are forwarded unchanged to GroupChat; only the two
        # speaker-selection prompt methods below differ from the base class.
        super().__init__(
            agents=agents,
            messages=messages,
            max_round=max_round,
            admin_name=admin_name,
            func_call_filter=func_call_filter,
            speaker_selection_method=speaker_selection_method,
            allow_repeat_speaker=allow_repeat_speaker,
        )

    def select_speaker_msg(self, agents: List[Agent]) -> str:
        """Return the system message for selecting the next speaker. This is always the *first* message in the context.

        Unlike the base GroupChat role-play framing, this prompt frames the LLM
        as a moderator driving efficient progress toward task completion.
        """
        return f"""You are moderating a conversation between the following participants:

{self._participant_roles(agents)}

Read the following conversation, then carefully consider who should speak next based on who's input would be most valued in this moment (e.g., to make the most progress on the task). Speakers do not need equal speaking time. You may even ignore non-relevant participants. Your focus is on efficiently driving progress toward task completion.

You must select only one speaker to go next, and you must only return their name (i.e., from the set {[agent.name for agent in agents]})
"""

    def select_speaker_prompt(self, agents: List[Agent]) -> str:
        """Return the floating system prompt selecting the next speaker. This is always the *last* message in the context."""
        return f"Read the above conversation, then carefully consider who should speak next based on who's input would be most valued in this moment to make progress on the task. Select the next speaker from {[agent.name for agent in agents]}. Only return their name."
4 changes: 4 additions & 0 deletions autogen/agentchat/conversable_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ def __init__(
code_execution_config: Optional[Union[Dict, Literal[False]]] = None,
llm_config: Optional[Union[Dict, Literal[False]]] = None,
default_auto_reply: Optional[Union[str, Dict, None]] = "",
description: Optional[str] = None,
):
"""
Args:
Expand Down Expand Up @@ -99,11 +100,14 @@ def __init__(
for available options.
To disable llm-based auto reply, set to False.
default_auto_reply (str or dict or None): default auto reply when no code execution or llm-based reply is generated.
description (str): a short description of the agent. This description is used by other agents
(e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
"""
super().__init__(name)
# a dictionary of conversations, default value is list
self._oai_messages = defaultdict(list)
self._oai_system_message = [{"content": system_message, "role": "system"}]
self.description = description if description is not None else system_message
afourney marked this conversation as resolved.
Show resolved Hide resolved
self._is_termination_msg = (
is_termination_msg if is_termination_msg is not None else (lambda x: x.get("content") == "TERMINATE")
)
Expand Down
25 changes: 12 additions & 13 deletions autogen/agentchat/groupchat.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import logging
import sys
import random
import json
from dataclasses import dataclass
from typing import Dict, List, Optional, Union
import re
Expand Down Expand Up @@ -65,13 +66,17 @@ def next_agent(self, agent: Agent, agents: List[Agent]) -> Agent:
return self.agents[(offset + i) % len(self.agents)]

def select_speaker_msg(self, agents: List[Agent]):
afourney marked this conversation as resolved.
Show resolved Hide resolved
"""Return the message for selecting the next speaker."""
"""Return the system message for selecting the next speaker. This is always the *first* message in the context."""
return f"""You are in a role play game. The following roles are available:
{self._participant_roles(agents)}.

Read the following conversation.
Then select the next role from {[agent.name for agent in agents]} to play. Only return the role."""

def select_speaker_prompt(self, agents: List[Agent]):
afourney marked this conversation as resolved.
Show resolved Hide resolved
"""Return the floating system prompt selecting the next speaker. This is always the *last* message in the context."""
return f"Read the above conversation. Then select the next role from {[agent.name for agent in agents]} to play. Only return the role."

def manual_select_speaker(self, agents: List[Agent]) -> Agent:
"""Manually select the next speaker."""

Expand Down Expand Up @@ -155,15 +160,9 @@ def select_speaker(self, last_speaker: Agent, selector: ConversableAgent):

# auto speaker selection
selector.update_system_message(self.select_speaker_msg(agents))
final, name = selector.generate_oai_reply(
self.messages
+ [
{
"role": "system",
"content": f"Read the above conversation. Then select the next role from {[agent.name for agent in agents]} to play. Only return the role.",
}
]
)
context = self.messages + [{"role": "system", "content": self.select_speaker_prompt(agents)}]
final, name = selector.generate_oai_reply(context)

if not final:
# the LLM client is None, thus no reply is generated. Use round robin instead.
return self.next_agent(last_speaker, agents)
Expand All @@ -190,11 +189,11 @@ def _participant_roles(self, agents: List[Agent] = None) -> str:

roles = []
for agent in agents:
if agent.system_message.strip() == "":
if agent.description.strip() == "":
logger.warning(
f"The agent '{agent.name}' has an empty system_message, and may not work well with GroupChat."
f"The agent '{agent.name}' has an empty description, and may not work well with GroupChat."
)
roles.append(f"{agent.name}: {agent.system_message}")
roles.append(f"{agent.name}: {agent.description}".strip())
return "\n".join(roles)

def _mentioned_agents(self, message_content: str, agents: List[Agent]) -> Dict:
Expand Down
31 changes: 22 additions & 9 deletions autogen/agentchat/user_proxy_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,13 @@ class UserProxyAgent(ConversableAgent):
To customize the initial message when a conversation starts, override `generate_init_message` method.
"""

# Default UserProxyAgent.description values, based on human_input_mode
DEFAULT_USER_PROXY_AGENT_DESCRIPTIONS = {
"ALWAYS": "An attentive HUMAN user who can answer questions about the task, and can perform tasks such as running Python code or inputting command line commands at a Linux terminal and reporting back the execution results.",
"TERMINATE": "A user that can run Python code or input command line commands at a Linux terminal and report back the execution results.",
"NEVER": "A user that can run Python code or input command line commands at a Linux terminal and report back the execution results.",
}

def __init__(
self,
name: str,
Expand All @@ -26,6 +33,7 @@ def __init__(
default_auto_reply: Optional[Union[str, Dict, None]] = "",
llm_config: Optional[Union[Dict, Literal[False]]] = False,
system_message: Optional[str] = "",
description: Optional[str] = None,
):
"""
Args:
Expand Down Expand Up @@ -68,15 +76,20 @@ def __init__(
Default to false, which disables llm-based auto reply.
system_message (str): system message for ChatCompletion inference.
Only used when llm_config is not False. Use it to reprogram the agent.
afourney marked this conversation as resolved.
Show resolved Hide resolved
description (str): a short description of the agent. This description is used by other agents
(e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
"""
super().__init__(
name,
system_message,
is_termination_msg,
max_consecutive_auto_reply,
human_input_mode,
function_map,
code_execution_config,
llm_config,
default_auto_reply,
name=name,
system_message=system_message,
is_termination_msg=is_termination_msg,
max_consecutive_auto_reply=max_consecutive_auto_reply,
human_input_mode=human_input_mode,
function_map=function_map,
code_execution_config=code_execution_config,
llm_config=llm_config,
default_auto_reply=default_auto_reply,
description=description
if description is not None
else self.DEFAULT_USER_PROXY_AGENT_DESCRIPTIONS[human_input_mode],
)
58 changes: 58 additions & 0 deletions test/agentchat/contrib/test_group_chat_moderator.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
import pytest
import autogen

from autogen.agentchat.contrib.group_chat_moderator import GroupChatModerator


def test_moderation_prompt():
    """Verify that GroupChatModerator emits moderation-style speaker-selection prompts."""

    def _make_agent(name, **overrides):
        # Shared construction for the three participants; only the
        # system_message/description differ per agent.
        return autogen.ConversableAgent(
            name,
            max_consecutive_auto_reply=2,
            human_input_mode="NEVER",
            llm_config=False,
            default_auto_reply=f"This is {name} speaking.",
            **overrides,
        )

    agents = [
        _make_agent("alice", system_message="You are Alice, a helpful AI assistant."),
        _make_agent("bob", description="You are Bob, a helpful AI assistant."),
        _make_agent("sam"),
    ]
    groupchat = GroupChatModerator(agents=agents, messages=[], max_round=2)

    system_prompt = groupchat.select_speaker_msg(agents)

    # The moderation framing replaces the base class's role-play framing.
    assert (
        "Read the following conversation, then carefully consider who should speak next based on who's input would be most valued in this moment (e.g., to make the most progress on the task)."
        in system_prompt
    )

    # The provided system_message, the provided description, and the default
    # ConversableAgent prompt must all appear in the participant roles.
    assert "You are Alice, a helpful AI assistant." in system_prompt  # provided prompt
    assert "You are Bob, a helpful AI assistant." in system_prompt  # provided description
    assert "You are a helpful AI Assistant" in system_prompt  # default prompt

    selection_prompt = groupchat.select_speaker_prompt(agents)
    assert (
        "Read the above conversation, then carefully consider who should speak next based on who's input would be most valued in this moment to make progress on the task."
        in selection_prompt
    )
    for expected_name in ("alice", "bob", "sam"):
        assert expected_name in selection_prompt


if __name__ == "__main__":
    # Allow running this test directly as a script, without pytest.
    test_moderation_prompt()
21 changes: 21 additions & 0 deletions test/agentchat/test_conversable_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -275,6 +275,27 @@ def test_conversable_agent():
with pytest.raises(KeyError):
dummy_agent_1.last_message(dummy_agent_3)

# Check the description field
assert dummy_agent_1.description != dummy_agent_1.system_message
assert dummy_agent_2.description == dummy_agent_2.system_message

dummy_agent_4 = ConversableAgent(
name="dummy_agent_4",
system_message="The fourth dummy agent used for testing.",
llm_config=False,
human_input_mode="TERMINATE",
)
assert dummy_agent_4.description == "The fourth dummy agent used for testing." # Same as system message

dummy_agent_5 = ConversableAgent(
name="dummy_agent_5",
system_message="",
description="The fifth dummy agent used for testing.",
llm_config=False,
human_input_mode="TERMINATE",
)
assert dummy_agent_5.description == "The fifth dummy agent used for testing."  # Uses the provided description


def test_generate_reply():
def add_num(num_to_be_added):
Expand Down
Loading