consecutive auto reply, history, template, group chat, class-specific reply #1165

Merged · 15 commits · Aug 3, 2023
12 changes: 10 additions & 2 deletions flaml/autogen/agentchat/__init__.py
@@ -1,6 +1,14 @@
from .agent import Agent
from .assistant_agent import AssistantAgent
from .responsive_agent import ResponsiveAgent
from .assistant_agent import AssistantAgent
from .user_proxy_agent import UserProxyAgent
from .groupchat import GroupChatManager, GroupChatParticipant

__all__ = ["Agent", "ResponsiveAgent", "AssistantAgent", "UserProxyAgent"]
__all__ = [
"Agent",
"ResponsiveAgent",
"AssistantAgent",
"UserProxyAgent",
"GroupChatManager",
"GroupChatParticipant",
]
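
With the reordered imports and the expanded __all__, the new group chat classes are exposed at the package level, so they can be imported alongside the existing agents, for example:

from flaml.autogen.agentchat import (
    AssistantAgent,
    GroupChatManager,
    GroupChatParticipant,
    UserProxyAgent,
)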
2 changes: 0 additions & 2 deletions flaml/autogen/agentchat/agent.py
@@ -36,14 +36,12 @@ def reset(self):
def generate_reply(
self,
messages: Optional[List[Dict]] = None,
default_reply: Optional[Union[str, Dict]] = "",
sender: Optional["Agent"] = None,
) -> Union[str, Dict, None]:
"""(Abstract method) Generate a reply based on the received messages.

Args:
messages (list[dict]): a list of messages received.
default_reply (str or dict): the default reply if no other reply is generated.
sender: the sender, an Agent instance.
Returns:
str or dict or None: the generated reply. If None, no reply is generated.
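
The change above removes the default_reply parameter from the abstract generate_reply, leaving only messages and sender. A minimal sketch of a subclass written against the slimmed-down signature (EchoAgent is a hypothetical example, not part of this PR):

from typing import Dict, List, Optional, Union

from flaml.autogen.agentchat import Agent


class EchoAgent(Agent):
    """Hypothetical agent that echoes the last message it received."""

    def generate_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional["Agent"] = None,
    ) -> Union[str, Dict, None]:
        # no default_reply argument anymore; return None to signal "no reply"
        if not messages:
            return None
        return f"Echo: {messages[-1]['content']}"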
11 changes: 5 additions & 6 deletions flaml/autogen/agentchat/contrib/math_user_proxy_agent.py
@@ -165,7 +165,7 @@ def __init__(
default_auto_reply=default_auto_reply,
**kwargs,
)

self.register_auto_reply(Agent, self._generate_math_reply)
# fixed var
self._max_invalid_q_per_step = max_invalid_q_per_step

@@ -276,12 +276,11 @@ def execute_one_wolfram_query(self, query: str):
is_success = False
return output, is_success

def generate_reply(
def _generate_math_reply(
self,
messages: Optional[List[Dict]] = None,
default_reply: Optional[Union[str, Dict]] = DEFAULT_REPLY,
sender: Optional[Agent] = None,
) -> Union[str, Dict, None]:
):
"""Generate an auto reply."""
if messages is None:
messages = self._oai_messages[sender.name]
@@ -291,7 +290,7 @@ def generate_reply(

if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
# no code block is found, lang should be `UNKNOWN`
return default_reply
return True, self._default_auto_reply
is_success, all_success = True, True
reply = ""
for code_block in code_blocks:
@@ -323,7 +322,7 @@ def generate_reply(
self._accum_invalid_q_per_step = 0
reply = "Please revisit the problem statement and your reasoning. If you think this step is correct, solve it yourself and continue the next step. Otherwise, correct this step."

return reply
return True, reply


# Modified based on langchain. Langchain is licensed under MIT License:
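
In this file the math proxy's generate_reply override becomes _generate_math_reply, registered via register_auto_reply for senders of type Agent, and it now returns a (final, reply) tuple instead of a bare reply. A hedged sketch of the same class-specific reply pattern with a hypothetical reply function (LoggingProxy and _log_reply are illustrative names; the meaning of the final flag is inferred from the diff, where a non-final result lets later reply generators run):

from typing import Dict, List, Optional, Tuple, Union

from flaml.autogen.agentchat import Agent, UserProxyAgent


class LoggingProxy(UserProxyAgent):
    def __init__(self, name, **kwargs):
        super().__init__(name=name, **kwargs)
        # call _log_reply whenever the sender is an Agent (or a subclass)
        self.register_auto_reply(Agent, self._log_reply)

    def _log_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
    ) -> Tuple[bool, Union[str, Dict, None]]:
        if messages is None:
            messages = self._oai_messages[sender.name]
        print(f"{self.name} received {len(messages)} messages from {sender.name}")
        # not final: fall through to the remaining reply generators
        return False, None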
143 changes: 143 additions & 0 deletions flaml/autogen/agentchat/groupchat.py
@@ -0,0 +1,143 @@
import sys
from typing import Dict, List, Optional, Tuple, Union
from .agent import Agent
from .responsive_agent import ResponsiveAgent


class GroupChatManager(ResponsiveAgent):
"""(WIP) A chat manager agent that can manage a group chat of multiple agents."""

agents: List["GroupChatParticipant"]
max_round: int

def _participant_roles(self):
return "\n".join([f"{agent.name}: {agent.system_message}" for agent in self.agents])

def _select_speaker_msg(self):
return {
"role": "system",
"content": f"""You are in a role play game. The following roles are available:
{self._participant_roles()}. Read the following conversation.
Then select the next role from {self._agent_names} to play. Only return the role.""",
}

def __init__(
self,
max_round: Optional[int] = 10,
name: Optional[str] = "chat_manager",
# unlimited consecutive auto reply by default
max_consecutive_auto_reply: Optional[int] = sys.maxsize,
human_input_mode: Optional[str] = "NEVER",
# seed: Optional[int] = 4,
**kwargs,
):
super().__init__(
name=name,
max_consecutive_auto_reply=max_consecutive_auto_reply,
human_input_mode=human_input_mode,
**kwargs,
)
self.register_auto_reply(GroupChatParticipant, self._generate_reply_for_participant)
self.max_round = max_round
self._agent_names = []
self._next_speaker = None
self._round = 0
self._messages = []
# self._random = random.Random(seed)

def _generate_reply_for_participant(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
) -> Union[str, Dict, None]:
if messages is None:
messages = self._oai_messages[sender.name]
message = messages[-1]
# set the name to sender's name if the role is not function
if message["role"] != "function":
message["name"] = sender.name
self._messages.append(message)
self._next_speaker = None
# broadcast the message to all agents except the sender
for agent in self.agents:
if agent != sender:
self.send(message, agent)
if self._round == 0:
self._agent_names = [agent.name for agent in self.agents]
self._round += 1
if self._round >= self.max_round:
return True, None
# speaker selection msg from an agent
self._next_speaker = self._select_speaker(sender)
self._next_speaker.send(self._next_speaker.generate_reply(sender=self), self)
return True, None

@property
def next_speaker(self):
"""Return the next speaker."""
return self._next_speaker

def _select_speaker(self, last_speaker: "GroupChatParticipant"):
"""Select the next speaker."""
final, name = self._generate_oai_reply([self._select_speaker_msg()] + self._messages)
if not final:
# i = self._random.randint(0, len(self._agent_names) - 1) # randomly pick an id
name = self._agent_names[(self._agent_names.index(last_speaker.name) + 1) % len(self._agent_names)]
return self.agent_by_name(name)

def agent_by_name(self, name: str) -> "GroupChatParticipant":
"""Find the next speaker based on the message."""
return self.agents[self._agent_names.index(name)]

def reset(self):
super().reset()
self._round = 0
self._messages.clear()
self._next_speaker = None


class GroupChatParticipant(ResponsiveAgent):
"""(WIP) A group chat participant agent that can participate in a group chat."""

group_chat_manager: GroupChatManager

def __init__(
self,
name,
group_chat_manager=None,
**kwargs,
):
super().__init__(
name=name,
**kwargs,
)
self.register_auto_reply(GroupChatManager, self._generate_reply_for_chat_manager)
self.group_chat_manager = group_chat_manager

def _generate_reply_for_chat_manager(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
) -> Tuple[bool, Union[str, Dict, None]]:
"""Generate reply for the chat manager."""
return self.group_chat_manager.next_speaker != self, None


# def _speaker_selection(self, instruction):
# """Select the next speaker."""
# if self.llm_config is False:
# if self.human_input_mode == "NEVER":
# return self.name
# else:
# return self.get_human_input(instruction["content"])
# sender = self.chat_manager.room
# roles_msg = {
# "content": f"""The following roles are available:
# {self._participant_roles()}""",
# "role": "system",
# }
# old_system_msg = self.system_message
# self.update_system_message(instruction["content"])
# reply = self._generate_oai_reply([roles_msg] + self.chat_messages[sender.name])
# self.update_system_message(old_system_msg)
# return reply
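
A hedged end-to-end sketch of how the two new classes might be wired together. The diff declares an agents attribute on GroupChatManager but does not show how it is populated or how the first message is sent, so the direct assignment and the initial send below are assumptions, as is the llm_config value:

from flaml.autogen.agentchat import GroupChatManager, GroupChatParticipant

llm_config = {"model": "gpt-4"}  # assumed LLM configuration

manager = GroupChatManager(max_round=4, llm_config=llm_config)
alice = GroupChatParticipant("alice", group_chat_manager=manager, llm_config=llm_config)
bob = GroupChatParticipant("bob", group_chat_manager=manager, llm_config=llm_config)

# assumption: participants are attached by assigning the agents attribute directly
manager.agents = [alice, bob]

# assumption: a participant starts the group chat by messaging the manager;
# the manager then broadcasts each message and selects the next speaker
alice.send("Let's outline a plan for the release.", manager)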