Skip to content

Commit

Permalink
Fix bug in async group chat with func/tool call (#1243)
Browse files Browse the repository at this point in the history
* test func/tool call in group chat w/ >2 agents

* filter_dict -> exclude

* modify comment

* add last message to groupchat

* typecheck allow_repeat_speaker
  • Loading branch information
sonichi authored and joshkyh committed Jan 17, 2024
1 parent 405f5c0 commit 902f970
Show file tree
Hide file tree
Showing 5 changed files with 104 additions and 96 deletions.
1 change: 0 additions & 1 deletion .github/workflows/openai.yml
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,6 @@ jobs:
OAI_CONFIG_LIST: ${{ secrets.OAI_CONFIG_LIST }}
run: |
pip install nbconvert nbformat ipykernel
coverage run -a -m pytest test/agentchat/test_function_call_groupchat.py
coverage run -a -m pytest test/test_notebook.py
coverage xml
cat "$(pwd)/test/executed_openai_notebook_output.txt"
Expand Down
68 changes: 18 additions & 50 deletions autogen/agentchat/groupchat.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,11 +149,15 @@ def reset(self):
"""Reset the group chat."""
self.messages.clear()

def append(self, message: Dict):
def append(self, message: Dict, speaker: Agent):
    """Append a message to the group chat.

    We cast the content to str here so that it can be managed by text-based
    model.

    Args:
        message: the message dict to append; NOTE it is mutated in place
            (the "name" and "content" keys may be rewritten below).
        speaker: the agent that produced the message; its name is recorded
            on the message for non-function roles.
    """
    # set the name to speaker's name if the role is not function
    # if the role is tool, it is OK to modify the name
    if message["role"] != "function":
        message["name"] = speaker.name
    # cast content (which may not be a plain string) to str for text-based models
    message["content"] = content_str(message["content"])
    self.messages.append(message)

Expand Down Expand Up @@ -224,8 +228,10 @@ def manual_select_speaker(self, agents: Optional[List[Agent]] = None) -> Union[A
return None

def _prepare_and_select_agents(

self, last_speaker: Agent
) -> Tuple[Optional[Agent], List[Agent], Optional[List[Dict]]]:

) -> Tuple[Optional[Agent], List[Agent], Optional[List[Dict]], Optional[List[Dict]]]:
if self.speaker_selection_method.lower() not in self._VALID_SPEAKER_SELECTION_METHODS:
raise ValueError(
f"GroupChat speaker_selection_method is set to '{self.speaker_selection_method}'. "
Expand Down Expand Up @@ -328,34 +334,8 @@ def select_speaker(self, last_speaker: Agent, selector: ConversableAgent) -> Age
return selected_agent
# auto speaker selection
selector.update_system_message(self.select_speaker_msg(agents))

# If last message is a tool call or function call, blank the call so the api doesn't throw
messages = self.messages.copy()
if messages[-1].get("function_call", False):
messages[-1] = dict(messages[-1], function_call=None)
if messages[-1].get("tool_calls", False):
messages[-1] = dict(messages[-1], tool_calls=None)
context = messages + [{"role": "system", "content": self.select_speaker_prompt(agents)}]
final, name = selector.generate_oai_reply(context)

if not final:
# the LLM client is None, thus no reply is generated. Use round robin instead.
return self.next_agent(last_speaker, agents)

# If exactly one agent is mentioned, use it. Otherwise, leave the OAI response unmodified
mentions = self._mentioned_agents(name, agents)
if len(mentions) == 1:
name = next(iter(mentions))
else:
logger.warning(
f"GroupChat select_speaker failed to resolve the next speaker's name. This is because the speaker selection OAI call returned:\n{name}"
)

# Return the result
try:
return self.agent_by_name(name)
except ValueError:
return self.next_agent(last_speaker, agents)
final, name = selector.generate_oai_reply(messages)
return self._finalize_speaker(last_speaker, final, name, agents)

async def a_select_speaker(self, last_speaker: Agent, selector: ConversableAgent) -> Agent:
"""Select the next speaker."""
Expand All @@ -364,15 +344,10 @@ async def a_select_speaker(self, last_speaker: Agent, selector: ConversableAgent
return selected_agent
# auto speaker selection
selector.update_system_message(self.select_speaker_msg(agents))
final, name = await selector.a_generate_oai_reply(
self.messages
+ [
{
"role": "system",
"content": f"Read the above conversation. Then select the next role from {[agent.name for agent in agents]} to play. Only return the role.",
}
]
)
final, name = await selector.a_generate_oai_reply(messages)
return self._finalize_speaker(last_speaker, final, name, agents)

def _finalize_speaker(self, last_speaker: Agent, final: bool, name: str, agents: List[Agent]) -> Agent:
if not final:
# the LLM client is None, thus no reply is generated. Use round robin instead.
return self.next_agent(last_speaker, agents)
Expand Down Expand Up @@ -485,12 +460,7 @@ def run_chat(
speaker = sender
groupchat = config
for i in range(groupchat.max_round):
# set the name to speaker's name if the role is not function
if message["role"] != "function":
message["name"] = speaker.name

groupchat.append(message)

groupchat.append(message, speaker)
if self._is_termination_msg(message):
# The conversation is over
break
Expand Down Expand Up @@ -524,6 +494,8 @@ def run_chat(
# The speaker sends the message without requesting a reply
speaker.send(reply, self, request_reply=False)
message = self.last_message(speaker)
if i == groupchat.max_round - 1:
groupchat.append(message, speaker)
return True, None

async def a_run_chat(
Expand All @@ -539,11 +511,7 @@ async def a_run_chat(
speaker = sender
groupchat = config
for i in range(groupchat.max_round):
# set the name to speaker's name if the role is not function
if message["role"] != "function":
message["name"] = speaker.name

groupchat.append(message)
groupchat.append(message, speaker)

if self._is_termination_msg(message):
# The conversation is over
Expand Down
73 changes: 45 additions & 28 deletions test/agentchat/test_function_call_groupchat.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,22 +4,37 @@
import os
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from conftest import skip_openai # noqa: E402

try:
from openai import OpenAI
except ImportError:
skip = True
else:
skip = False or skip_openai
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from conftest import skip_openai as skip

# OpenAI function/tool schema for `get_random_number`, shared by the
# parametrized test cases below; the function takes no parameters.
func_def = {
    "name": "get_random_number",
    "description": "Get a random number between 0 and 100",
    "parameters": {
        "type": "object",
        "properties": {},
    },
}


@pytest.mark.skipif(
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
skip,
reason="do not run if openai is not installed or requested to skip",
)
def test_function_call_groupchat():
@pytest.mark.parametrize(
"key, value, sync",
[
("tools", [{"type": "function", "function": func_def}], False),
("functions", [func_def], True),
("tools", [{"type": "function", "function": func_def}], True),
],
)
def test_function_call_groupchat(key, value, sync):
import random

def get_random_number():
Expand All @@ -35,29 +50,31 @@ def get_random_number():
llm_config = {
"config_list": config_list_gpt4,
"cache_seed": 42,
"functions": [
{
"name": "get_random_number",
"description": "Get a random number between 0 and 100",
"parameters": {
"type": "object",
"properties": {},
},
},
],
key: value,
}
# llm_config without functions
llm_config_no_function = llm_config.copy()
del llm_config_no_function[key]

user_proxy = autogen.UserProxyAgent(
name="User_proxy",
system_message="A human admin that will execute function_calls.",
name="Executor",
description="An executor that will execute function_calls.",
function_map={"get_random_number": get_random_number},
human_input_mode="NEVER",
)
coder = autogen.AssistantAgent(
player = autogen.AssistantAgent(
name="Player",
system_message="You will can function `get_random_number` to get a random number. Stop only when you get at least 1 even number and 1 odd number. Reply TERMINATE to stop.",
system_message="You will use function `get_random_number` to get a random number. Stop only when you get at least 1 even number and 1 odd number. Reply TERMINATE to stop.",
description="A player that will make function_calls.",
llm_config=llm_config,
)
groupchat = autogen.GroupChat(agents=[user_proxy, coder], messages=[], max_round=7)
observer = autogen.AssistantAgent(
name="Observer",
system_message="You observe the conversation between the executor and the player. Summarize the conversation in 1 sentence.",
description="An observer that will observe the conversation.",
llm_config=llm_config_no_function,
)
groupchat = autogen.GroupChat(agents=[user_proxy, player, observer], messages=[], max_round=7)

# pass in llm_config with functions
with pytest.raises(
Expand All @@ -66,12 +83,12 @@ def get_random_number():
):
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

# pass in llm_config without functions
llm_config_manager = llm_config.copy()
del llm_config_manager["functions"]
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config_manager)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config_no_function)

user_proxy.initiate_chat(manager, message="Let's start the game!")
if sync:
user_proxy.initiate_chat(manager, message="Let's start the game!")
else:
user_proxy.a_initiate_chat(manager, message="Let's start the game!")


def test_no_function_map():
Expand Down Expand Up @@ -103,4 +120,4 @@ def test_no_function_map():

if __name__ == "__main__":
test_function_call_groupchat()
test_no_function_map()
# test_no_function_map()
32 changes: 31 additions & 1 deletion test/agentchat/test_groupchat.py
Original file line number Diff line number Diff line change
Expand Up @@ -222,6 +222,35 @@ def _test_n_agents_less_than_3(method):
agent1.initiate_chat(group_chat_manager, message="This is alice speaking.")


def test_invalid_allow_repeat_speaker():
    """GroupChat must reject an `allow_repeat_speaker` value that is neither
    a bool nor a list of Agents (here: a dict), raising ValueError with a
    specific message.
    """
    agent1 = autogen.ConversableAgent(
        "alice",
        max_consecutive_auto_reply=10,
        human_input_mode="NEVER",
        llm_config=False,
        default_auto_reply="This is alice speaking.",
    )
    agent2 = autogen.ConversableAgent(
        "bob",
        max_consecutive_auto_reply=10,
        human_input_mode="NEVER",
        llm_config=False,
        default_auto_reply="This is bob speaking.",
    )
    # test invalid allow_repeat_speaker
    groupchat = autogen.GroupChat(
        agents=[agent1, agent2],
        messages=[],
        max_round=6,
        speaker_selection_method="round_robin",
        # invalid on purpose: a dict is neither bool nor List[Agent]
        allow_repeat_speaker={},
    )
    # the ValueError is expected somewhere during manager construction or
    # the chat run — the test only pins the exception type and message
    with pytest.raises(ValueError) as e:
        group_chat_manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=False)
        agent1.initiate_chat(group_chat_manager, message="This is alice speaking.")
    assert str(e.value) == "GroupChat allow_repeat_speaker should be a bool or a list of Agents.", e.value


def test_n_agents_less_than_3():
for method in ["auto", "round_robin", "random", "RounD_roBin"]:
_test_n_agents_less_than_3(method)
Expand Down Expand Up @@ -536,8 +565,9 @@ def test_graceful_exit_before_max_round():
# test_broadcast()
# test_chat_manager()
# test_plugin()
test_speaker_selection_method()
# test_speaker_selection_method()
# test_n_agents_less_than_3()
# test_agent_mentions()
# test_termination()
# test_next_agent()
test_invalid_allow_repeat_speaker()
26 changes: 10 additions & 16 deletions test/agentchat/test_tool_calls.py
Original file line number Diff line number Diff line change
@@ -1,29 +1,26 @@
try:
from openai import OpenAI
except ImportError:
OpenAI = None
import inspect
import pytest
import json
import sys
import os
import autogen
from conftest import skip_openai
from autogen.math_utils import eval_math_responses
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
import sys
from autogen.oai.client import TOOL_ENABLED

try:
from openai import OpenAI
except ImportError:
skip = True
skip_openai = True
else:
skip = False or skip_openai
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from conftest import skip_openai


@pytest.mark.skipif(skip_openai or not TOOL_ENABLED, reason="openai>=1.1.0 not installed or requested to skip")
def test_eval_math_responses():
config_list = autogen.config_list_from_models(
KEY_LOC, exclude="aoai", model_list=["gpt-4-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k"]
KEY_LOC, model_list=["gpt-4-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k"]
)
tools = [
{
Expand Down Expand Up @@ -80,10 +77,6 @@ def test_eval_math_responses_api_style_function():
config_list = autogen.config_list_from_models(
KEY_LOC,
model_list=["gpt-4-0613", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k"],
filter_dict={
"api_type": ["azure"],
"api_version": ["2023-10-01-preview", "2023-09-01-preview", "2023-08-01-preview", "2023-07-01-preview"],
},
)
functions = [
{
Expand Down Expand Up @@ -286,6 +279,7 @@ def receive(


if __name__ == "__main__":
test_update_tool()
test_eval_math_responses()
test_multi_tool_call()
# test_update_tool()
# test_eval_math_responses()
# test_multi_tool_call()
test_eval_math_responses_api_style_function()

0 comments on commit 902f970

Please sign in to comment.