Added an 'auto' mode to last_n_messages (microsoft#693)
* Added an 'auto' mode to last_n_messages

* Added tests for the last_n_messages = 'auto' mode

* Added one additional test case that was overlooked.
afourney authored Nov 15, 2023
1 parent fbb2897 commit d5deec2
Showing 2 changed files with 81 additions and 2 deletions.
17 changes: 15 additions & 2 deletions autogen/agentchat/conversable_agent.py
@@ -91,7 +91,7 @@ def __init__(
When set to True, a default list will be used.
We strongly recommend using docker for code execution.
- timeout (Optional, int): The maximum execution time in seconds.
- last_n_messages (Experimental, Optional, int): The number of messages to look back for code execution. Default to 1.
- last_n_messages (Experimental, Optional, int or str): The number of messages to look back for code execution. Defaults to 1. If set to 'auto', it will scan backwards through all messages that have arrived since the agent last spoke (typically the last time execution was attempted).
llm_config (dict or False): llm inference configuration.
Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
for available options.
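For orientation, here is a minimal usage sketch of the new mode. It is not part of this diff: the agent name, human_input_mode, and use_docker settings are illustrative assumptions.

from autogen import UserProxyAgent

# Hypothetical setup: with last_n_messages="auto", the executor scans every
# message received since it last spoke, instead of a fixed window of 1 message.
user_proxy = UserProxyAgent(
    "user_proxy",
    human_input_mode="NEVER",
    code_execution_config={
        "last_n_messages": "auto",  # the new mode added by this commit
        "use_docker": False,        # docker is recommended; disabled here only for brevity
    },
)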
@@ -635,10 +635,23 @@ def generate_code_execution_reply(
messages = self._oai_messages[sender]
last_n_messages = code_execution_config.pop("last_n_messages", 1)

messages_to_scan = last_n_messages
if last_n_messages == "auto":
# Find when the agent last spoke
messages_to_scan = 0
for i in range(len(messages)):
message = messages[-(i + 1)]
if "role" not in message:
break
elif message["role"] != "user":
break
else:
messages_to_scan += 1

# iterate through the last n messages in reverse order
# if code blocks are found, execute the code blocks and return the output
# if no code blocks are found, continue
for i in range(min(len(messages), last_n_messages)):
for i in range(min(len(messages), messages_to_scan)):
message = messages[-(i + 1)]
if not message["content"]:
continue
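To make the backward scan above concrete, here is a small standalone sketch of the same loop; the sample message history is invented for illustration and is not taken from the repository.

# Illustrative conversation, oldest first; the agent's own last turn has role "assistant".
messages = [
    {"role": "assistant", "content": "Here is my plan."},
    {"role": "user", "content": "here is some code to run"},
    {"role": "user", "content": "please run it"},
]

messages_to_scan = 0
for i in range(len(messages)):
    message = messages[-(i + 1)]
    # Walk backwards and stop at the first message that is not from the user,
    # i.e. the agent's own most recent turn.
    if "role" not in message or message["role"] != "user":
        break
    messages_to_scan += 1

print(messages_to_scan)  # 2 -- only the messages that arrived after the agent last spoke are scanned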
66 changes: 66 additions & 0 deletions test/agentchat/test_conversable_agent.py
@@ -128,6 +128,71 @@ def test_generate_code_execution_reply():
)
assert agent._code_execution_config["last_n_messages"] == 3

# scenario 5: if last_n_messages is set to 'auto' and no code is found, nothing breaks, whether or not an assistant message is present
assistant_message_for_auto = {
"content": "This is me! The assistant!",
"role": "assistant",
}

dummy_messages_for_auto = []
for i in range(3):
dummy_messages_for_auto.append(
{
"content": "no code block",
"role": "user",
}
)

# Without an assistant present
agent._code_execution_config = {"last_n_messages": "auto", "use_docker": False}
assert agent.generate_code_execution_reply(dummy_messages_for_auto) == (
False,
None,
)

# With an assistant message present
agent._code_execution_config = {"last_n_messages": "auto", "use_docker": False}
assert agent.generate_code_execution_reply([assistant_message_for_auto] + dummy_messages_for_auto) == (
False,
None,
)

# scenario 6: if last_n_messages is set to 'auto' and code is found, then we execute it correctly
dummy_messages_for_auto = []
for i in range(4):
# Without an assistant present
agent._code_execution_config = {"last_n_messages": "auto", "use_docker": False}
assert agent.generate_code_execution_reply([code_message] + dummy_messages_for_auto) == (
True,
"exitcode: 0 (execution succeeded)\nCode output: \nhello world\n",
)

# With an assistant message present
agent._code_execution_config = {"last_n_messages": "auto", "use_docker": False}
assert agent.generate_code_execution_reply(
[assistant_message_for_auto] + [code_message] + dummy_messages_for_auto
) == (
True,
"exitcode: 0 (execution succeeded)\nCode output: \nhello world\n",
)

dummy_messages_for_auto.append(
{
"content": "no code block",
"role": "user",
}
)

# scenario 7: if last_n_messages is set to 'auto' and the only code block precedes the assistant's most recent message, then nothing is executed
agent._code_execution_config = {"last_n_messages": "auto", "use_docker": False}
assert agent.generate_code_execution_reply(
[code_message] + [assistant_message_for_auto] + dummy_messages_for_auto
) == (
False,
None,
)
assert agent._code_execution_config["last_n_messages"] == "auto"


def test_max_consecutive_auto_reply():
agent = ConversableAgent("a0", max_consecutive_auto_reply=2, llm_config=False, human_input_mode="NEVER")
@@ -249,4 +314,5 @@ async def test_a_generate_reply_raises_on_messages_and_sender_none(conversable_a
# test_trigger()
# test_context()
# test_max_consecutive_auto_reply()
# test_generate_code_execution_reply()
test_conversable_agent()
