Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Error handling in getting LLM-based summary #1567

Merged
merged 5 commits into from
Feb 7, 2024
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions autogen/agentchat/conversable_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -832,8 +832,10 @@ def _summarize_chat(
if not isinstance(prompt, str):
raise ValueError("The summary_prompt must be a string.")
msg_list = agent._groupchat.messages if hasattr(agent, "_groupchat") else agent.chat_messages[self]

summary = self._llm_response_preparer(prompt, msg_list, llm_agent=agent, cache=cache)
try:
summary = self._llm_response_preparer(prompt, msg_list, llm_agent=agent, cache=cache)
except Exception as e:
qingyun-wu marked this conversation as resolved.
Show resolved Hide resolved
warnings.warn(f"Cannot extract summary using reflection_with_llm: {e}", UserWarning)
else:
warnings.warn("No summary_method provided or summary_method is not supported: ")
return summary
Expand Down
81 changes: 78 additions & 3 deletions test/agentchat/test_chats.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,21 @@
import pytest
from conftest import skip_openai
import autogen
from typing import Literal

from pydantic import BaseModel, Field
from typing_extensions import Annotated

@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")

try:
import openai
except ImportError:
skip = True
else:
skip = False or skip_openai


@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_chats_group():
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
Expand Down Expand Up @@ -118,7 +130,7 @@ def test_chats_group():
print(all_res[manager_1].summary)


@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_chats():
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
Expand All @@ -127,6 +139,7 @@ def test_chats():

financial_tasks = [
"""What are the full names of NVDA and TESLA.""",
"""Investigate the reasons.""",
"""Pros and cons of the companies I'm interested in. Keep it short.""",
]

Expand Down Expand Up @@ -197,6 +210,68 @@ def test_chats():
# print(blogpost.summary, insights_and_blogpost)


@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_chats_w_func():
    """Integration test: two-agent chat with a registered tool and an LLM-reflected summary.

    Requires a live OpenAI-compatible config; skipped when openai is absent or
    skipping was requested.
    """
    configs = autogen.config_list_from_json(
        OAI_CONFIG_LIST,
        file_location=KEY_LOC,
    )

    # Assistant that is instructed to rely solely on registered functions.
    assistant = autogen.AssistantAgent(
        name="chatbot",
        system_message="For currency exchange tasks, only use the functions you have been provided with. Reply TERMINATE when the task is done.",
        llm_config={"config_list": configs, "timeout": 120},
    )

    # Proxy that executes tool calls locally and stops on a TERMINATE reply.
    proxy = autogen.UserProxyAgent(
        name="user_proxy",
        is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
        human_input_mode="NEVER",
        max_consecutive_auto_reply=10,
        code_execution_config={
            "last_n_messages": 1,
            "work_dir": "tasks",
            "use_docker": False,
        },
    )

    CurrencySymbol = Literal["USD", "EUR"]

    def exchange_rate(base_currency: CurrencySymbol, quote_currency: CurrencySymbol) -> float:
        # Identity conversion first; otherwise look up the directed pair.
        if base_currency == quote_currency:
            return 1.0
        rates = {("USD", "EUR"): 1 / 1.1, ("EUR", "USD"): 1.1}
        try:
            return rates[(base_currency, quote_currency)]
        except KeyError:
            raise ValueError(f"Unknown currencies {base_currency}, {quote_currency}")

    @proxy.register_for_execution()
    @assistant.register_for_llm(description="Currency exchange calculator.")
    def currency_calculator(
        base_amount: Annotated[float, "Amount of currency in base_currency"],
        base_currency: Annotated[CurrencySymbol, "Base currency"] = "USD",
        quote_currency: Annotated[CurrencySymbol, "Quote currency"] = "EUR",
    ) -> str:
        converted = exchange_rate(base_currency, quote_currency) * base_amount
        return f"{converted} {quote_currency}"

    # Drive the conversation and summarize it via reflection_with_llm.
    chat_result = proxy.initiate_chat(
        assistant,
        message="How much is 123.45 USD in EUR?",
        summary_method="reflection_with_llm",
    )
    print(chat_result.summary, chat_result.cost, chat_result.chat_history)


if __name__ == "__main__":
    # Run the integration tests directly when invoked as a script.
    # test_chats()
    test_chats_group()
    test_chats_w_func()
Loading