improve test speed (microsoft#2406)
* improve test speed

* speed up test

* speed up test
sonichi authored Apr 17, 2024
1 parent 166d564 commit 5d26276
Showing 12 changed files with 116 additions and 124 deletions.
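Most of the speed-up comes from two changes repeated across the files below: the tests now pick models through tag-based config filtering (typically gpt-3.5-turbo instead of gpt-4), and conversations are capped at fewer rounds. For context, here is a minimal sketch of the tag-filtering pattern the tests switch to; the config entry shown in the comment is an assumption, not part of this commit:

    import autogen

    # Entries in the OAI_CONFIG_LIST file can carry a "tags" field, e.g.
    #   {"model": "gpt-3.5-turbo", "api_key": "...", "tags": ["gpt-3.5-turbo"]}
    # filter_dict={"tags": [...]} keeps only matching entries, so a test can
    # pin a cheap, fast model without hard-coding exact model-name strings.
    config_list = autogen.config_list_from_json(
        "OAI_CONFIG_LIST",
        file_location="notebook",  # KEY_LOC in the tests below
        filter_dict={"tags": ["gpt-3.5-turbo"]},
    )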
2 changes: 1 addition & 1 deletion test/agentchat/test_agent_logging.py
@@ -39,7 +39,7 @@
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
filter_dict={
"model": ["gpt-4", "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
"model": ["gpt-3.5-turbo"],
},
file_location=KEY_LOC,
)
6 changes: 2 additions & 4 deletions test/agentchat/test_agent_usage.py
@@ -85,15 +85,14 @@ def test_agent_usage():
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
filter_dict={"tags": ["gpt-3.5-turbo"]},
)
assistant = AssistantAgent(
"assistant",
system_message="You are a helpful assistant.",
llm_config={
"timeout": 600,
"cache_seed": None,
"config_list": config_list,
"model": "gpt-3.5-turbo-0613",
},
)

@@ -104,7 +103,6 @@ def test_agent_usage():
code_execution_config=False,
llm_config={
"config_list": config_list,
"model": "gpt-3.5-turbo-0613",
},
# In the system message the "user" always refers to the other agent.
system_message="You ask a user for help. You check the answer from the user and provide feedback.",
@@ -140,5 +138,5 @@ def test_agent_usage():


if __name__ == "__main__":
test_gathering()
# test_gathering()
test_agent_usage()
35 changes: 17 additions & 18 deletions test/agentchat/test_assistant_agent.py
@@ -9,23 +9,16 @@
from autogen.agentchat import AssistantAgent, UserProxyAgent

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from conftest import skip_openai # noqa: E402

try:
from openai import OpenAI
except ImportError:
skip = True
else:
skip = False or skip_openai
from conftest import reason, skip_openai # noqa: E402

KEY_LOC = "notebook"
OAI_CONFIG_LIST = "OAI_CONFIG_LIST"
here = os.path.abspath(os.path.dirname(__file__))


@pytest.mark.skipif(
sys.platform in ["darwin", "win32"] or skip,
reason="do not run on MacOS or windows OR openai not installed OR requested to skip",
sys.platform in ["darwin", "win32"] or skip_openai,
reason="do not run on MacOS or windows OR " + reason,
)
def test_ai_user_proxy_agent():
conversations = {}
@@ -34,6 +27,7 @@ def test_ai_user_proxy_agent():
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
filter_dict={"tags": ["gpt-3.5-turbo"]},
)
assistant = AssistantAgent(
"assistant",
@@ -67,7 +61,7 @@
print("Result summary:", res.summary)


@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
@pytest.mark.skipif(skip_openai, reason=reason)
def test_gpt35(human_input_mode="NEVER", max_consecutive_auto_reply=5):
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
@@ -111,9 +105,13 @@ def test_gpt35(human_input_mode="NEVER", max_consecutive_auto_reply=5):
assert not isinstance(user.use_docker, bool) # None or str


@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_create_execute_script(human_input_mode="NEVER", max_consecutive_auto_reply=10):
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, file_location=KEY_LOC)
@pytest.mark.skipif(skip_openai, reason=reason)
def test_create_execute_script(human_input_mode="NEVER", max_consecutive_auto_reply=3):
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
filter_dict={"tags": ["gpt-3.5-turbo"]},
)
conversations = {}
# autogen.ChatCompletion.start_logging(conversations)
llm_config = {
@@ -160,13 +158,13 @@ def test_create_execute_script(human_input_mode="NEVER", max_consecutive_auto_re
# autogen.ChatCompletion.stop_logging()


@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10):
@pytest.mark.skipif(skip_openai, reason=reason)
def test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=2):
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
filter_dict={
"model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
"tags": ["gpt-4", "gpt-4-32k"],
},
)
hard_questions = [
@@ -207,4 +205,5 @@ def tsp_message(sender, recipient, context):
# when GPT-4, i.e., the DEFAULT_MODEL, is used, conversation in the following test
# should terminate in 2-3 rounds of interactions (because is_termination_msg should be true after 2-3 rounds)
# although the max_consecutive_auto_reply is set to 10.
test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10)
test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=2)
# test_ai_user_proxy_agent()
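Several of these files drop a local try/except around the openai import in favor of `from conftest import reason, skip_openai`. The shared conftest itself is not part of this diff; the sketch below only illustrates, under assumptions, how such a skip flag and reason are commonly wired up in pytest — it is not the repository's actual conftest.py:

    # test/conftest.py — illustrative sketch only, names beyond skip_openai and reason are assumed
    skip_openai = False
    reason = "requested to skip openai tests"

    def pytest_addoption(parser):
        # assumed option name; lets CI opt out of tests that call the OpenAI API
        parser.addoption("--skip-openai", action="store_true", help="skip tests that require openai")

    def pytest_configure(config):
        global skip_openai
        skip_openai = config.getoption("--skip-openai", False)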
40 changes: 13 additions & 27 deletions test/agentchat/test_async.py
@@ -10,14 +10,7 @@
import autogen

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from conftest import skip_openai # noqa: E402

try:
from openai import OpenAI
except ImportError:
skip = True
else:
skip = False or skip_openai
from conftest import reason, skip_openai # noqa: E402


def get_market_news(ind, ind_upper):
@@ -61,24 +54,15 @@ def get_market_news(ind, ind_upper):
return feeds_summary


@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
@pytest.mark.skipif(skip_openai, reason=reason)
@pytest.mark.asyncio
async def test_async_groupchat():
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)

llm_config = {
"timeout": 600,
"cache_seed": 41,
"config_list": config_list,
"temperature": 0,
}
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]})

# create an AssistantAgent instance named "assistant"
assistant = autogen.AssistantAgent(
name="assistant",
llm_config={
"timeout": 600,
"cache_seed": 41,
"config_list": config_list,
"temperature": 0,
},
@@ -93,20 +77,21 @@ async def test_async_groupchat():
default_auto_reply=None,
)

groupchat = autogen.GroupChat(agents=[user_proxy, assistant], messages=[], max_round=12)
groupchat = autogen.GroupChat(
agents=[user_proxy, assistant], messages=[], max_round=3, speaker_selection_method="round_robin"
)
manager = autogen.GroupChatManager(
groupchat=groupchat,
llm_config=llm_config,
is_termination_msg=lambda x: "TERMINATE" in x.get("content", ""),
)
await user_proxy.a_initiate_chat(manager, message="""Have a short conversation with the assistant.""")
await user_proxy.a_initiate_chat(manager, message="""223434*3422=?.""")
assert len(user_proxy.chat_messages) > 0


@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
@pytest.mark.skipif(skip_openai, reason=reason)
@pytest.mark.asyncio
async def test_stream():
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]})
data = asyncio.Future()

async def add_stock_price_data():
@@ -167,9 +152,10 @@ async def add_data_reply(recipient, messages, sender, config):
while not data_task.done() and not data_task.cancelled():
reply = await user_proxy.a_generate_reply(sender=assistant)
if reply is not None:
res = await user_proxy.a_send(reply, assistant)
print("Chat summary and cost:", res.summary, res.cost)
await user_proxy.a_send(reply, assistant)
# print("Chat summary and cost:", res.summary, res.cost)


if __name__ == "__main__":
asyncio.run(test_stream())
# asyncio.run(test_stream())
asyncio.run(test_async_groupchat())
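The group-chat test above gets faster mainly by capping max_round at 3 and switching speaker selection to round robin, which rotates through the agents deterministically instead of spending an extra LLM call per turn to pick the next speaker, as the default "auto" mode does. In isolation, the relevant construction looks roughly like this; the agents and the llm_config contents are assumed to be set up as in the test:

    # round_robin alternates user_proxy and assistant without any
    # speaker-selection LLM call; max_round=3 bounds the whole exchange
    groupchat = autogen.GroupChat(
        agents=[user_proxy, assistant],
        messages=[],
        max_round=3,
        speaker_selection_method="round_robin",
    )
    manager = autogen.GroupChatManager(
        groupchat=groupchat,
        llm_config={"config_list": config_list},
        is_termination_msg=lambda x: "TERMINATE" in x.get("content", ""),
    )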
9 changes: 5 additions & 4 deletions test/agentchat/test_async_chats.py
@@ -17,9 +17,10 @@
@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
@pytest.mark.asyncio
async def test_async_chats():
config_list = autogen.config_list_from_json(
config_list_35 = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
filter_dict={"tags": ["gpt-3.5-turbo"]},
)

financial_tasks = [
@@ -32,16 +33,16 @@ async def test_async_chats():

financial_assistant_1 = AssistantAgent(
name="Financial_assistant_1",
llm_config={"config_list": config_list},
llm_config={"config_list": config_list_35},
system_message="You are a knowledgeable AI Assistant. Reply TERMINATE when everything is done.",
)
financial_assistant_2 = AssistantAgent(
name="Financial_assistant_2",
llm_config={"config_list": config_list},
llm_config={"config_list": config_list_35},
)
writer = AssistantAgent(
name="Writer",
llm_config={"config_list": config_list},
llm_config={"config_list": config_list_35},
is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
system_message="""
You are a professional writer, known for
21 changes: 7 additions & 14 deletions test/agentchat/test_async_get_human_input.py
@@ -11,26 +11,19 @@
import autogen

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from conftest import skip_openai # noqa: E402
from conftest import reason, skip_openai # noqa: E402

try:
from openai import OpenAI
except ImportError:
skip = True
else:
skip = False or skip_openai


@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
@pytest.mark.skipif(skip_openai, reason=reason)
@pytest.mark.asyncio
async def test_async_get_human_input():
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]})

# create an AssistantAgent instance named "assistant"
assistant = autogen.AssistantAgent(
name="assistant",
max_consecutive_auto_reply=2,
llm_config={"seed": 41, "config_list": config_list, "temperature": 0},
llm_config={"config_list": config_list, "temperature": 0},
)

user_proxy = autogen.UserProxyAgent(name="user", human_input_mode="ALWAYS", code_execution_config=False)
@@ -48,10 +41,10 @@ async def test_async_get_human_input():
print("Human input:", res.human_input)


@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
@pytest.mark.skipif(skip_openai, reason=reason)
@pytest.mark.asyncio
async def test_async_max_turn():
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]})

# create an AssistantAgent instance named "assistant"
assistant = autogen.AssistantAgent(
@@ -79,5 +72,5 @@ async def test_async_max_turn():


if __name__ == "__main__":
asyncio.run(test_async_get_human_input())
# asyncio.run(test_async_get_human_input())
asyncio.run(test_async_max_turn())
(The remaining changed files are not shown in this view.)