Skip to content

Commit 5889640

Browse files
authored
do model check properly (microsoft#1686)
- do model check properly
- bug in logging test
- set llm_config
1 parent b43a118 commit 5889640

File tree

5 files changed

+40
-62
lines changed

5 files changed

+40
-62
lines changed

autogen/agentchat/conversable_agent.py

+7-5
Original file line numberDiff line numberDiff line change
@@ -144,11 +144,13 @@ def __init__(
144144
self.llm_config = self.DEFAULT_CONFIG.copy()
145145
if isinstance(llm_config, dict):
146146
self.llm_config.update(llm_config)
147-
# We still have a default `llm_config` because the user didn't
148-
# specify anything. This won't work, so raise an error to avoid
149-
# an obscure message from the OpenAI service.
150-
if self.llm_config == {}:
151-
raise ValueError("Please specify the value for 'llm_config'.")
147+
if "model" not in self.llm_config and (
148+
not self.llm_config.get("config_list")
149+
or any(not config.get("model") for config in self.llm_config["config_list"])
150+
):
151+
raise ValueError(
152+
"Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'."
153+
)
152154
self.client = OpenAIWrapper(**self.llm_config)
153155

154156
if logging_enabled():

autogen/oai/client.py

+4-14
Original file line numberDiff line numberDiff line change
@@ -361,27 +361,15 @@ def __init__(self, *, config_list: Optional[List[Dict[str, Any]]] = None, **base
361361
if logging_enabled():
362362
log_new_wrapper(self, locals())
363363
openai_config, extra_kwargs = self._separate_openai_config(base_config)
364-
# This *may* work if the `llm_config` has specified the `model` attribute,
365-
# so just warn here.
366-
if type(config_list) is list and len(config_list) == 0:
367-
logger.warning("OpenAI client was provided with an empty config_list, which may not be intended.")
368-
# If the `llm_config` has no `model` then the call will fail. Abort now.
369-
if "model" not in extra_kwargs:
370-
raise ValueError("Please specify a value for the 'model' in 'llm_config'.")
364+
# It's OK if "model" is not provided in base_config or config_list
365+
# Because one can provide "model" at `create` time.
371366

372367
self._clients: List[ModelClient] = []
373368
self._config_list: List[Dict[str, Any]] = []
374369

375370
if config_list:
376371
config_list = [config.copy() for config in config_list] # make a copy before modifying
377372
for config in config_list:
378-
# We require that each element of `config_list` has a non-empty value
379-
# for `model` specified unless `extra_kwargs` contains "model".
380-
model = None
381-
if "model" in config:
382-
model = config["model"]
383-
if "model" not in extra_kwargs and (model is None or len(model) == 0):
384-
raise ValueError("Please specify a non-empty 'model' value for every item in 'config_list'.")
385373
self._register_default_client(config, openai_config) # could modify the config
386374
self._config_list.append(
387375
{**extra_kwargs, **{k: v for k, v in config.items() if k not in self.openai_kwargs}}
@@ -610,6 +598,7 @@ def yes_or_no_filter(context, response):
610598

611599
if logging_enabled():
612600
# Log the cache hit
601+
# TODO: log the config_id and pass_filter etc.
613602
log_chat_completion(
614603
invocation_id=invocation_id,
615604
client_id=id(client),
@@ -671,6 +660,7 @@ def yes_or_no_filter(context, response):
671660
cache.set(key, response)
672661

673662
if logging_enabled():
663+
# TODO: log the config_id and pass_filter etc.
674664
log_chat_completion(
675665
invocation_id=invocation_id,
676666
client_id=id(client),

test/agentchat/contrib/test_compressible_agent.py

+5-5
Original file line numberDiff line numberDiff line change
@@ -216,14 +216,14 @@ def test_mode_terminate():
216216
reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip",
217217
)
218218
def test_new_compressible_agent_description():
219-
assistant = CompressibleAgent(name="assistant", description="this is a description")
219+
assistant = CompressibleAgent(name="assistant", description="this is a description", llm_config=False)
220220

221221
assert assistant.description == "this is a description", "description is not set correctly"
222222

223223

224224
if __name__ == "__main__":
225-
test_mode_compress()
226-
test_mode_customized()
227-
test_compress_message()
228-
test_mode_terminate()
225+
# test_mode_compress()
226+
# test_mode_customized()
227+
# test_compress_message()
228+
# test_mode_terminate()
229229
test_new_compressible_agent_description()

test/agentchat/test_agent_logging.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -208,7 +208,7 @@ def test_groupchat_logging(db_connection):
208208
# Verify chat_completions message
209209
cur.execute(CHAT_COMPLETIONS_QUERY)
210210
rows = cur.fetchall()
211-
assert len(rows) == 2 # max_round - 1
211+
assert len(rows) >= 2 # some config may fail
212212

213213
# Verify group chat manager agent
214214
cur.execute(AGENTS_QUERY)

test/agentchat/test_conversable_agent.py

+23-37
Original file line numberDiff line numberDiff line change
@@ -474,7 +474,7 @@ async def test_a_generate_reply_raises_on_messages_and_sender_none(conversable_a
474474
def test_update_function_signature_and_register_functions() -> None:
475475
with pytest.MonkeyPatch.context() as mp:
476476
mp.setenv("OPENAI_API_KEY", MOCK_OPEN_AI_API_KEY)
477-
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
477+
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4"}]})
478478

479479
def exec_python(cell: str) -> None:
480480
pass
@@ -618,9 +618,9 @@ def get_origin(d: Dict[str, Callable[..., Any]]) -> Dict[str, Callable[..., Any]
618618
def test_register_for_llm():
619619
with pytest.MonkeyPatch.context() as mp:
620620
mp.setenv("OPENAI_API_KEY", MOCK_OPEN_AI_API_KEY)
621-
agent3 = ConversableAgent(name="agent3", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
622-
agent2 = ConversableAgent(name="agent2", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
623-
agent1 = ConversableAgent(name="agent1", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
621+
agent3 = ConversableAgent(name="agent3", llm_config={"config_list": [{"model": "gpt-4"}]})
622+
agent2 = ConversableAgent(name="agent2", llm_config={"config_list": [{"model": "gpt-4"}]})
623+
agent1 = ConversableAgent(name="agent1", llm_config={"config_list": [{"model": "gpt-4"}]})
624624

625625
@agent3.register_for_llm()
626626
@agent2.register_for_llm(name="python")
@@ -691,9 +691,9 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> s
691691
def test_register_for_llm_api_style_function():
692692
with pytest.MonkeyPatch.context() as mp:
693693
mp.setenv("OPENAI_API_KEY", MOCK_OPEN_AI_API_KEY)
694-
agent3 = ConversableAgent(name="agent3", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
695-
agent2 = ConversableAgent(name="agent2", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
696-
agent1 = ConversableAgent(name="agent1", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
694+
agent3 = ConversableAgent(name="agent3", llm_config={"config_list": [{"model": "gpt-4"}]})
695+
agent2 = ConversableAgent(name="agent2", llm_config={"config_list": [{"model": "gpt-4"}]})
696+
agent1 = ConversableAgent(name="agent1", llm_config={"config_list": [{"model": "gpt-4"}]})
697697

698698
@agent3.register_for_llm(api_style="function")
699699
@agent2.register_for_llm(name="python", api_style="function")
@@ -762,7 +762,7 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> s
762762
def test_register_for_llm_without_description():
763763
with pytest.MonkeyPatch.context() as mp:
764764
mp.setenv("OPENAI_API_KEY", MOCK_OPEN_AI_API_KEY)
765-
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
765+
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4"}]})
766766

767767
with pytest.raises(ValueError) as e:
768768

@@ -774,33 +774,33 @@ def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str:
774774

775775

776776
def test_register_for_llm_without_LLM():
777-
try:
777+
with pytest.raises(
778+
ValueError,
779+
match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
780+
):
778781
ConversableAgent(name="agent", llm_config=None)
779-
assert False, "Expected ConversableAgent to throw ValueError."
780-
except ValueError as e:
781-
assert e.args[0] == "Please specify the value for 'llm_config'."
782782

783783

784784
def test_register_for_llm_without_configuration():
785-
try:
785+
with pytest.raises(
786+
ValueError,
787+
match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
788+
):
786789
ConversableAgent(name="agent", llm_config={"config_list": []})
787-
assert False, "Expected ConversableAgent to throw ValueError."
788-
except ValueError as e:
789-
assert e.args[0] == "Please specify a value for the 'model' in 'llm_config'."
790790

791791

792792
def test_register_for_llm_without_model_name():
793-
try:
794-
ConversableAgent(name="agent", llm_config={"config_list": [{"model": "", "api_key": ""}]})
795-
assert False, "Expected ConversableAgent to throw ValueError."
796-
except ValueError as e:
797-
assert e.args[0] == "Please specify a non-empty 'model' value for every item in 'config_list'."
793+
with pytest.raises(
794+
ValueError,
795+
match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
796+
):
797+
ConversableAgent(name="agent", llm_config={"config_list": [{"model": ""}]})
798798

799799

800800
def test_register_for_execution():
801801
with pytest.MonkeyPatch.context() as mp:
802802
mp.setenv("OPENAI_API_KEY", MOCK_OPEN_AI_API_KEY)
803-
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
803+
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4"}]})
804804
user_proxy_1 = UserProxyAgent(name="user_proxy_1")
805805
user_proxy_2 = UserProxyAgent(name="user_proxy_2")
806806

@@ -835,7 +835,7 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]):
835835
def test_register_functions():
836836
with pytest.MonkeyPatch.context() as mp:
837837
mp.setenv("OPENAI_API_KEY", MOCK_OPEN_AI_API_KEY)
838-
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4", "api_key": ""}]})
838+
agent = ConversableAgent(name="agent", llm_config={"config_list": [{"model": "gpt-4"}]})
839839
user_proxy = UserProxyAgent(name="user_proxy")
840840

841841
def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str:
@@ -1028,20 +1028,6 @@ def stopwatch(num_seconds: Annotated[str, "Number of seconds in the stopwatch."]
10281028
stopwatch_mock.assert_called_once_with(num_seconds="5")
10291029

10301030

1031-
@pytest.mark.skipif(
1032-
skip,
1033-
reason="do not run if skipping openai",
1034-
)
1035-
def test_no_llm_config():
1036-
# We expect a TypeError when the model isn't specified
1037-
with pytest.raises(TypeError, match=r".*Missing required arguments.*"):
1038-
agent1 = ConversableAgent(name="agent1", llm_config=False, human_input_mode="NEVER", default_auto_reply="")
1039-
agent2 = ConversableAgent(
1040-
name="agent2", llm_config={"api_key": "Intentionally left blank."}, human_input_mode="NEVER"
1041-
)
1042-
agent1.initiate_chat(agent2, message="hi")
1043-
1044-
10451031
if __name__ == "__main__":
10461032
# test_trigger()
10471033
# test_context()

0 commit comments

Comments (0)