Skip to content

Commit 8c8b455

Browse files
sonichi and davorrunje authored
improve validation of llm_config (microsoft#1946)
* improve validation of llm_config * fixed test_register_for_llm_without_LLM * docstr about llm_config=None * Make None a sentinel * pop tools --------- Co-authored-by: Davor Runje <[email protected]>
1 parent d14b7dd commit 8c8b455

File tree

7 files changed

+44
-27
lines changed

7 files changed

+44
-27
lines changed

autogen/agentchat/conversable_agent.py

+18-16
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ class ConversableAgent(LLMAgent):
6565
`run_code`, and `execute_function` methods respectively.
6666
"""
6767

68-
DEFAULT_CONFIG = {} # An empty configuration
68+
DEFAULT_CONFIG = False # False or dict, the default config for llm inference
6969
MAX_CONSECUTIVE_AUTO_REPLY = 100 # maximum number of consecutive auto replies (subject to future change)
7070

7171
DEFAULT_SUMMARY_PROMPT = "Summarize the takeaway from the conversation. Do not add any introductory phrases."
@@ -123,7 +123,9 @@ def __init__(
123123
llm_config (dict or False or None): llm inference configuration.
124124
Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
125125
for available options.
126+
When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `llm_config` or in each config of 'config_list' in `llm_config`.
126127
To disable llm-based auto reply, set to False.
128+
When set to None, will use self.DEFAULT_CONFIG, which defaults to False.
127129
default_auto_reply (str or dict): default auto reply when no code execution or llm-based reply is generated.
128130
description (str): a short description of the agent. This description is used by other agents
129131
(e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message)
@@ -139,21 +141,7 @@ def __init__(
139141
else (lambda x: content_str(x.get("content")) == "TERMINATE")
140142
)
141143

142-
if llm_config is False:
143-
self.llm_config = False
144-
self.client = None
145-
else:
146-
self.llm_config = self.DEFAULT_CONFIG.copy()
147-
if isinstance(llm_config, dict):
148-
self.llm_config.update(llm_config)
149-
if "model" not in self.llm_config and (
150-
not self.llm_config.get("config_list")
151-
or any(not config.get("model") for config in self.llm_config["config_list"])
152-
):
153-
raise ValueError(
154-
"Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'."
155-
)
156-
self.client = OpenAIWrapper(**self.llm_config)
144+
self._validate_llm_config(llm_config)
157145

158146
if logging_enabled():
159147
log_new_agent(self, locals())
@@ -246,6 +234,20 @@ def __init__(
246234
"process_message_before_send": [],
247235
}
248236

237+
def _validate_llm_config(self, llm_config):
238+
assert llm_config in (None, False) or isinstance(
239+
llm_config, dict
240+
), "llm_config must be a dict or False or None."
241+
if llm_config is None:
242+
llm_config = self.DEFAULT_CONFIG
243+
self.llm_config = self.DEFAULT_CONFIG if llm_config is None else llm_config
244+
# TODO: more complete validity check
245+
if self.llm_config in [{}, {"config_list": []}, {"config_list": [{"model": ""}]}]:
246+
raise ValueError(
247+
"When using OpenAI or Azure OpenAI endpoints, specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'."
248+
)
249+
self.client = None if self.llm_config is False else OpenAIWrapper(**self.llm_config)
250+
249251
@property
250252
def name(self) -> str:
251253
"""Get the name of the agent."""

autogen/agentchat/groupchat.py

+5-1
Original file line numberDiff line numberDiff line change
@@ -514,7 +514,11 @@ def __init__(
514514
system_message: Optional[Union[str, List]] = "Group chat manager.",
515515
**kwargs,
516516
):
517-
if kwargs.get("llm_config") and (kwargs["llm_config"].get("functions") or kwargs["llm_config"].get("tools")):
517+
if (
518+
kwargs.get("llm_config")
519+
and isinstance(kwargs["llm_config"], dict)
520+
and (kwargs["llm_config"].get("functions") or kwargs["llm_config"].get("tools"))
521+
):
518522
raise ValueError(
519523
"GroupChatManager is not allowed to make function/tool calls. Please remove the 'functions' or 'tools' config in 'llm_config' you passed in."
520524
)

autogen/agentchat/user_proxy_agent.py

+3-2
Original file line numberDiff line numberDiff line change
@@ -70,10 +70,11 @@ def __init__(
7070
- timeout (Optional, int): The maximum execution time in seconds.
7171
- last_n_messages (Experimental, Optional, int): The number of messages to look back for code execution. Default to 1.
7272
default_auto_reply (str or dict or None): the default auto reply message when no code execution or llm based reply is generated.
73-
llm_config (dict or False): llm inference configuration.
73+
llm_config (dict or False or None): llm inference configuration.
7474
Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
7575
for available options.
76-
Default to false, which disables llm-based auto reply.
76+
Default to False, which disables llm-based auto reply.
77+
When set to None, will use self.DEFAULT_CONFIG, which defaults to False.
7778
system_message (str or List): system message for ChatCompletion inference.
7879
Only used when llm_config is not False. Use it to reprogram the agent.
7980
description (str): a short description of the agent. This description is used by other agents

autogen/oai/client.py

+1
Original file line numberDiff line numberDiff line change
@@ -356,6 +356,7 @@ def __init__(self, *, config_list: Optional[List[Dict[str, Any]]] = None, **base
356356
357357
base_config: base config. It can contain both keyword arguments for openai client
358358
and additional kwargs.
359+
When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `base_config` or in each config of `config_list`.
359360
"""
360361

361362
if logging_enabled():

autogen/version.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "0.2.17"
1+
__version__ = "0.2.18"

notebook/agentchat_function_call_async.ipynb

+7-2
Original file line numberDiff line numberDiff line change
@@ -240,6 +240,7 @@
240240
"\n",
241241
"llm_config_manager = llm_config.copy()\n",
242242
"llm_config_manager.pop(\"functions\", None)\n",
243+
"llm_config_manager.pop(\"tools\", None)\n",
243244
"\n",
244245
"manager = autogen.GroupChatManager(\n",
245246
" groupchat=groupchat,\n",
@@ -361,8 +362,12 @@
361362
],
362363
"metadata": {
363364
"front_matter": {
364-
"tags": ["code generation", "function call", "async"],
365-
"description": "Learn how to implement both synchronous and asynchronous function calls using AssistantAgent and UserProxyAgent in AutoGen, with examples of their application in individual and group chat settings for task execution with language models."
365+
"description": "Learn how to implement both synchronous and asynchronous function calls using AssistantAgent and UserProxyAgent in AutoGen, with examples of their application in individual and group chat settings for task execution with language models.",
366+
"tags": [
367+
"code generation",
368+
"function call",
369+
"async"
370+
]
366371
},
367372
"kernelspec": {
368373
"display_name": "flaml_dev",

test/agentchat/test_conversable_agent.py

+9-5
Original file line numberDiff line numberDiff line change
@@ -817,25 +817,29 @@ def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str:
817817

818818

819819
def test_register_for_llm_without_LLM():
820+
agent = ConversableAgent(name="agent", llm_config=None)
820821
with pytest.raises(
821-
ValueError,
822-
match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
822+
AssertionError,
823+
match="To update a tool signature, agent must have an llm_config",
823824
):
824-
ConversableAgent(name="agent", llm_config=None)
825+
826+
@agent.register_for_llm(description="do things.")
827+
def do_stuff(s: str) -> str:
828+
return f"{s} done"
825829

826830

827831
def test_register_for_llm_without_configuration():
828832
with pytest.raises(
829833
ValueError,
830-
match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
834+
match="When using OpenAI or Azure OpenAI endpoints, specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
831835
):
832836
ConversableAgent(name="agent", llm_config={"config_list": []})
833837

834838

835839
def test_register_for_llm_without_model_name():
836840
with pytest.raises(
837841
ValueError,
838-
match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
842+
match="When using OpenAI or Azure OpenAI endpoints, specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
839843
):
840844
ConversableAgent(name="agent", llm_config={"config_list": [{"model": ""}]})
841845

0 commit comments

Comments (0)