
Commit 25f328a: improve validation of llm_config

sonichi committed Mar 10, 2024
1 parent b0a8e6e

Showing 5 changed files with 18 additions and 19 deletions.
autogen/agentchat/conversable_agent.py (22 changes: 12 additions & 10 deletions)
@@ -65,7 +65,7 @@ class ConversableAgent(LLMAgent):
     `run_code`, and `execute_function` methods respectively.
     """

-    DEFAULT_CONFIG = {}  # An empty configuration
+    DEFAULT_CONFIG = None  # None or dict, the default config for llm inference
     MAX_CONSECUTIVE_AUTO_REPLY = 100  # maximum number of consecutive auto replies (subject to future change)

     DEFAULT_SUMMARY_PROMPT = "Summarize the takeaway from the conversation. Do not add any introductory phrases."
@@ -123,6 +123,7 @@ def __init__(
             llm_config (dict or False or None): llm inference configuration.
                 Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
                 for available options.
+                When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `llm_config` or in each config of 'config_list' in `llm_config`.
                 To disable llm-based auto reply, set to False.
             default_auto_reply (str or dict): default auto reply when no code execution or llm-based reply is generated.
             description (str): a short description of the agent. This description is used by other agents
@@ -139,20 +140,14 @@ def __init__(
             else (lambda x: content_str(x.get("content")) == "TERMINATE")
         )

-        if llm_config is False:
+        if llm_config is False or llm_config is None and self.DEFAULT_CONFIG is None:
             self.llm_config = False
             self.client = None
         else:
-            self.llm_config = self.DEFAULT_CONFIG.copy()
+            self.llm_config = {} if self.DEFAULT_CONFIG is None else self.DEFAULT_CONFIG.copy()
             if isinstance(llm_config, dict):
                 self.llm_config.update(llm_config)
-            if "model" not in self.llm_config and (
-                not self.llm_config.get("config_list")
-                or any(not config.get("model") for config in self.llm_config["config_list"])
-            ):
-                raise ValueError(
-                    "Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'."
-                )
+            self._validate_llm_config()
             self.client = OpenAIWrapper(**self.llm_config)

         if logging_enabled():
@@ -246,6 +241,13 @@ def __init__(
             "process_message_before_send": [],
         }

+    def _validate_llm_config(self):
+        # TODO: more complete validity check
+        if self.llm_config in [{}, {"config_list": []}, {"config_list": [{"model": ""}]}]:
+            raise ValueError(
+                "When using OpenAI or Azure OpenAI endpoints, specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'."
+            )
+
     @property
     def name(self) -> str:
         """Get the name of the agent."""
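Net effect of the changes in this file: validation moves into a single helper, only unambiguously unusable configs are rejected, and `llm_config=None` (with no class default) now disables the client instead of raising. A minimal sketch of the resulting behavior — agent names here are illustrative:

```python
from autogen import ConversableAgent

# llm_config=False, or llm_config=None while DEFAULT_CONFIG is left as None,
# disables llm-based auto reply: no client is created and no error is raised.
agent = ConversableAgent(name="no_llm", llm_config=False)
assert agent.client is None
agent = ConversableAgent(name="no_llm_either", llm_config=None)
assert agent.client is None

# The configs checked by _validate_llm_config() still raise ValueError.
for bad in ({}, {"config_list": []}, {"config_list": [{"model": ""}]}):
    try:
        ConversableAgent(name="bad", llm_config=bad)
    except ValueError as e:
        print(e)  # ... specify a non-empty 'model' ...
```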
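Switching `DEFAULT_CONFIG` from `{}` to `None` also distinguishes "no default at all" from "an empty default", so a subclass can ship its own default config that `llm_config=None` picks up. A hypothetical subclass to illustrate; the model name and API key are placeholders:

```python
from autogen import ConversableAgent

class MyAgent(ConversableAgent):
    # Class-level default; per the new __init__ logic, llm_config=None now
    # falls back to a copy of this instead of disabling the client.
    DEFAULT_CONFIG = {"config_list": [{"model": "gpt-4", "api_key": "sk-placeholder"}]}

agent = MyAgent(name="with_default", llm_config=None)
assert agent.llm_config == MyAgent.DEFAULT_CONFIG
```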
autogen/agentchat/user_proxy_agent.py (2 changes: 1 addition & 1 deletion)

@@ -70,7 +70,7 @@ def __init__(
                 - timeout (Optional, int): The maximum execution time in seconds.
                 - last_n_messages (Experimental, Optional, int): The number of messages to look back for code execution. Default to 1.
             default_auto_reply (str or dict or None): the default auto reply message when no code execution or llm based reply is generated.
-            llm_config (dict or False): llm inference configuration.
+            llm_config (dict or False or None): llm inference configuration.
                 Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
                 for available options.
                 Default to false, which disables llm-based auto reply.
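`UserProxyAgent` keeps `False` as its default, so the docstring change only widens the accepted type to include `None`. A brief sketch under that assumption, with illustrative names:

```python
from autogen import UserProxyAgent

# The default (False) disables llm-based auto reply; the proxy only relays
# human input and executes code.
user_proxy = UserProxyAgent(name="user_proxy", llm_config=False)

# None behaves the same here, since ConversableAgent.DEFAULT_CONFIG is None.
user_proxy_none = UserProxyAgent(name="user_proxy_none", llm_config=None)
assert user_proxy_none.client is None
```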
autogen/oai/client.py (1 change: 1 addition & 0 deletions)

@@ -356,6 +356,7 @@ def __init__(self, *, config_list: Optional[List[Dict[str, Any]]] = None, **base
             base_config: base config. It can contain both keyword arguments for openai client
                 and additional kwargs.
+                When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `base_config` or in each config of `config_list`.
         """

         if logging_enabled():
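The same "non-empty model" expectation applies when constructing the wrapper directly. A minimal sketch — the model and key are placeholders, and a real key is needed before `create` will succeed:

```python
from autogen import OpenAIWrapper

# Each entry in config_list names its model explicitly.
client = OpenAIWrapper(config_list=[{"model": "gpt-4", "api_key": "sk-placeholder"}])

response = client.create(messages=[{"role": "user", "content": "2+2="}])
print(client.extract_text_or_completion_object(response))
```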
autogen/version.py (2 changes: 1 addition & 1 deletion)

@@ -1 +1 @@
-__version__ = "0.2.17"
+__version__ = "0.2.18"
test/agentchat/test_conversable_agent.py (10 changes: 3 additions & 7 deletions)

@@ -817,25 +817,21 @@ def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str:


 def test_register_for_llm_without_LLM():
-    with pytest.raises(
-        ValueError,
-        match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
-    ):
-        ConversableAgent(name="agent", llm_config=None)
+    ConversableAgent(name="agent", llm_config=None)


 def test_register_for_llm_without_configuration():
     with pytest.raises(
         ValueError,
-        match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
+        match="When using OpenAI or Azure OpenAI endpoints, specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
     ):
         ConversableAgent(name="agent", llm_config={"config_list": []})


 def test_register_for_llm_without_model_name():
     with pytest.raises(
         ValueError,
-        match="Please either set llm_config to False, or specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
+        match="When using OpenAI or Azure OpenAI endpoints, specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'.",
     ):
         ConversableAgent(name="agent", llm_config={"config_list": [{"model": ""}]})
