
Commit 9069eb9

Authored by skzhang1 and sonichi
support llm_config in AgentOptimizer (#2299)
* support llm_config in agentoptimizer
* fix doc
* restore seed timeout

---------

Co-authored-by: skzhang1 <[email protected]>
Co-authored-by: Chi Wang <[email protected]>
1 parent 97b5433 commit 9069eb9

File tree

4 files changed (+47, -40 lines changed):

- autogen/agentchat/contrib/agent_optimizer.py
- notebook/agentchat_agentoptimizer.ipynb
- test/agentchat/contrib/test_agent_optimizer.py
- website/blog/2023-12-23-AgentOptimizer/index.mdx

autogen/agentchat/contrib/agent_optimizer.py (+16, -13)

@@ -1,6 +1,6 @@
 import copy
 import json
-from typing import Dict, List, Optional
+from typing import Dict, List, Literal, Optional, Union
 
 import autogen
 from autogen.code_utils import execute_code
@@ -172,16 +172,16 @@ class AgentOptimizer:
     def __init__(
         self,
         max_actions_per_step: int,
-        config_file_or_env: Optional[str] = "OAI_CONFIG_LIST",
-        config_file_location: Optional[str] = "",
+        llm_config: dict,
         optimizer_model: Optional[str] = "gpt-4-1106-preview",
     ):
         """
         (These APIs are experimental and may change in the future.)
         Args:
             max_actions_per_step (int): the maximum number of actions that the optimizer can take in one step.
-            config_file_or_env: path or environment of the OpenAI api configs.
-            config_file_location: the location of the OpenAI config file.
+            llm_config (dict): llm inference configuration.
+                Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create) for available options.
+                When using OpenAI or Azure OpenAI endpoints, please specify a non-empty 'model' either in `llm_config` or in each config of 'config_list' in `llm_config`.
             optimizer_model: the model used for the optimizer.
         """
         self.max_actions_per_step = max_actions_per_step
@@ -199,14 +199,17 @@ def __init__(
         self._failure_functions_performance = []
         self._best_performance = -1
 
-        config_list = autogen.config_list_from_json(
-            config_file_or_env,
-            file_location=config_file_location,
-            filter_dict={"model": [self.optimizer_model]},
+        assert isinstance(llm_config, dict), "llm_config must be a dict"
+        llm_config = copy.deepcopy(llm_config)
+        self.llm_config = llm_config
+        if self.llm_config in [{}, {"config_list": []}, {"config_list": [{"model": ""}]}]:
+            raise ValueError(
+                "When using OpenAI or Azure OpenAI endpoints, specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'."
+            )
+        self.llm_config["config_list"] = autogen.filter_config(
+            llm_config["config_list"], {"model": [self.optimizer_model]}
         )
-        if len(config_list) == 0:
-            raise RuntimeError("No valid openai config found in the config file or environment variable.")
-        self._client = autogen.OpenAIWrapper(config_list=config_list)
+        self._client = autogen.OpenAIWrapper(**self.llm_config)
 
     def record_one_conversation(self, conversation_history: List[Dict], is_satisfied: bool = None):
         """
@@ -266,7 +269,7 @@ def step(self):
                 actions_num=action_index,
                 best_functions=best_functions,
                 incumbent_functions=incumbent_functions,
-                accumerated_experience=failure_experience_prompt,
+                accumulated_experience=failure_experience_prompt,
                 statistic_informations=statistic_prompt,
             )
             messages = [{"role": "user", "content": prompt}]

notebook/agentchat_agentoptimizer.ipynb (+14, -9)

@@ -41,6 +41,7 @@
 "source": [
 "import copy\n",
 "import json\n",
+"import os\n",
 "from typing import Any, Callable, Dict, List, Optional, Tuple, Union\n",
 "\n",
 "from openai import BadRequestError\n",
@@ -299,16 +300,22 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"config_list = config_list_from_json(env_or_file=\"OAI_CONFIG_LIST\")\n",
+"llm_config = {\n",
+"    \"config_list\": [\n",
+"        {\n",
+"            \"model\": \"gpt-4-1106-preview\",\n",
+"            \"api_type\": \"azure\",\n",
+"            \"api_key\": os.environ[\"AZURE_OPENAI_API_KEY\"],\n",
+"            \"base_url\": \"https://ENDPOINT.openai.azure.com/\",\n",
+"            \"api_version\": \"2023-07-01-preview\",\n",
+"        }\n",
+"    ]\n",
+"}\n",
 "\n",
 "assistant = autogen.AssistantAgent(\n",
 "    name=\"assistant\",\n",
 "    system_message=\"You are a helpful assistant.\",\n",
-"    llm_config={\n",
-"        \"timeout\": 600,\n",
-"        \"seed\": 42,\n",
-"        \"config_list\": config_list,\n",
-"    },\n",
+"    llm_config=llm_config,\n",
 ")\n",
 "user_proxy = MathUserProxyAgent(\n",
 "    name=\"mathproxyagent\",\n",
@@ -361,9 +368,7 @@
 "source": [
 "EPOCH = 10\n",
 "optimizer_model = \"gpt-4-1106-preview\"\n",
-"optimizer = AgentOptimizer(\n",
-"    max_actions_per_step=3, config_file_or_env=\"OAI_CONFIG_LIST\", optimizer_model=optimizer_model\n",
-")\n",
+"optimizer = AgentOptimizer(max_actions_per_step=3, llm_config=llm_config, optimizer_model=optimizer_model)\n",
 "for i in range(EPOCH):\n",
 "    for index, query in enumerate(train_data):\n",
 "        is_correct = user_proxy.initiate_chat(assistant, answer=query[\"answer\"], problem=query[\"question\"])\n",

test/agentchat/contrib/test_agent_optimizer.py (+15, -16)

@@ -22,15 +22,13 @@ def test_record_conversation():
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
     )
-    assistant = AssistantAgent(
-        "assistant",
-        system_message="You are a helpful assistant.",
-        llm_config={
-            "timeout": 60,
-            "cache_seed": 42,
-            "config_list": config_list,
-        },
-    )
+    llm_config = {
+        "config_list": config_list,
+        "timeout": 60,
+        "cache_seed": 42,
+    }
+
+    assistant = AssistantAgent("assistant", system_message="You are a helpful assistant.", llm_config=llm_config)
     user_proxy = UserProxyAgent(
         name="user_proxy",
         human_input_mode="NEVER",
@@ -43,7 +41,7 @@ def test_record_conversation():
     )
 
     user_proxy.initiate_chat(assistant, message=problem)
-    optimizer = AgentOptimizer(max_actions_per_step=3, config_file_or_env=OAI_CONFIG_LIST)
+    optimizer = AgentOptimizer(max_actions_per_step=3, llm_config=llm_config)
     optimizer.record_one_conversation(assistant.chat_messages_for_summary(user_proxy), is_satisfied=True)
 
     assert len(optimizer._trial_conversations_history) == 1
@@ -66,14 +64,15 @@ def test_step():
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
     )
+    llm_config = {
+        "config_list": config_list,
+        "timeout": 60,
+        "cache_seed": 42,
+    }
     assistant = AssistantAgent(
         "assistant",
         system_message="You are a helpful assistant.",
-        llm_config={
-            "timeout": 60,
-            "cache_seed": 42,
-            "config_list": config_list,
-        },
+        llm_config=llm_config,
     )
     user_proxy = UserProxyAgent(
         name="user_proxy",
@@ -86,7 +85,7 @@ def test_step():
         max_consecutive_auto_reply=3,
     )
 
-    optimizer = AgentOptimizer(max_actions_per_step=3, config_file_or_env=OAI_CONFIG_LIST)
+    optimizer = AgentOptimizer(max_actions_per_step=3, llm_config=llm_config)
     user_proxy.initiate_chat(assistant, message=problem)
     optimizer.record_one_conversation(assistant.chat_messages_for_summary(user_proxy), is_satisfied=True)

website/blog/2023-12-23-AgentOptimizer/index.mdx (+2, -2)

@@ -42,7 +42,7 @@ is_satisfied is a bool value that represents whether the user is satisfied with
 Example:
 
 ```python
-optimizer = AgentOptimizer(max_actions_per_step=3, config_file_or_env="OAI_CONFIG_LIST")
+optimizer = AgentOptimizer(max_actions_per_step=3, llm_config = llm_config)
 # ------------ code to solve a problem ------------
 # ......
 # -------------------------------------------------
@@ -76,7 +76,7 @@ Moreover, it also includes mechanisms to check whether each update is feasible,
 The optimization process is as follows:
 
 ```python
-optimizer = AgentOptimizer(max_actions_per_step=3, config_file_or_env="OAI_CONFIG_LIST")
+optimizer = AgentOptimizer(max_actions_per_step=3, llm_config = llm_config)
 for i in range(EPOCH):
     is_correct = user_proxy.initiate_chat(assistant, message = problem)
     history = assistant.chat_messages_for_summary(user_proxy)
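
For context, the blog's loop continues beyond the lines shown in the diff by recording each conversation and taking an optimization step. A rough sketch of that continuation, using only the two APIs this commit touches (`record_one_conversation` and `step`); the `function_updates` name is illustrative, and how the returned updates are registered with the agents is elided here.

```python
    # Continuation of the for-loop shown above (sketch, not part of the diff):
    optimizer.record_one_conversation(history, is_satisfied=is_correct)
    function_updates = optimizer.step()  # proposes function updates for the agents
```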
