Skip to content

Commit c374537

Browse files
authored
Sets the umask before executing the task in Docker. (#593)
* Sets the umask before executing the task in Docker. * Added version backward compatibility for disabling cache and setting timeouts.
1 parent d542340 commit c374537

File tree

5 files changed

+44
-19
lines changed

5 files changed

+44
-19
lines changed

samples/tools/testbed/README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
The Autogen Testbed environment is a tool for repeatedly running a set of pre-defined Autogen scenarios in a setting with tightly-controlled initial conditions. With each run, Autogen will start from a blank slate, working out what code needs to be written, and what libraries or dependencies to install. The results of each run are logged, and can be ingested by analysis or metrics scripts (see the HumanEval example later in this README). By default, all runs are conducted in freshly-initialized docker containers, providing the recommended level of consistency and safety.
44

5-
This Testbed sample has been tested in, and is known to work with, Autogen versions 0.1.14 and 0.2.0b1
5+
This Testbed sample has been tested in, and is known to work with, Autogen versions 0.1.14 and 0.2.0b5
66

77
## Setup
88

samples/tools/testbed/includes/testbed_utils.py

+32-2
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,40 @@
1-
from importlib.metadata import version as lib_version
1+
from pkg_resources import packaging
22
from datetime import datetime
33
import os
44
import autogen
55
import json
66

77

8+
def default_llm_config(config_list, timeout=180):
    """Build a default ``llm_config`` dict with the given timeout and caching disabled.

    The option names written into the dict depend on the installed Autogen
    version: releases before 0.2.0b1 use ``request_timeout``/``use_cache``,
    0.2.0b1 through 0.2.0b3 use ``timeout``/``cache``, and 0.2.0b4 and later
    use ``timeout``/``cache_seed``.

    Args:
        config_list (list): the OAI config list to include in the final llm_config
        timeout (int): the timeout, in seconds, for calls to the LLM

    Returns:
        dict: the llm_config dictionary, ready to pass to an agent constructor
    """
    llm_config = {
        "config_list": config_list,
    }

    # Pick the timeout / cache-disabling keys appropriate for the installed
    # Autogen version (the key names were renamed across the 0.2.0 betas).
    version = packaging.version.parse(autogen.__version__)
    if version < packaging.version.parse("0.2.0b1"):
        llm_config["request_timeout"] = timeout
        llm_config["use_cache"] = False
    elif version < packaging.version.parse("0.2.0b4"):
        llm_config["timeout"] = timeout
        llm_config["cache"] = None
    else:
        llm_config["timeout"] = timeout
        llm_config["cache_seed"] = None

    return llm_config
36+
37+
838
def init():
939
"""Helper function to initialize logging in a testbed scenario.
1040
Specifically, write timestamp and version information, then
@@ -20,7 +50,7 @@ def init():
2050
# Print some information about the run
2151
with open("timestamp.txt", "wt") as f:
2252
f.write("Timestamp: " + datetime.now().isoformat() + "\n")
23-
f.write("pyautogen version: " + lib_version("pyautogen") + "\n")
53+
f.write("pyautogen version: " + str(autogen.__version__) + "\n")
2454

2555

2656
def finalize(agents):

samples/tools/testbed/run_scenarios.py

+1
Original file line numberDiff line numberDiff line change
@@ -189,6 +189,7 @@ def run_scenario_in_docker(work_dir, timeout=600):
189189
with open(os.path.join(work_dir, "run.sh"), "wt") as f:
190190
f.write(
191191
"""#
192+
umask 000
192193
. ./ENV
193194
pip install pyautogen
194195
python scenario.py

samples/tools/testbed/scenarios/default_two_agents.py

+5-8
Original file line numberDiff line numberDiff line change
@@ -1,25 +1,22 @@
1-
from autogen import AssistantAgent, UserProxyAgent, config_list_from_json
21
import os
32
import json
3+
import autogen
44
import testbed_utils
55

66
testbed_utils.init()
77
##############################
88

9-
config_list = config_list_from_json(
9+
config_list = autogen.config_list_from_json(
1010
"OAI_CONFIG_LIST",
1111
filter_dict={"model": ["__MODEL__"]},
1212
)
1313

14-
assistant = AssistantAgent(
14+
assistant = autogen.AssistantAgent(
1515
"assistant",
1616
is_termination_msg=lambda x: x.get("content", "").rstrip().find("TERMINATE") >= 0,
17-
llm_config={
18-
# "request_timeout": 180, # Remove for autogen version >= 0.2, and OpenAI version >= 1.0
19-
"config_list": config_list,
20-
},
17+
llm_config=testbed_utils.default_llm_config(config_list, timeout=180),
2118
)
22-
user_proxy = UserProxyAgent(
19+
user_proxy = autogen.UserProxyAgent(
2320
"user_proxy",
2421
human_input_mode="NEVER",
2522
is_termination_msg=lambda x: x.get("content", "").rstrip().find("TERMINATE") >= 0,

samples/tools/testbed/scenarios/human_eval_two_agents.py

+5-8
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
1-
from autogen import AssistantAgent, UserProxyAgent, config_list_from_json
21
import os
32
import json
43
import base64
54
import testbed_utils
5+
import autogen
66

77
# NOTE:
88
# This scenario runs Human Eval in a slightly unconventional way:
@@ -36,20 +36,17 @@ def run_tests(candidate):
3636

3737

3838
# Ok, now get autogen to solve it.
39-
config_list = config_list_from_json(
39+
config_list = autogen.config_list_from_json(
4040
"OAI_CONFIG_LIST",
4141
filter_dict={"model": ["__MODEL__"]},
4242
)
4343

44-
assistant = AssistantAgent(
44+
assistant = autogen.AssistantAgent(
4545
"assistant",
4646
is_termination_msg=lambda x: x.get("content", "").rstrip().find("TERMINATE") >= 0,
47-
llm_config={
48-
# "request_timeout": 180, # Remove for autogen version >= 0.2, and OpenAI version >= 1.0
49-
"config_list": config_list,
50-
},
47+
llm_config=testbed_utils.default_llm_config(config_list, timeout=180),
5148
)
52-
user_proxy = UserProxyAgent(
49+
user_proxy = autogen.UserProxyAgent(
5350
"user_proxy",
5451
human_input_mode="NEVER",
5552
is_termination_msg=lambda x: x.get("content", "").rstrip().find("TERMINATE") >= 0,

0 commit comments

Comments
 (0)