seed -> cache_seed #600

Merged 1 commit on Nov 8, 2023
Changes from all commits
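This PR renames the inference parameter `seed` to `cache_seed` throughout the library, notebooks, and docstrings, making explicit that the value only selects the on-disk response cache (kept under `.cache/{cache_seed}`) and has nothing to do with the model's sampling seed. It also bumps the package version to 0.2.0b4 and updates the notebook install pins to match. A minimal migration sketch (the `config_list` source is illustrative, not part of this PR):

```python
import autogen

# Hypothetical config source; any list of model configs works here.
config_list = autogen.config_list_from_json("OAI_CONFIG_LIST")

# Before (pyautogen <= 0.2.0b3): {"config_list": config_list, "seed": 42}
# After (pyautogen >= 0.2.0b4): the same knob is spelled "cache_seed";
# set it to None to disable response caching entirely.
llm_config = {"config_list": config_list, "cache_seed": 42}

assistant = autogen.AssistantAgent(name="assistant", llm_config=llm_config)
```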
3 changes: 0 additions & 3 deletions autogen/agentchat/groupchat.py
@@ -120,7 +120,6 @@ def __init__(
max_consecutive_auto_reply: Optional[int] = sys.maxsize,
human_input_mode: Optional[str] = "NEVER",
system_message: Optional[str] = "Group chat manager.",
- # seed: Optional[int] = 4,
**kwargs,
):
super().__init__(
@@ -136,8 +135,6 @@ def __init__(
# Allow async chat if initiated using a_initiate_chat
self.register_reply(Agent, GroupChatManager.a_run_chat, config=groupchat, reset_config=GroupChat.reset)

- # self._random = random.Random(seed)
-
def run_chat(
self,
messages: Optional[List[Dict]] = None,
10 changes: 5 additions & 5 deletions autogen/code_utils.py
@@ -509,11 +509,11 @@ def eval_function_completions(
_FUNC_COMPLETION_PROMPT = "# Python 3{definition}"
_FUNC_COMPLETION_STOP = ["\nclass", "\ndef", "\nif", "\nprint"]
_IMPLEMENT_CONFIGS = [
{"model": FAST_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "temperature": 0, "seed": 0},
{"model": FAST_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "stop": _FUNC_COMPLETION_STOP, "n": 7, "seed": 0},
{"model": DEFAULT_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "temperature": 0, "seed": 1},
{"model": DEFAULT_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "stop": _FUNC_COMPLETION_STOP, "n": 2, "seed": 2},
{"model": DEFAULT_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "stop": _FUNC_COMPLETION_STOP, "n": 1, "seed": 2},
{"model": FAST_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "temperature": 0, "cache_seed": 0},
{"model": FAST_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "stop": _FUNC_COMPLETION_STOP, "n": 7, "cache_seed": 0},
{"model": DEFAULT_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "temperature": 0, "cache_seed": 1},
{"model": DEFAULT_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "stop": _FUNC_COMPLETION_STOP, "n": 2, "cache_seed": 2},
{"model": DEFAULT_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "stop": _FUNC_COMPLETION_STOP, "n": 1, "cache_seed": 2},
]


16 changes: 8 additions & 8 deletions autogen/oai/client.py
@@ -31,7 +31,7 @@ class OpenAIWrapper:
"""A wrapper class for openai client."""

cache_path_root: str = ".cache"
extra_kwargs = {"seed", "filter_func", "allow_format_str_template", "context", "api_version"}
extra_kwargs = {"cache_seed", "filter_func", "allow_format_str_template", "context", "api_version"}
openai_kwargs = set(inspect.getfullargspec(OpenAI.__init__).kwonlyargs)

def __init__(self, *, config_list: List[Dict] = None, **base_config):
@@ -191,8 +191,8 @@ def create(self, **config):
The actual prompt will be:
"Complete the following sentence: Today I feel".
More examples can be found at [templating](/docs/Use-Cases/enhanced_inference#templating).
- - `seed` (int | None) for the cache. Default to 41.
- An integer seed is useful when implementing "controlled randomness" for the completion.
+ - `cache_seed` (int | None) for the cache. Default to 41.
+ An integer cache_seed is useful when implementing "controlled randomness" for the completion.
None for no caching.
- filter_func (Callable | None): A function that takes in the context and the response
and returns a boolean to indicate whether the response is valid. E.g.,
@@ -219,12 +219,12 @@ def yes_or_no_filter(context, response):
self._process_for_azure(create_config, extra_kwargs, "extra")
# construct the create params
params = self._construct_create_params(create_config, extra_kwargs)
- # get the seed, filter_func and context
- seed = extra_kwargs.get("seed", 41)
+ # get the cache_seed, filter_func and context
+ cache_seed = extra_kwargs.get("cache_seed", 41)
filter_func = extra_kwargs.get("filter_func")
context = extra_kwargs.get("context")
- with diskcache.Cache(f"{self.cache_path_root}/{seed}") as cache:
- if seed is not None:
+ with diskcache.Cache(f"{self.cache_path_root}/{cache_seed}") as cache:
+ if cache_seed is not None:
# Try to get the response from cache
key = get_key(params)
response = cache.get(key, None)
@@ -245,7 +245,7 @@ def yes_or_no_filter(context, response):
if i == last:
raise
else:
- if seed is not None:
+ if cache_seed is not None:
# Cache the response
cache.set(key, response)
return response
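For reference, a minimal sketch of the renamed kwarg on the wrapper (the message content is illustrative, and `config_list` is assumed to be defined elsewhere):

```python
from autogen import OpenAIWrapper

client = OpenAIWrapper(config_list=config_list)  # config_list assumed available

# cache_seed is an AutoGen-only extra kwarg: it picks the disk-cache bucket
# .cache/{cache_seed} and is separated out before the OpenAI API call.
response = client.create(
    messages=[{"role": "user", "content": "1 + 1 ="}],
    cache_seed=41,  # the default; pass cache_seed=None to bypass the cache
)
```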
18 changes: 9 additions & 9 deletions autogen/oai/completion.py
@@ -109,8 +109,8 @@ class Completion(openai_Completion):
"prompt": "{prompt}",
}

- seed = 41
- cache_path = f".cache/{seed}"
+ cache_seed = 41
+ cache_path = f".cache/{cache_seed}"
# retry after this many seconds
retry_wait_time = 10
# fail a request after hitting RateLimitError for this many seconds
@@ -134,7 +134,7 @@ def set_cache(cls, seed: Optional[int] = 41, cache_path_root: Optional[str] = ".
cache_path (str, Optional): The root path for the cache.
The complete cache path will be {cache_path}/{seed}.
"""
- cls.seed = seed
+ cls.cache_seed = seed
cls.cache_path = f"{cache_path_root}/{seed}"

@classmethod
@@ -145,7 +145,7 @@ def clear_cache(cls, seed: Optional[int] = None, cache_path_root: Optional[str]
seed (int, Optional): The integer identifier for the pseudo seed.
If omitted, all caches under cache_path_root will be cleared.
cache_path (str, Optional): The root path for the cache.
- The complete cache path will be {cache_path}/{seed}.
+ The complete cache path will be {cache_path}/{cache_seed}.
"""
if seed is None:
shutil.rmtree(cache_path_root, ignore_errors=True)
@@ -773,7 +773,7 @@ def yes_or_no_filter(context, config, response):
Besides the parameters for the openai API call, it can also contain:
- `max_retry_period` (int): the total time (in seconds) allowed for retrying failed requests.
- `retry_wait_time` (int): the time interval to wait (in seconds) before retrying a failed request.
- - `seed` (int) for the cache. This is useful when implementing "controlled randomness" for the completion.
+ - `cache_seed` (int) for the cache. This is useful when implementing "controlled randomness" for the completion.

Returns:
Responses from OpenAI API, with additional fields.
@@ -831,11 +831,11 @@ def yes_or_no_filter(context, config, response):
return cls._get_response(
params, raise_on_ratelimit_or_timeout=raise_on_ratelimit_or_timeout, use_cache=False
)
- seed = cls.seed
- if "seed" in params:
- cls.set_cache(params.pop("seed"))
+ cache_seed = cls.cache_seed
+ if "cache_seed" in params:
+ cls.set_cache(params.pop("cache_seed"))
with diskcache.Cache(cls.cache_path) as cls._cache:
- cls.set_cache(seed)
+ cls.set_cache(cache_seed)
return cls._get_response(params, raise_on_ratelimit_or_timeout=raise_on_ratelimit_or_timeout)

@classmethod
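Note that in the legacy `Completion` API only the class attribute is renamed; `set_cache` and `clear_cache` keep `seed` as their argument name. A short sketch under that assumption (and assuming `Completion` is still importable from the package root, as in earlier releases):

```python
from autogen import Completion

Completion.set_cache(seed=41, cache_path_root=".cache")  # cache dir: .cache/41
Completion.clear_cache(seed=41)   # remove only the .cache/41 bucket
Completion.clear_cache()          # seed omitted: clear everything under .cache
```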
2 changes: 1 addition & 1 deletion autogen/version.py
@@ -1 +1 @@
- __version__ = "0.2.0b3"
+ __version__ = "0.2.0b4"
2 changes: 1 addition & 1 deletion notebook/agentchat_RetrieveChat.ipynb
@@ -185,7 +185,7 @@
" system_message=\"You are a helpful assistant.\",\n",
" llm_config={\n",
" \"timeout\": 600,\n",
" \"seed\": 42,\n",
" \"cache_seed\": 42,\n",
" \"config_list\": config_list,\n",
" },\n",
")\n",
6 changes: 3 additions & 3 deletions notebook/agentchat_auto_feedback_from_code_execution.ipynb
@@ -45,7 +45,7 @@
},
"outputs": [],
"source": [
"# %pip install pyautogen~=0.2.0b2"
"# %pip install pyautogen~=0.2.0b4"
]
},
{
@@ -330,7 +330,7 @@
"assistant = autogen.AssistantAgent(\n",
" name=\"assistant\",\n",
" llm_config={\n",
" \"seed\": 42, # seed for caching and reproducibility\n",
" \"cache_seed\": 42, # seed for caching and reproducibility\n",
" \"config_list\": config_list, # a list of OpenAI API configurations\n",
" \"temperature\": 0, # temperature for sampling\n",
" }, # configuration for autogen's enhanced inference API which is compatible with OpenAI API\n",
@@ -806,7 +806,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.17"
"version": "3.11.4"
},
"vscode": {
"interpreter": {
6 changes: 3 additions & 3 deletions notebook/agentchat_chess.ipynb
@@ -35,7 +35,7 @@
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"# %pip install \"pyautogen~=0.2.0b2\"\n",
"# %pip install \"pyautogen~=0.2.0b4\"\n",
"%pip install chess -U"
]
},
@@ -292,13 +292,13 @@
" color=\"black\",\n",
" board_agent=board_agent,\n",
" max_turns=max_turn,\n",
" llm_config={\"temperature\": 0.5, \"seed\": 1, \"config_list\": config_list_gpt4},\n",
" llm_config={\"temperature\": 0.5, \"cache_seed\": 1, \"config_list\": config_list_gpt4},\n",
")\n",
"player_white = ChessPlayerAgent(\n",
" color=\"white\",\n",
" board_agent=board_agent,\n",
" max_turns=max_turn,\n",
" llm_config={\"temperature\": 0.5, \"seed\": 2, \"config_list\": config_list_gpt4},\n",
" llm_config={\"temperature\": 0.5, \"cache_seed\": 2, \"config_list\": config_list_gpt4},\n",
")"
]
},
4 changes: 2 additions & 2 deletions notebook/agentchat_groupchat.ipynb
@@ -35,7 +35,7 @@
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"# %pip install pyautogen~=0.2.0b2"
"# %pip install pyautogen~=0.2.0b4"
]
},
{
@@ -124,7 +124,7 @@
"metadata": {},
"outputs": [],
"source": [
"llm_config = {\"config_list\": config_list_gpt4, \"seed\": 42}\n",
"llm_config = {\"config_list\": config_list_gpt4, \"cache_seed\": 42}\n",
"user_proxy = autogen.UserProxyAgent(\n",
" name=\"User_proxy\",\n",
" system_message=\"A human admin.\",\n",
Expand Down
4 changes: 2 additions & 2 deletions notebook/agentchat_groupchat_research.ipynb
@@ -33,7 +33,7 @@
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"# %pip install pyautogen~=0.2.0b2"
"# %pip install pyautogen~=0.2.0b4"
]
},
{
@@ -111,7 +111,7 @@
"outputs": [],
"source": [
"gpt4_config = {\n",
" \"seed\": 42, # change the seed for different trials\n",
" \"cache_seed\": 42, # change the cache_seed for different trials\n",
" \"temperature\": 0,\n",
" \"config_list\": config_list_gpt4,\n",
" \"timeout\": 120,\n",
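The comment above, "change the cache_seed for different trials", points at a useful pattern: each trial gets its own cache bucket, so runs are sampled independently yet individually reproducible. A hedged sketch (the loop and agent wiring are illustrative):

```python
# Trials 0, 1, 2 read and write .cache/0, .cache/1, .cache/2 respectively,
# so re-running a trial replays it exactly while trials stay independent.
for trial in range(3):
    gpt4_config = {
        "cache_seed": trial,
        "temperature": 0,
        "config_list": config_list_gpt4,
        "timeout": 120,
    }
    # ... rebuild the agents with gpt4_config and re-run the group chat ...
```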
4 changes: 2 additions & 2 deletions notebook/agentchat_groupchat_vis.ipynb
@@ -33,7 +33,7 @@
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"# %pip install pyautogen~=0.2.0b2"
"# %pip install pyautogen~=0.2.0b4"
]
},
{
@@ -132,7 +132,7 @@
"metadata": {},
"outputs": [],
"source": [
"llm_config = {\"config_list\": config_list_gpt4, \"seed\": 42}\n",
"llm_config = {\"config_list\": config_list_gpt4, \"cache_seed\": 42}\n",
"user_proxy = autogen.UserProxyAgent(\n",
" name=\"User_proxy\",\n",
" system_message=\"A human admin.\",\n",
Expand Down
6 changes: 3 additions & 3 deletions notebook/agentchat_hierarchy_flow_using_select_speaker.ipynb
@@ -50,7 +50,7 @@
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"# %pip install pyautogen~=0.1.0"
"# %pip install pyautogen~=0.2.0b4"
]
},
{
@@ -163,7 +163,7 @@
"metadata": {},
"outputs": [],
"source": [
"llm_config = {\"config_list\": config_list_gpt4, \"seed\": 42}"
"llm_config = {\"config_list\": config_list_gpt4, \"cache_seed\": 42}"
]
},
{
@@ -359,7 +359,7 @@
"\n",
"\n",
"# Create the manager\n",
"llm_config = {\"config_list\": config_list_gpt4, \"seed\": 42, \"use_cache\":False} # use_cache is False because we want to observe if there is any communication pattern difference if we reran the group chat.\n",
"llm_config = {\"config_list\": config_list_gpt4, \"cache_seed\": None} # cache_seed is None because we want to observe if there is any communication pattern difference if we reran the group chat.\n",
"manager = autogen.GroupChatManager(groupchat=group_chat, llm_config=llm_config)\n",
"\n",
"\n",
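The `cache_seed: None` setting above replaces the removed `use_cache: False` flag; both modes side by side in a hedged sketch (variable names follow the notebook):

```python
# Reproducible runs: responses are cached under .cache/42 and replayed.
llm_config_cached = {"config_list": config_list_gpt4, "cache_seed": 42}

# Fresh sampling every run: disable the cache to observe run-to-run variation.
llm_config_fresh = {"config_list": config_list_gpt4, "cache_seed": None}
```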
4 changes: 2 additions & 2 deletions notebook/agentchat_human_feedback.ipynb
@@ -45,7 +45,7 @@
},
"outputs": [],
"source": [
"# %pip install pyautogen~=0.2.0b2"
"# %pip install pyautogen~=0.2.0b4"
]
},
{
@@ -123,7 +123,7 @@
"assistant = autogen.AssistantAgent(\n",
" name=\"assistant\",\n",
" llm_config={\n",
" \"seed\": 41,\n",
" \"cache_seed\": 41,\n",
" \"config_list\": config_list,\n",
" }\n",
")\n",
6 changes: 3 additions & 3 deletions notebook/agentchat_lmm_gpt-4v.ipynb
@@ -15,7 +15,7 @@
"source": [
"### Before everything starts, install AutoGen with the `lmm` option\n",
"```bash\n",
"pip install pyautogen[lmm]\n",
"pip install \"pyautogen[lmm]~=0.2.0b4\"\n",
"```"
]
},
@@ -85,7 +85,7 @@
" },\n",
")\n",
"\n",
"gpt4_llm_config = {\"config_list\": config_list_gpt4, \"seed\": 42}"
"gpt4_llm_config = {\"config_list\": config_list_gpt4, \"cache_seed\": 42}"
]
},
{
Expand Down Expand Up @@ -699,7 +699,7 @@
"# },\n",
"# )\n",
"\n",
"# gpt35_llm_config = {\"config_list\": config_list_gpt35, \"seed\": 42}\n",
"# gpt35_llm_config = {\"config_list\": config_list_gpt35, \"cache_seed\": 42}\n",
"\n",
"\n",
"creator = FigureCreator(\n",
Expand Down
10 changes: 5 additions & 5 deletions notebook/agentchat_lmm_llava.ipynb
@@ -5,9 +5,9 @@
"id": "2c75da30",
"metadata": {},
"source": [
"# Agent Chat with Multimodal Models\n",
"# Agent Chat with Multimodal Models: LLaVA\n",
"\n",
"We use **LLaVA** as an example for the multimodal feature. More information about LLaVA can be found in their [GitHub page](https://github.com/haotian-liu/LLaVA)\n",
"This notebook uses **LLaVA** as an example for the multimodal feature. More information about LLaVA can be found in their [GitHub page](https://github.com/haotian-liu/LLaVA)\n",
"\n",
"\n",
"This notebook contains the following information and examples:\n",
@@ -26,7 +26,7 @@
"source": [
"### Before everything starts, install AutoGen with the `lmm` option\n",
"```bash\n",
"pip install pyautogen[lmm]\n",
"pip install \"pyautogen[lmm]~=0.2.0b4\"\n",
"```"
]
},
@@ -848,7 +848,7 @@
" },\n",
")\n",
"\n",
"gpt4_llm_config = {\"config_list\": config_list_gpt4, \"seed\": 42}\n",
"gpt4_llm_config = {\"config_list\": config_list_gpt4, \"cache_seed\": 42}\n",
"\n",
"# config_list_gpt35 = autogen.config_list_from_json(\n",
"# \"OAI_CONFIG_LIST\",\n",
@@ -857,7 +857,7 @@
"# },\n",
"# )\n",
"\n",
"# gpt35_llm_config = {\"config_list\": config_list_gpt35, \"seed\": 42}\n",
"# gpt35_llm_config = {\"config_list\": config_list_gpt35, \"cache_seed\": 42}\n",
"\n",
"\n",
"creator = FigureCreator(\n",
4 changes: 2 additions & 2 deletions notebook/agentchat_planning.ipynb
@@ -45,7 +45,7 @@
},
"outputs": [],
"source": [
"# %pip install pyautogen~=0.2.0b2 docker"
"# %pip install pyautogen~=0.2.0b4 docker"
]
},
{
@@ -156,7 +156,7 @@
" llm_config={\n",
" \"temperature\": 0,\n",
" \"timeout\": 600,\n",
" \"seed\": 42,\n",
" \"cache_seed\": 42,\n",
" \"config_list\": config_list,\n",
" \"functions\": [\n",
" {\n",
4 changes: 2 additions & 2 deletions notebook/agentchat_stream.ipynb
@@ -45,7 +45,7 @@
},
"outputs": [],
"source": [
"# %pip install pyautogen~=0.2.0b2"
"# %pip install pyautogen~=0.2.0b4"
]
},
{
@@ -209,7 +209,7 @@
" name=\"assistant\",\n",
" llm_config={\n",
" \"timeout\": 600,\n",
" \"seed\": 41,\n",
" \"cache_seed\": 41,\n",
" \"config_list\": config_list,\n",
" \"temperature\": 0,\n",
" },\n",