From ee4f9bfd7b986fa79cbd12918eba71397115a069 Mon Sep 17 00:00:00 2001
From: Chi Wang
Date: Fri, 11 Aug 2023 01:34:14 +0000
Subject: [PATCH 01/13] silent; code_execution_config; exit; version
---
flaml/autogen/agentchat/groupchat.py | 25 +-
flaml/autogen/agentchat/responsive_agent.py | 164 +++--
flaml/autogen/agentchat/user_proxy_agent.py | 1 +
flaml/version.py | 2 +-
notebook/autogen_agentchat_MathChat.ipynb | 2 +-
...at_auto_feedback_from_code_execution.ipynb | 4 +-
notebook/autogen_agentchat_chess.ipynb | 644 ++++++------------
.../autogen_agentchat_function_call.ipynb | 4 +-
notebook/autogen_agentchat_groupchat.ipynb | 449 ++++++------
.../autogen_agentchat_human_feedback.ipynb | 4 +-
notebook/autogen_agentchat_planning.ipynb | 4 +-
notebook/autogen_agentchat_stream.ipynb | 10 +-
notebook/autogen_agentchat_two_users.ipynb | 4 +-
notebook/autogen_agentchat_web_info.ipynb | 4 +-
notebook/autogen_openai_completion.ipynb | 2 +-
test/autogen/agentchat/test_async.py | 6 +-
test/autogen/agentchat/test_groupchat.py | 4 +-
.../agentchat/test_responsive_agent.py | 16 +-
website/docs/Getting-Started.md | 4 +-
website/docs/Installation.md | 2 +-
.../{Auto-Generation.md => Autogen.md} | 0
21 files changed, 586 insertions(+), 769 deletions(-)
rename website/docs/Use-Cases/{Auto-Generation.md => Autogen.md} (100%)
diff --git a/flaml/autogen/agentchat/groupchat.py b/flaml/autogen/agentchat/groupchat.py
index 14faf36389..343c56e9b6 100644
--- a/flaml/autogen/agentchat/groupchat.py
+++ b/flaml/autogen/agentchat/groupchat.py
@@ -33,7 +33,9 @@ def next_agent(self, agent: Agent) -> Agent:
def select_speaker_msg(self):
"""Return the message for selecting the next speaker."""
return f"""You are in a role play game. The following roles are available:
-{self._participant_roles()}. Read the following conversation.
+{self._participant_roles()}.
+
+Read the following conversation.
Then select the next role from {self.agent_names} to play. Only return the role."""
def select_speaker(self, last_speaker: Agent, selector: ResponsiveAgent):
@@ -73,32 +75,35 @@ def __init__(
system_message=system_message,
**kwargs,
)
- self.register_auto_reply(Agent, GroupChatManager.run_chat, context=groupchat, reset_context=GroupChat.reset)
+ self.register_auto_reply(Agent, GroupChatManager.run_chat, config=groupchat, reset_config=GroupChat.reset)
# self._random = random.Random(seed)
def run_chat(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
- context: Optional[GroupChat] = None,
+ config: Optional[GroupChat] = None,
) -> Union[str, Dict, None]:
"""Run a group chat."""
if messages is None:
messages = self._oai_messages[sender]
message = messages[-1]
speaker = sender
- for i in range(context.max_round):
+ for i in range(config.max_round):
# set the name to speaker's name if the role is not function
if message["role"] != "function":
message["name"] = speaker.name
- context.messages.append(message)
+ config.messages.append(message)
# broadcast the message to all agents except the speaker
- for agent in context.agents:
+ for agent in config.agents:
if agent != speaker:
- self.send(message, agent, request_reply=False)
- if i != context.max_round - 1:
+ self.send(message, agent, request_reply=False, silent=True)
+ if i != config.max_round - 1:
# speaker selection msg from an agent
- speaker = context.select_speaker(speaker, self)
- speaker.send(speaker.generate_reply(sender=self), self, request_reply=False)
+ speaker = config.select_speaker(speaker, self)
+ reply = speaker.generate_reply(sender=self)
+ if reply is None:
+ break
+ speaker.send(reply, self, request_reply=False)
message = self.last_message(speaker)
return True, None
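A minimal usage sketch of the hooks renamed above (not part of the patch; the agents and `llm_config` below are illustrative placeholders):
```python
from flaml import autogen
from flaml.autogen.agentchat.groupchat import GroupChat, GroupChatManager

# Placeholder inference settings; substitute your own config_list/model.
llm_config = {"model": "gpt-4"}
alice = autogen.AssistantAgent("alice", llm_config=llm_config)
bob = autogen.AssistantAgent("bob", llm_config=llm_config)
user = autogen.UserProxyAgent("user", human_input_mode="NEVER", code_execution_config=False)

# GroupChatManager now registers run_chat with the group chat passed as
# `config` (formerly `context`) and restored via `reset_config=GroupChat.reset`,
# so reset() returns the manager to a fresh chat.
groupchat = GroupChat(agents=[user, alice, bob], messages=[], max_round=6)
manager = GroupChatManager(groupchat=groupchat, llm_config=llm_config)
user.initiate_chat(manager, message="Summarize the plan.")
```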
diff --git a/flaml/autogen/agentchat/responsive_agent.py b/flaml/autogen/agentchat/responsive_agent.py
index 72650cb39e..d15509ab96 100644
--- a/flaml/autogen/agentchat/responsive_agent.py
+++ b/flaml/autogen/agentchat/responsive_agent.py
@@ -81,6 +81,7 @@ def __init__(
If the code is executed in the current environment,
the code must be trusted.
- timeout (Optional, int): The maximum execution time in seconds.
+ - last_n_messages (Experimental, Optional, int): The number of messages to look back for code execution. Defaults to 1.
llm_config (dict or False): llm inference configuration.
Please refer to [autogen.Completion.create](/docs/reference/autogen/oai/completion#create)
for available options.
@@ -122,8 +123,8 @@ def register_auto_reply(
trigger: Union[Type[Agent], str, Agent, Callable[[Agent], bool], List],
reply_func: Callable,
position: Optional[int] = 0,
- context: Optional[Any] = None,
- reset_context: Optional[Callable] = None,
+ config: Optional[Any] = None,
+ reset_config: Optional[Callable] = None,
):
"""Register a reply function.
@@ -139,22 +140,22 @@ def register_auto_reply(
- If a callable is provided, the reply function will be called when the callable returns True.
- If a list is provided, the reply function will be called when any of the triggers in the list is activated.
reply_func (Callable): the reply function.
- The function takes a recipient agent, a list of messages, a sender agent and a context as input and returns a reply message.
+ The function takes a recipient agent, a list of messages, a sender agent and a config as input and returns a reply message.
```python
def reply_func(
recipient: ResponsiveAgent,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
- context: Optional[Any] = None,
+ config: Optional[Any] = None,
) -> Union[str, Dict, None]:
```
position (int): the position of the reply function in the reply function list.
The function registered later will be checked earlier by default.
To change the order, set the position to a positive integer.
- context (Any): the context to be passed to the reply function.
- When an agent is reset, the context will be reset to the original value.
- reset_context (Callable): the function to reset the context.
- The function returns None. Signature: ```def reset_context(context: Any)```
+ config (Any): the config to be passed to the reply function.
+ When an agent is reset, the config will be reset to the original value.
+ reset_config (Callable): the function to reset the config.
+ The function returns None. Signature: ```def reset_config(config: Any)```
"""
if not isinstance(trigger, (type, str, Agent, Callable, list)):
raise ValueError("trigger must be a class, a string, an agent, a callable or a list.")
@@ -163,9 +164,9 @@ def reply_func(
{
"trigger": trigger,
"reply_func": reply_func,
- "context": copy.copy(context),
- "init_context": context,
- "reset_context": reset_context,
+ "config": copy.copy(config),
+ "init_config": config,
+ "reset_config": reset_config,
},
)
@@ -268,7 +269,13 @@ def _append_oai_message(self, message: Union[Dict, str], role, conversation_id:
self._oai_messages[conversation_id].append(oai_message)
return True
- def send(self, message: Union[Dict, str], recipient: Agent, request_reply: Optional[bool] = None) -> bool:
+ def send(
+ self,
+ message: Union[Dict, str],
+ recipient: Agent,
+ request_reply: Optional[bool] = None,
+ silent: Optional[bool] = False,
+ ) -> bool:
"""Send a message to another agent.
Args:
@@ -296,6 +303,7 @@ def send(self, message: Union[Dict, str], recipient: Agent, request_reply: Optio
the content of the "link" later.
recipient (Agent): the recipient of the message.
request_reply (bool or None): whether to request a reply from the recipient.
+ silent (bool or None): (Experimental) whether to print the message sent.
Raises:
ValueError: if the message can't be converted into a valid ChatCompletion message.
@@ -304,13 +312,19 @@ def send(self, message: Union[Dict, str], recipient: Agent, request_reply: Optio
# unless it's "function".
valid = self._append_oai_message(message, "assistant", recipient)
if valid:
- recipient.receive(message, self, request_reply)
+ recipient.receive(message, self, request_reply, silent)
else:
raise ValueError(
"Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
)
- async def a_send(self, message: Union[Dict, str], recipient: Agent, request_reply: Optional[bool] = None) -> bool:
+ async def a_send(
+ self,
+ message: Union[Dict, str],
+ recipient: Agent,
+ request_reply: Optional[bool] = None,
+ silent: Optional[bool] = False,
+ ) -> bool:
"""(async) Send a message to another agent.
Args:
@@ -338,6 +352,7 @@ async def a_send(self, message: Union[Dict, str], recipient: Agent, request_repl
the content of the "link" later.
recipient (Agent): the recipient of the message.
request_reply (bool or None): whether to request a reply from the recipient.
+ silent (bool or None): (Experimental) whether to print the message sent.
Raises:
ValueError: if the message can't be converted into a valid ChatCompletion message.
@@ -346,7 +361,7 @@ async def a_send(self, message: Union[Dict, str], recipient: Agent, request_repl
# unless it's "function".
valid = self._append_oai_message(message, "assistant", recipient)
if valid:
- await recipient.a_receive(message, self, request_reply)
+ await recipient.a_receive(message, self, request_reply, silent)
else:
raise ValueError(
"Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
@@ -382,7 +397,7 @@ def _print_received_message(self, message: Union[Dict, str], sender: Agent):
print(colored("*" * len(func_print), "green"), flush=True)
print("\n", "-" * 80, flush=True, sep="")
- def _process_received_message(self, message, sender):
+ def _process_received_message(self, message, sender, silent):
message = self._message_to_dict(message)
# When the agent receives a message, the role of the message is "user". (If 'role' exists and is 'function', it will remain unchanged.)
valid = self._append_oai_message(message, "user", sender)
@@ -390,9 +405,16 @@ def _process_received_message(self, message, sender):
raise ValueError(
"Received message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
)
- self._print_received_message(message, sender)
+ if not silent:
+ self._print_received_message(message, sender)
- def receive(self, message: Union[Dict, str], sender: Agent, request_reply: Optional[bool] = None):
+ def receive(
+ self,
+ message: Union[Dict, str],
+ sender: Agent,
+ request_reply: Optional[bool] = None,
+ silent: Optional[bool] = False,
+ ):
"""Receive a message from another agent.
Once a message is received, this function sends a reply to the sender or stops.
@@ -410,18 +432,25 @@ def receive(self, message: Union[Dict, str], sender: Agent, request_reply: Optio
sender: sender of an Agent instance.
request_reply (bool or None): whether a reply is requested from the sender.
If None, the value is determined by `self.reply_at_receive[sender]`.
+ silent (bool or None): (Experimental) whether to print the message received.
Raises:
ValueError: if the message can't be converted into a valid ChatCompletion message.
"""
- self._process_received_message(message, sender)
+ self._process_received_message(message, sender, silent)
if request_reply is False or request_reply is None and self.reply_at_receive[sender] is False:
return
- reply = self.generate_reply(sender=sender)
+ reply = self.generate_reply(messages=self.chat_messages[sender], sender=sender)
if reply is not None:
- self.send(reply, sender)
+ self.send(reply, sender, silent=silent)
- async def a_receive(self, message: Union[Dict, str], sender: Agent, request_reply: Optional[bool] = None):
+ async def a_receive(
+ self,
+ message: Union[Dict, str],
+ sender: Agent,
+ request_reply: Optional[bool] = None,
+ silent: Optional[bool] = False,
+ ):
"""(async) Receive a message from another agent.
Once a message is received, this function sends a reply to the sender or stops.
@@ -439,16 +468,17 @@ async def a_receive(self, message: Union[Dict, str], sender: Agent, request_repl
sender: sender of an Agent instance.
request_reply (bool or None): whether a reply is requested from the sender.
If None, the value is determined by `self.reply_at_receive[sender]`.
+ silent (bool or None): (Experimental) whether to print the message received.
Raises:
ValueError: if the message can't be converted into a valid ChatCompletion message.
"""
- self._process_received_message(message, sender)
+ self._process_received_message(message, sender, silent)
if request_reply is False or request_reply is None and self.reply_at_receive[sender] is False:
return
reply = await self.a_generate_reply(sender=sender)
if reply is not None:
- await self.a_send(reply, sender)
+ await self.a_send(reply, sender, silent=silent)
def _prepare_chat(self, recipient, clear_history):
self.reset_consecutive_auto_reply_counter(recipient)
@@ -458,7 +488,13 @@ def _prepare_chat(self, recipient, clear_history):
self.clear_history(recipient)
recipient.clear_history(self)
- def initiate_chat(self, recipient: "ResponsiveAgent", clear_history: Optional[bool] = True, **context):
+ def initiate_chat(
+ self,
+ recipient: "ResponsiveAgent",
+ clear_history: Optional[bool] = True,
+ silent: Optional[bool] = False,
+ **context,
+ ):
"""Initiate a chat with the recipient agent.
Reset the consecutive auto reply counter.
@@ -468,13 +504,20 @@ def initiate_chat(self, recipient: "ResponsiveAgent", clear_history: Optional[bo
Args:
recipient: the recipient agent.
clear_history (bool): whether to clear the chat history with the agent.
+ silent (bool or None): (Experimental) whether to print the messages for this conversation.
**context: any context information.
"message" needs to be provided if the `generate_init_message` method is not overridden.
"""
self._prepare_chat(recipient, clear_history)
- self.send(self.generate_init_message(**context), recipient)
+ self.send(self.generate_init_message(**context), recipient, silent=silent)
- async def a_initiate_chat(self, recipient: "ResponsiveAgent", clear_history: Optional[bool] = True, **context):
+ async def a_initiate_chat(
+ self,
+ recipient: "ResponsiveAgent",
+ clear_history: Optional[bool] = True,
+ silent: Optional[bool] = False,
+ **context,
+ ):
"""(async) Initiate a chat with the recipient agent.
Reset the consecutive auto reply counter.
@@ -484,11 +527,12 @@ async def a_initiate_chat(self, recipient: "ResponsiveAgent", clear_history: Opt
Args:
recipient: the recipient agent.
clear_history (bool): whether to clear the chat history with the agent.
+ silent (bool or None): (Experimental) whether to print the messages for this conversation.
**context: any context information.
"message" needs to be provided if the `generate_init_message` method is not overridden.
"""
self._prepare_chat(recipient, clear_history)
- await self.a_send(self.generate_init_message(**context), recipient)
+ await self.a_send(self.generate_init_message(**context), recipient, silent=silent)
def reset(self):
"""Reset the agent."""
@@ -496,10 +540,10 @@ def reset(self):
self.reset_consecutive_auto_reply_counter()
self.stop_reply_at_receive()
for reply_func_tuple in self._reply_func_list:
- if reply_func_tuple["reset_context"] is not None:
- reply_func_tuple["reset_context"](reply_func_tuple["context"])
+ if reply_func_tuple["reset_config"] is not None:
+ reply_func_tuple["reset_config"](reply_func_tuple["config"])
else:
- reply_func_tuple["context"] = copy.copy(reply_func_tuple["init_context"])
+ reply_func_tuple["config"] = copy.copy(reply_func_tuple["init_config"])
def stop_reply_at_receive(self, sender: Optional[Agent] = None):
"""Reset the reply_at_receive of the sender."""
@@ -530,10 +574,10 @@ def generate_oai_reply(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
- context: Optional[Any] = None,
+ config: Optional[Any] = None,
) -> Tuple[bool, Union[str, Dict, None]]:
"""Generate a reply using autogen.oai."""
- llm_config = self.llm_config if context is None else context
+ llm_config = self.llm_config if config is None else config
if llm_config is False:
return False, None
if messages is None:
@@ -549,36 +593,44 @@ def generate_code_execution_reply(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
- context: Optional[Any] = None,
+ config: Optional[Any] = None,
):
"""Generate a reply using code execution."""
- code_execution_config = context if context is not None else self._code_execution_config
+ code_execution_config = config if config is not None else self._code_execution_config
if code_execution_config is False:
return False, None
if messages is None:
messages = self._oai_messages[sender]
- message = messages[-1]
- code_blocks = extract_code(message["content"])
- if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
- # no code block is found, lang should be `UNKNOWN`
- return False, None
- # code_blocks, _ = find_code(messages, sys_msg=self._oai_system_message, **self.llm_config)
- # if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
- # return code_blocks[0][1]
- # try to execute the code
- exitcode, logs = self.execute_code_blocks(code_blocks)
- exitcode2str = "execution succeeded" if exitcode == 0 else "execution failed"
+ last_n_messages = code_execution_config.pop("last_n_messages", 1)
+ for i in range(last_n_messages):
+ message = messages[-(i + 1)]
+ code_blocks = extract_code(message["content"])
+ if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
+ # no code block is found, lang should be `UNKNOWN`
+ if i == last_n_messages - 1:
+ code_execution_config["last_n_messages"] = last_n_messages
+ return False, None
+ continue
+ # code_blocks, _ = find_code(messages, sys_msg=self._oai_system_message, **self.llm_config)
+ # if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
+ # return code_blocks[0][1]
+ # try to execute the code
+ exitcode, logs = self.execute_code_blocks(code_blocks)
+ exitcode2str = "execution succeeded" if exitcode == 0 else "execution failed"
+ break
+ code_execution_config["last_n_messages"] = last_n_messages
return True, f"exitcode: {exitcode} ({exitcode2str})\nCode output: {logs}"
def generate_function_call_reply(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
- context: Optional[Any] = None,
+ config: Optional[Any] = None,
):
"""Generate a reply using function call."""
- if context is None:
- context = self
+ if config is None:
+ config = self
if messages is None:
messages = self._oai_messages[sender]
message = messages[-1]
@@ -591,11 +643,11 @@ def check_termination_and_human_reply(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
- context: Optional[Any] = None,
+ config: Optional[Any] = None,
) -> Tuple[bool, Union[str, Dict, None]]:
"""Check if the conversation should be terminated, and if human reply is provided."""
- if context is None:
- context = self
+ if config is None:
+ config = self
if messages is None:
messages = self._oai_messages[sender]
message = messages[-1]
@@ -697,9 +749,7 @@ def generate_reply(
if asyncio.coroutines.iscoroutinefunction(reply_func):
continue
if self._match_trigger(reply_func_tuple["trigger"], sender):
- final, reply = reply_func(
- self, messages=messages, sender=sender, context=reply_func_tuple["context"]
- )
+ final, reply = reply_func(self, messages=messages, sender=sender, config=reply_func_tuple["config"])
if final:
return reply
return self._default_auto_reply
@@ -743,11 +793,11 @@ async def a_generate_reply(
if self._match_trigger(reply_func_tuple["trigger"], sender):
if asyncio.coroutines.iscoroutinefunction(reply_func):
final, reply = await reply_func(
- self, messages=messages, sender=sender, context=reply_func_tuple["context"]
+ self, messages=messages, sender=sender, config=reply_func_tuple["config"]
)
else:
final, reply = reply_func(
- self, messages=messages, sender=sender, context=reply_func_tuple["context"]
+ self, messages=messages, sender=sender, config=reply_func_tuple["config"]
)
if final:
return reply
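For reference, a minimal sketch of a custom reply function under the new `config`/`reset_config` naming; the counting function and its dict are hypothetical, not part of the patch:
```python
from typing import Any, Dict, List, Optional, Tuple, Union

from flaml.autogen.agentchat import Agent, ResponsiveAgent

# Hypothetical reply function: counts processed messages in `config` and
# defers to later reply functions by returning final=False.
def count_messages(
    recipient: ResponsiveAgent,
    messages: Optional[List[Dict]] = None,
    sender: Optional[Agent] = None,
    config: Optional[Any] = None,
) -> Tuple[bool, Union[str, Dict, None]]:
    config["count"] += 1
    return False, None

agent = ResponsiveAgent("counter", llm_config=False)
agent.register_auto_reply(
    Agent,
    count_messages,
    config={"count": 0},
    # reset_config mutates the dict in place; without it, reset() would
    # fall back to a shallow copy of the initial config.
    reset_config=lambda cfg: cfg.update(count=0),
)
```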
diff --git a/flaml/autogen/agentchat/user_proxy_agent.py b/flaml/autogen/agentchat/user_proxy_agent.py
index 7d622a0af6..7803c810f3 100644
--- a/flaml/autogen/agentchat/user_proxy_agent.py
+++ b/flaml/autogen/agentchat/user_proxy_agent.py
@@ -60,6 +60,7 @@ def __init__(
If the code is executed in the current environment,
the code must be trusted.
- timeout (Optional, int): The maximum execution time in seconds.
+ - last_n_messages (Experimental, Optional, int): The number of messages to look back for code execution. Defaults to 1.
default_auto_reply (str or dict or None): the default auto reply message when no code execution or llm based reply is generated.
llm_config (dict or False): llm inference configuration.
Please refer to [autogen.Completion.create](/docs/reference/autogen/oai/completion#create)
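A sketch of how the experimental options added in this patch combine; the model settings and `work_dir` are assumptions for illustration:
```python
from flaml import autogen

assistant = autogen.AssistantAgent("assistant", llm_config={"model": "gpt-4"})
user_proxy = autogen.UserProxyAgent(
    "user_proxy",
    human_input_mode="NEVER",
    code_execution_config={
        "work_dir": "coding",  # directory where extracted code is saved and run
        "last_n_messages": 3,  # experimental: scan the last 3 messages for code blocks
    },
)
# silent=True (experimental) suppresses printing of the exchanged messages.
user_proxy.initiate_chat(assistant, message="Plot a sine wave to sine.png.", silent=True)
```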
diff --git a/flaml/version.py b/flaml/version.py
index 14693e1132..8c0d5d5bb2 100644
--- a/flaml/version.py
+++ b/flaml/version.py
@@ -1 +1 @@
-__version__ = "2.0.0rc5"
+__version__ = "2.0.0"
diff --git a/notebook/autogen_agentchat_MathChat.ipynb b/notebook/autogen_agentchat_MathChat.ipynb
index 1c369d39bd..cb9f2469d1 100644
--- a/notebook/autogen_agentchat_MathChat.ipynb
+++ b/notebook/autogen_agentchat_MathChat.ipynb
@@ -31,7 +31,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# %pip install flaml[mathchat]~=2.0.0rc4"
+ "# %pip install flaml[mathchat]~=2.0.0"
]
},
{
diff --git a/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb b/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb
index 73cbfc57e0..b2432cb45f 100644
--- a/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb
+++ b/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb
@@ -45,7 +45,7 @@
},
"outputs": [],
"source": [
- "# %pip install flaml[autogen]~=2.0.0rc4"
+ "# %pip install flaml[autogen]~=2.0.0"
]
},
{
@@ -778,7 +778,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.17"
+ "version": "3.9.16"
},
"vscode": {
"interpreter": {
diff --git a/notebook/autogen_agentchat_chess.ipynb b/notebook/autogen_agentchat_chess.ipynb
index 720ec9c3c5..f7dbf41d60 100644
--- a/notebook/autogen_agentchat_chess.ipynb
+++ b/notebook/autogen_agentchat_chess.ipynb
@@ -32,7 +32,7 @@
"outputs": [],
"source": [
"%%capture --no-stderr\n",
- "# %pip install flaml[autogen]~=2.0.0rc5\n",
+ "# %pip install flaml[autogen]~=2.0.0\n",
"%pip install chess -U"
]
},
@@ -156,6 +156,8 @@
"# ]\n",
"\n",
"class BoardAgent(autogen.AssistantAgent):\n",
+ " board: chess.Board\n",
+ " correct_move_messages: Dict[autogen.Agent, List[Dict]]\n",
"\n",
" def __init__(self, board: chess.Board):\n",
" super().__init__(\n",
@@ -165,28 +167,26 @@
" max_consecutive_auto_reply=10,\n",
" )\n",
" self.register_auto_reply(autogen.ResponsiveAgent, BoardAgent._generate_board_reply)\n",
- " self._board = board\n",
- " self._correct_move_messages = defaultdict(list)\n",
+ " self.board = board\n",
+ " self.correct_move_messages = defaultdict(list)\n",
"\n",
" def _generate_board_reply(\n",
" self,\n",
" messages: Optional[List[Dict]] = None,\n",
" sender: Optional[autogen.Agent] = None,\n",
- " context: Optional[Any] = None,\n",
+ " config: Optional[Any] = None,\n",
" ) -> Union[str, Dict, None]:\n",
" # Filter for messages that do not contain error.\n",
- " if messages is None:\n",
- " messages = self._oai_messages[sender]\n",
" message = messages[-1]\n",
" assert message.get(\"role\") == \"user\"\n",
" # extract a UCI move from player's message\n",
- " reply = self.generate_reply(self._correct_move_messages[sender] + [message], sender, exclude=[BoardAgent._generate_board_reply])\n",
+ " reply = self.generate_reply(self.correct_move_messages[sender] + [message], sender, exclude=[BoardAgent._generate_board_reply])\n",
" if isinstance(reply, str):\n",
" uci_move = reply\n",
" else:\n",
" uci_move = str(reply[\"content\"])\n",
" try:\n",
- " self._board.push_uci(uci_move)\n",
+ " self.board.push_uci(uci_move)\n",
" except ValueError as e:\n",
" # invalid move\n",
" error = f\"Error: {e}\"\n",
@@ -194,9 +194,9 @@
" else:\n",
" # valid move\n",
" m = chess.Move.from_uci(uci_move)\n",
- " display(chess.svg.board(self._board, arrows=[(m.from_square, m.to_square)], fill={m.from_square: \"gray\"}, size=200))\n",
- " self._correct_move_messages[sender].extend([message, self._message_to_dict(uci_move)])\n",
- " self._correct_move_messages[sender][-1][\"role\"] = \"assistant\"\n",
+ " display(chess.svg.board(self.board, arrows=[(m.from_square, m.to_square)], fill={m.from_square: \"gray\"}, size=200))\n",
+ " self.correct_move_messages[sender].extend([message, self._message_to_dict(uci_move)])\n",
+ " self.correct_move_messages[sender][-1][\"role\"] = \"assistant\"\n",
" return True, uci_move\n"
]
},
@@ -243,21 +243,19 @@
" max_consecutive_auto_reply=max_turns,\n",
" **kwargs,\n",
" )\n",
- " self.register_auto_reply(BoardAgent, ChessPlayerAgent._generate_reply_for_board)\n",
- " self.register_auto_reply(ChessPlayerAgent, ChessPlayerAgent._generate_reply_for_player)\n",
- " self._board_agent = board_agent\n",
- " self.update_max_consecutive_auto_reply(self._board_agent.max_consecutive_auto_reply(), self._board_agent)\n",
+ " self.register_auto_reply(BoardAgent, ChessPlayerAgent._generate_reply_for_board, config=board_agent.board)\n",
+ " self.register_auto_reply(ChessPlayerAgent, ChessPlayerAgent._generate_reply_for_player, config=board_agent)\n",
+ " self.update_max_consecutive_auto_reply(board_agent.max_consecutive_auto_reply(), board_agent)\n",
"\n",
" def _generate_reply_for_board(\n",
" self,\n",
" messages: Optional[List[Dict]] = None,\n",
" sender: Optional[autogen.Agent] = None,\n",
- " context: Optional[Any] = None,\n",
+ " config: Optional[chess.Board] = None,\n",
" ) -> Union[str, Dict, None]:\n",
- " if messages is None:\n",
- " messages = self._oai_messages[sender]\n",
+ " board = config\n",
" # add a system message about the current state of the board.\n",
- " board_state_msg = [{\"role\": \"system\", \"content\": f\"Current board:\\n{self._board_agent._board}\"}]\n",
+ " board_state_msg = [{\"role\": \"system\", \"content\": f\"Current board:\\n{board}\"}]\n",
" last_message = messages[-1]\n",
" if last_message[\"content\"].startswith(\"Error\"):\n",
" # try again\n",
@@ -270,25 +268,25 @@
" self,\n",
" messages: Optional[List[Dict]] = None,\n",
" sender: Optional[autogen.Agent] = None,\n",
- " context: Optional[Any] = None,\n",
+ " config: Optional[BoardAgent] = None,\n",
" ) -> Union[str, Dict, None]:\n",
- " if messages is None:\n",
- " messages = self._oai_messages[sender]\n",
+ " board_agent = config\n",
" # add a system message about the current state of the board.\n",
- " board_state_msg = [{\"role\": \"system\", \"content\": f\"Current board:\\n{self._board_agent._board}\"}]\n",
+ " board_state_msg = [{\"role\": \"system\", \"content\": f\"Current board:\\n{board_agent.board}\"}]\n",
" # propose a reply which will be sent to the board agent for verification.\n",
" message = self.generate_reply(messages + board_state_msg, sender, exclude=[ChessPlayerAgent._generate_reply_for_player])\n",
" if message is None:\n",
" return True, None\n",
" # converse with the board until a legal move is made or max allowed retries.\n",
- " self.initiate_chat(self._board_agent, clear_history=False, message=message)\n",
+ " # change silent to False to see that conversation.\n",
+ " self.initiate_chat(board_agent, clear_history=False, message=message, silent=True)\n",
" # last message sent by the board agent\n",
- " last_message = self._oai_messages[self._board_agent][-1]\n",
+ " last_message = self._oai_messages[board_agent][-1]\n",
" if last_message[\"role\"] == \"assistant\":\n",
" # didn't make a legal move after a limit times of retries.\n",
" print(f\"{self.name}: I yield.\")\n",
" return True, None\n",
- " return True, self._oai_messages[self._board_agent][-2]\n"
+ " return True, self._oai_messages[board_agent][-2]\n"
]
},
{
@@ -344,12 +342,6 @@
"\n",
"Your turn.\n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer white\u001b[0m (to BoardAgent):\n",
- "\n",
- "e2e4. \n",
- "A classic opening move, my friend. Let's see how this game unfolds. Your move.\n",
- "\n",
"--------------------------------------------------------------------------------\n"
]
},
@@ -376,21 +368,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player white):\n",
- "\n",
- "e2e4\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer white\u001b[0m (to Player black):\n",
"\n",
- "e2e4. \n",
- "A classic opening move, my friend. Let's see how this game unfolds. Your move.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer black\u001b[0m (to BoardAgent):\n",
+ "Alright, let's kick things off. I'll move my pawn from e2 to e4. The center of the board is the heart of the battle, isn't it? Your move. \n",
"\n",
- "e7e5. \n",
- "Matching your classic opening with one of my own. Let's keep the game interesting. Your move.\n",
+ "e2e4\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -418,21 +400,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player black):\n",
- "\n",
- "e7e5\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer black\u001b[0m (to Player white):\n",
"\n",
- "e7e5. \n",
- "Matching your classic opening with one of my own. Let's keep the game interesting. Your move.\n",
+ "Ah, the King's Pawn Opening, a classic. Let's see how this plays out. I'll move my pawn from e7 to e5. \n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer white\u001b[0m (to BoardAgent):\n",
+ "e7e5\n",
"\n",
- "g1f3. \n",
- "Aiming to control the center of the board. Your move.\n",
+ "You know, I've always admired how chess reflects life. It's all about making strategic decisions, isn't it? Your move.\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -460,27 +434,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player white):\n",
- "\n",
- "g1f3\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer white\u001b[0m (to Player black):\n",
"\n",
- "g1f3. \n",
- "Aiming to control the center of the board. Your move.\n",
- "\n",
- "--------------------------------------------------------------------------------\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\u001b[33mPlayer black\u001b[0m (to BoardAgent):\n",
+ "Indeed, chess is a mirror of life in many ways. Every move matters, just like every decision in life. I'll move my knight from g1 to f3, aiming to control the center.\n",
"\n",
- "g8f6. \n",
- "Aiming to control the center as well. Your move.\n",
+ "g1f3\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -508,21 +466,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player black):\n",
- "\n",
- "g8f6\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer black\u001b[0m (to Player white):\n",
"\n",
- "g8f6. \n",
- "Aiming to control the center as well. Your move.\n",
+ "Well said! Chess is indeed a game of deep strategy and foresight. I will move my knight from g8 to f6, matching your control of the center.\n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer white\u001b[0m (to BoardAgent):\n",
+ "g8f6\n",
"\n",
- "b1c3. \n",
- "Developing my knight to a good square. Your move.\n",
+ "Did you know, the knight's move is actually based on the 'L' shape, which stands for 'leap'? Quite interesting, isn't it? Your move.\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -534,13 +484,13 @@
"p p p p . p p p\n",
". . . . . n . .\n",
". . . . p . . .\n",
- ". . . . P . . .\n",
- ". . N . . N . .\n",
+ ". . B . P . . .\n",
+ ". . . . . N . .\n",
"P P P P . P P P\n",
- "R . B Q K B . R"
+ "R N B Q K . . R"
],
"text/plain": [
- "''"
+ "''"
]
},
"metadata": {},
@@ -550,21 +500,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player white):\n",
- "\n",
- "b1c3\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer white\u001b[0m (to Player black):\n",
"\n",
- "b1c3. \n",
- "Developing my knight to a good square. Your move.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer black\u001b[0m (to BoardAgent):\n",
+ "That's a fun fact! The knight's L-shaped leap makes it a unique piece on the board. Now, I'll move my bishop from f1 to c4, putting a little pressure on your side.\n",
"\n",
- "b8c6. \n",
- "Developing my knight to match yours. Your move.\n",
+ "f1c4\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -572,17 +512,17 @@
{
"data": {
"image/svg+xml": [
- ""
],
"text/plain": [
- "'
r . b q k b . r\\np p p p . p p p\\n. . n . . n . .\\n. . . . p . . .\\n. . . . P . . .\\n. . N . . N . .\\nP P P P . P P P\\nR . B Q K B . R
'"
+ "'
r n b q k . . r\\np p p p . p p p\\n. . . . . n . .\\n. . b . p . . .\\n. . B . P . . .\\n. . . . . N . .\\nP P P P . P P P\\nR N B Q K . . R
'"
]
},
"metadata": {},
@@ -592,21 +532,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player black):\n",
- "\n",
- "b8c6\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer black\u001b[0m (to Player white):\n",
"\n",
- "b8c6. \n",
- "Developing my knight to match yours. Your move.\n",
+ "A strong move, indeed! I can see you're not one to hold back. I'll move my bishop from f8 to c5, maintaining the balance.\n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer white\u001b[0m (to BoardAgent):\n",
+ "f8c5\n",
"\n",
- "d2d4.\n",
- "Challenging your pawn in the center. Your move.\n",
+ "You know, the bishop was originally an elephant in ancient chess. It's fascinating how the game has evolved over the centuries, isn't it? Your move.\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -614,17 +546,17 @@
{
"data": {
"image/svg+xml": [
- "
r . b q k b . r\n",
+ "
r n b q k . . r\n",
"p p p p . p p p\n",
- ". . n . . n . .\n",
- ". . . . p . . .\n",
- ". . . P P . . .\n",
- ". . N . . N . .\n",
- "P P P . . P P P\n",
- "R . B Q K B . R
"
+ ". . . . . n . .\n",
+ ". . b . p . . .\n",
+ ". . B . P . . .\n",
+ ". . . . . N . .\n",
+ "P P P P . P P P\n",
+ "R N B Q . R K .
"
],
"text/plain": [
- "'
r . b q k b . r\\np p p p . p p p\\n. . n . . n . .\\n. . . . p . . .\\n. . . P P . . .\\n. . N . . N . .\\nP P P . . P P P\\nR . B Q K B . R
'"
+ "'
r n b q k . . r\\np p p p . p p p\\n. . . . . n . .\\n. . b . p . . .\\n. . B . P . . .\\n. . . . . N . .\\nP P P P . P P P\\nR N B Q . R K .
'"
]
},
"metadata": {},
@@ -634,21 +566,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player white):\n",
- "\n",
- "d2d4\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer white\u001b[0m (to Player black):\n",
"\n",
- "d2d4.\n",
- "Challenging your pawn in the center. Your move.\n",
+ "Absolutely, the evolution of chess is quite fascinating. It's a testament to the game's timeless appeal. Now, I'll castle kingside to ensure my king's safety. \n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer black\u001b[0m (to BoardAgent):\n",
- "\n",
- "e5d4. \n",
- "Taking your pawn. Your move.\n",
+ "e1g1\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -656,17 +578,17 @@
{
"data": {
"image/svg+xml": [
- "
r . b q k b . r\n",
+ "
r n b q . r k .\n",
"p p p p . p p p\n",
- ". . n . . n . .\n",
- ". . . . . . . .\n",
- ". . . p P . . .\n",
- ". . N . . N . .\n",
- "P P P . . P P P\n",
- "R . B Q K B . R
"
+ ". . . . . n . .\n",
+ ". . b . p . . .\n",
+ ". . B . P . . .\n",
+ ". . . . . N . .\n",
+ "P P P P . P P P\n",
+ "R N B Q . R K .
"
],
"text/plain": [
- "'
r . b q k b . r\\np p p p . p p p\\n. . n . . n . .\\n. . . . . . . .\\n. . . p P . . .\\n. . N . . N . .\\nP P P . . P P P\\nR . B Q K B . R
'"
+ "'
r n b q . r k .\\np p p p . p p p\\n. . . . . n . .\\n. . b . p . . .\\n. . B . P . . .\\n. . . . . N . .\\nP P P P . P P P\\nR N B Q . R K .
'"
]
},
"metadata": {},
@@ -676,21 +598,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player black):\n",
- "\n",
- "e5d4\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer black\u001b[0m (to Player white):\n",
"\n",
- "e5d4. \n",
- "Taking your pawn. Your move.\n",
+ "A wise decision, safety first! I'll follow your lead and castle kingside as well. \n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer white\u001b[0m (to BoardAgent):\n",
+ "e8g8\n",
"\n",
- "f3d4.\n",
- "Taking back the pawn and keeping the center balanced. Your move.\n",
+ "They say the king is a fighting piece, but in the early game, it's all about keeping him safe, isn't it? Your move.\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -698,17 +612,17 @@
{
"data": {
"image/svg+xml": [
- "
r . b q k b . r\n",
+ "
r n b q . r k .\n",
"p p p p . p p p\n",
- ". . n . . n . .\n",
- ". . . . . . . .\n",
- ". . . N P . . .\n",
- ". . N . . . . .\n",
+ ". . . . . n . .\n",
+ ". . b . p . . .\n",
+ ". . B P P . . .\n",
+ ". . . . . N . .\n",
"P P P . . P P P\n",
- "R . B Q K B . R
"
+ "R N B Q . R K .
"
],
"text/plain": [
- "'
r . b q k b . r\\np p p p . p p p\\n. . n . . n . .\\n. . . . . . . .\\n. . . N P . . .\\n. . N . . . . .\\nP P P . . P P P\\nR . B Q K B . R
'"
+ "'
r n b q . r k .\\np p p p . p p p\\n. . . . . n . .\\n. . b . p . . .\\n. . B P P . . .\\n. . . . . N . .\\nP P P . . P P P\\nR N B Q . R K .
'"
]
},
"metadata": {},
@@ -718,21 +632,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player white):\n",
- "\n",
- "f3d4\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer white\u001b[0m (to Player black):\n",
"\n",
- "f3d4.\n",
- "Taking back the pawn and keeping the center balanced. Your move.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer black\u001b[0m (to BoardAgent):\n",
+ "I see, it looks like we had a bit of a mix-up. Let's get back to the game. I'll move my pawn from d2 to d4, opening up lines for my queen and bishop.\n",
"\n",
- "c6d4. \n",
- "Taking your knight with my knight. Your move.\n",
+ "d2d4\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -740,17 +644,17 @@
{
"data": {
"image/svg+xml": [
- "
r . b q k b . r\n",
+ "
r n b q . r k .\n",
"p p p p . p p p\n",
- ". . . . . n . .\n",
- ". . . . . . . .\n",
- ". . . n P . . .\n",
- ". . N . . . . .\n",
+ ". b . . . n . .\n",
+ ". . . . p . . .\n",
+ ". . B P P . . .\n",
+ ". . . . . N . .\n",
"P P P . . P P P\n",
- "R . B Q K B . R
"
+ "R N B Q . R K .
"
],
"text/plain": [
- "'
r . b q k b . r\\np p p p . p p p\\n. . . . . n . .\\n. . . . . . . .\\n. . . n P . . .\\n. . N . . . . .\\nP P P . . P P P\\nR . B Q K B . R
'"
+ "'
r n b q . r k .\\np p p p . p p p\\n. b . . . n . .\\n. . . . p . . .\\n. . B P P . . .\\n. . . . . N . .\\nP P P . . P P P\\nR N B Q . R K .
'"
]
},
"metadata": {},
@@ -760,21 +664,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player black):\n",
- "\n",
- "c6d4\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer black\u001b[0m (to Player white):\n",
"\n",
- "c6d4. \n",
- "Taking your knight with my knight. Your move.\n",
+ "Indeed, opening lines for your major pieces is a crucial part of the game. I'll move my bishop from c5 to b6. \n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer white\u001b[0m (to BoardAgent):\n",
+ "c5b6\n",
"\n",
- "q1d4.\n",
- "Taking back the knight with my queen. Your move.\n",
+ "Chess is a lot like a dance, don't you think? Each piece moving in harmony with the others. Your move.\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -782,17 +678,17 @@
{
"data": {
"image/svg+xml": [
- "
r . b q k b . r\n",
+ "
r n b q . r k .\n",
"p p p p . p p p\n",
- ". . . . . n . .\n",
- ". . . . . . . .\n",
- ". . . Q P . . .\n",
- ". . N . . . . .\n",
- "P P P . . P P P\n",
- "R . B . K B . R
"
+ ". b . . . n . .\n",
+ ". . . . p . . .\n",
+ ". . B P P . . .\n",
+ ". . . . . N . .\n",
+ "P P P . Q P P P\n",
+ "R N B . . R K .
"
],
"text/plain": [
- "'
r . b q k b . r\\np p p p . p p p\\n. . . . . n . .\\n. . . . . . . .\\n. . . Q P . . .\\n. . N . . . . .\\nP P P . . P P P\\nR . B . K B . R
'"
+ "'
r n b q . r k .\\np p p p . p p p\\n. b . . . n . .\\n. . . . p . . .\\n. . B P P . . .\\n. . . . . N . .\\nP P P . Q P P P\\nR N B . . R K .
'"
]
},
"metadata": {},
@@ -802,21 +698,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player white):\n",
- "\n",
- "d1d4\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer white\u001b[0m (to Player black):\n",
"\n",
- "q1d4.\n",
- "Taking back the knight with my queen. Your move.\n",
+ "I see, it looks like we had a bit of a mix-up. Let's get back to the game. I'll move my queen from d1 to e2, aiming to control the center.\n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer black\u001b[0m (to BoardAgent):\n",
- "\n",
- "d7d5. \n",
- "Pushing my pawn to challenge your Queen. Your move.\n",
+ "d1e2\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -824,17 +710,17 @@
{
"data": {
"image/svg+xml": [
- "
r . b q k b . r\n",
- "p p p . . p p p\n",
- ". . . . . n . .\n",
- ". . . p . . . .\n",
- ". . . Q P . . .\n",
- ". . N . . . . .\n",
- "P P P . . P P P\n",
- "R . B . K B . R
"
+ "
r n b . . r k .\n",
+ "p p p p q p p p\n",
+ ". b . . . n . .\n",
+ ". . . . p . . .\n",
+ ". . B P P . . .\n",
+ ". . . . . N . .\n",
+ "P P P . Q P P P\n",
+ "R N B . . R K .
"
],
"text/plain": [
- "'
r . b q k b . r\\np p p . . p p p\\n. . . . . n . .\\n. . . p . . . .\\n. . . Q P . . .\\n. . N . . . . .\\nP P P . . P P P\\nR . B . K B . R
'"
+ "'
r n b . . r k .\\np p p p q p p p\\n. b . . . n . .\\n. . . . p . . .\\n. . B P P . . .\\n. . . . . N . .\\nP P P . Q P P P\\nR N B . . R K .
'"
]
},
"metadata": {},
@@ -844,21 +730,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player black):\n",
- "\n",
- "d7d5\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer black\u001b[0m (to Player white):\n",
"\n",
- "d7d5. \n",
- "Pushing my pawn to challenge your Queen. Your move.\n",
+ "Indeed, control of the center is key. I'll move my queen from d8 to e7, matching your control of the center.\n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer white\u001b[0m (to BoardAgent):\n",
+ "d8e7\n",
"\n",
- "e4d5.\n",
- "Taking your pawn with my pawn. Your move.\n",
+ "Did you know the queen wasn't always the most powerful piece on the board? In the original game of chess, the piece could only move one square diagonally! Your move.\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -866,17 +744,17 @@
{
"data": {
"image/svg+xml": [
- "
r . b q k b . r\n",
- "p p p . . p p p\n",
- ". . . . . n . .\n",
- ". . . P . . . .\n",
- ". . . Q . . . .\n",
- ". . N . . . . .\n",
- "P P P . . P P P\n",
- "R . B . K B . R
"
+ "
r n b . . r k .\n",
+ "p p p p q p p p\n",
+ ". b . . . n . .\n",
+ ". . . . P . . .\n",
+ ". . B . P . . .\n",
+ ". . . . . N . .\n",
+ "P P P . Q P P P\n",
+ "R N B . . R K .
"
],
"text/plain": [
- "'
r . b q k b . r\\np p p . . p p p\\n. . . . . n . .\\n. . . P . . . .\\n. . . Q . . . .\\n. . N . . . . .\\nP P P . . P P P\\nR . B . K B . R
'"
+ "'
r n b . . r k .\\np p p p q p p p\\n. b . . . n . .\\n. . . . P . . .\\n. . B . P . . .\\n. . . . . N . .\\nP P P . Q P P P\\nR N B . . R K .
'"
]
},
"metadata": {},
@@ -886,21 +764,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player white):\n",
- "\n",
- "e4d5\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer white\u001b[0m (to Player black):\n",
"\n",
- "e4d5.\n",
- "Taking your pawn with my pawn. Your move.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer black\u001b[0m (to BoardAgent):\n",
+ "Indeed, the queen has come a long way from its humble beginnings. Now, I'll move my pawn from d4 to e5, adding some tension to the center.\n",
"\n",
- "f6d5. \n",
- "Taking your pawn with my knight. Your move.\n",
+ "d4e5\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -908,17 +776,17 @@
{
"data": {
"image/svg+xml": [
- "
r . b q k b . r\n",
- "p p p . . p p p\n",
- ". . . . . . . .\n",
- ". . . n . . . .\n",
- ". . . Q . . . .\n",
- ". . N . . . . .\n",
- "P P P . . P P P\n",
- "R . B . K B . R
"
+ "
r n b . . r k .\n",
+ "p p p p q p p p\n",
+ ". b . . . . . .\n",
+ ". . . n P . . .\n",
+ ". . B . P . . .\n",
+ ". . . . . N . .\n",
+ "P P P . Q P P P\n",
+ "R N B . . R K .
"
],
"text/plain": [
- "'
r . b q k b . r\\np p p . . p p p\\n. . . . . . . .\\n. . . n . . . .\\n. . . Q . . . .\\n. . N . . . . .\\nP P P . . P P P\\nR . B . K B . R
'"
+ "'
r n b . . r k .\\np p p p q p p p\\n. b . . . . . .\\n. . . n P . . .\\n. . B . P . . .\\n. . . . . N . .\\nP P P . Q P P P\\nR N B . . R K .
'"
]
},
"metadata": {},
@@ -928,21 +796,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player black):\n",
- "\n",
- "f6d5\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer black\u001b[0m (to Player white):\n",
"\n",
- "f6d5. \n",
- "Taking your pawn with my knight. Your move.\n",
+ "Interesting move! This is shaping up to be quite the game. I'll move my knight from f6 to d5.\n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer white\u001b[0m (to BoardAgent):\n",
+ "f6d5\n",
"\n",
- "c3d5.\n",
- "Taking your knight with my knight. Your move.\n",
+ "You know, the knight is the only piece that can jump over others. It's like the horse in a game of polo, leaping over obstacles. Your move.\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -950,17 +810,17 @@
{
"data": {
"image/svg+xml": [
- "
r . b q k b . r\n",
- "p p p . . p p p\n",
- ". . . . . . . .\n",
- ". . . N . . . .\n",
- ". . . Q . . . .\n",
- ". . . . . . . .\n",
- "P P P . . P P P\n",
- "R . B . K B . R
"
+ "
r n b . . r k .\n",
+ "p p p p q p p p\n",
+ ". b . . . . . .\n",
+ ". . . n P . . .\n",
+ ". . B . P . . .\n",
+ ". . N . . N . .\n",
+ "P P P . Q P P P\n",
+ "R . B . . R K .
"
],
"text/plain": [
- "'
r . b q k b . r\\np p p . . p p p\\n. . . . . . . .\\n. . . N . . . .\\n. . . Q . . . .\\n. . . . . . . .\\nP P P . . P P P\\nR . B . K B . R
'"
+ "'
r n b . . r k .\\np p p p q p p p\\n. b . . . . . .\\n. . . n P . . .\\n. . B . P . . .\\n. . N . . N . .\\nP P P . Q P P P\\nR . B . . R K .
'"
]
},
"metadata": {},
@@ -970,21 +830,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player white):\n",
- "\n",
- "c3d5\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer white\u001b[0m (to Player black):\n",
"\n",
- "c3d5.\n",
- "Taking your knight with my knight. Your move.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer black\u001b[0m (to BoardAgent):\n",
+ "Indeed, the knight's ability to jump over other pieces is quite unique. Now, I'll move my knight from b1 to c3, challenging your knight.\n",
"\n",
- "c7c6. \n",
- "Challenging your knight. Your move.\n",
+ "b1c3\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -992,17 +842,17 @@
{
"data": {
"image/svg+xml": [
- "
r . b q k b . r\n",
- "p p . . . p p p\n",
- ". . p . . . . .\n",
- ". . . N . . . .\n",
- ". . . Q . . . .\n",
- ". . . . . . . .\n",
- "P P P . . P P P\n",
- "R . B . K B . R
"
+ "
r . b . . r k .\n",
+ "p p p p q p p p\n",
+ ". b n . . . . .\n",
+ ". . . n P . . .\n",
+ ". . B . P . . .\n",
+ ". . N . . N . .\n",
+ "P P P . Q P P P\n",
+ "R . B . . R K .
"
],
"text/plain": [
- "'
r . b q k b . r\\np p . . . p p p\\n. . p . . . . .\\n. . . N . . . .\\n. . . Q . . . .\\n. . . . . . . .\\nP P P . . P P P\\nR . B . K B . R
'"
+ "'
r . b . . r k .\\np p p p q p p p\\n. b n . . . . .\\n. . . n P . . .\\n. . B . P . . .\\n. . N . . N . .\\nP P P . Q P P P\\nR . B . . R K .
'"
]
},
"metadata": {},
@@ -1012,21 +862,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player black):\n",
- "\n",
- "c7c6\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer black\u001b[0m (to Player white):\n",
"\n",
- "c7c6. \n",
- "Challenging your knight. Your move.\n",
+ "Apologies for the confusion. It seems there was a misunderstanding. I'll correct that and move my knight from b8 to c6.\n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer white\u001b[0m (to BoardAgent):\n",
+ "b8c6\n",
"\n",
- "d5f6.\n",
- "Moving my knight to capture your pawn, putting your king in check. Your move.\n",
+ "Did you know that in the Middle Ages, the knight piece was usually carved to resemble a horse's head? Your move.\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -1034,17 +876,17 @@
{
"data": {
"image/svg+xml": [
- "
r . b q k b . r\n",
- "p p . . . p p p\n",
- ". . p . . N . .\n",
- ". . . . . . . .\n",
- ". . . Q . . . .\n",
- ". . . . . . . .\n",
- "P P P . . P P P\n",
- "R . B . K B . R
"
+ "
r . b . . r k .\n",
+ "p p p p q p p p\n",
+ ". b n . . . . .\n",
+ ". . . n P . B .\n",
+ ". . B . P . . .\n",
+ ". . N . . N . .\n",
+ "P P P . Q P P P\n",
+ "R . . . . R K .
"
],
"text/plain": [
- "'
r . b q k b . r\\np p . . . p p p\\n. . p . . N . .\\n. . . . . . . .\\n. . . Q . . . .\\n. . . . . . . .\\nP P P . . P P P\\nR . B . K B . R
'"
+ "'
r . b . . r k .\\np p p p q p p p\\n. b n . . . . .\\n. . . n P . B .\\n. . B . P . . .\\n. . N . . N . .\\nP P P . Q P P P\\nR . . . . R K .
'"
]
},
"metadata": {},
@@ -1054,32 +896,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player white):\n",
- "\n",
- "d5f6\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer white\u001b[0m (to Player black):\n",
"\n",
- "d5f6.\n",
- "Moving my knight to capture your pawn, putting your king in check. Your move.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer black\u001b[0m (to BoardAgent):\n",
- "\n",
- "g8f6. \n",
- "Moving my king to capture your knight and getting out of check. Your move.\n",
+ "That's a fascinating bit of history! The knight piece's design certainly adds to the charm of the game. Now, I'll move my bishop from c1 to g5, putting a little pressure on your queen.\n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mBoardAgent\u001b[0m (to Player black):\n",
- "\n",
- "Error: illegal uci: 'g8f6' in r1bqkb1r/pp3ppp/2p2N2/8/3Q4/8/PPP2PPP/R1B1KB1R b KQkq - 1 9\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer black\u001b[0m (to BoardAgent):\n",
- "\n",
- "d8f6. \n",
- "My mistake, capturing your knight with my queen. Your move.\n",
+ "c1g5\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -1087,17 +908,17 @@
{
"data": {
"image/svg+xml": [
- "
r . b . k b . r\n",
- "p p . . . p p p\n",
- ". . p . . q . .\n",
- ". . . . . . . .\n",
- ". . . Q . . . .\n",
- ". . . . . . . .\n",
- "P P P . . P P P\n",
- "R . B . K B . R
"
+ "
r . b . . r k .\n",
+ "p p p p . p p p\n",
+ ". b n . q . . .\n",
+ ". . . n P . B .\n",
+ ". . B . P . . .\n",
+ ". . N . . N . .\n",
+ "P P P . Q P P P\n",
+ "R . . . . R K .
"
],
"text/plain": [
- "'
r . b . k b . r\\np p . . . p p p\\n. . p . . q . .\\n. . . . . . . .\\n. . . Q . . . .\\n. . . . . . . .\\nP P P . . P P P\\nR . B . K B . R
'"
+ "'
r . b . . r k .\\np p p p . p p p\\n. b n . q . . .\\n. . . n P . B .\\n. . B . P . . .\\n. . N . . N . .\\nP P P . Q P P P\\nR . . . . R K .
'"
]
},
"metadata": {},
@@ -1107,54 +928,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player black):\n",
- "\n",
- "d8f6\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer black\u001b[0m (to Player white):\n",
"\n",
- "d8f6. \n",
- "My mistake, capturing your knight with my queen. Your move.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer white\u001b[0m (to BoardAgent):\n",
- "\n",
- "d4d1.\n",
- "Moving my queen away from your queen's attack. Your move.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mBoardAgent\u001b[0m (to Player white):\n",
- "\n",
- "Error: illegal uci: 'd1d4' in r1b1kb1r/pp3ppp/2p2q2/8/3Q4/8/PPP2PPP/R1B1KB1R w KQkq - 0 10\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer white\u001b[0m (to BoardAgent):\n",
+ "Well played! I see you're not one to shy away from a challenge. I'll move my queen from e7 to e6, keeping her safe.\n",
"\n",
- "e1g1.\n",
- "Moving my king to safety with a castle. Your move.\n",
+ "e7e6\n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mBoardAgent\u001b[0m (to Player white):\n",
- "\n",
- "Error: illegal uci: 'e1g1' in r1b1kb1r/pp3ppp/2p2q2/8/3Q4/8/PPP2PPP/R1B1KB1R w KQkq - 0 10\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer white\u001b[0m (to BoardAgent):\n",
- "\n",
- "g1e2.\n",
- "Moving my king to safety. Your move.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mBoardAgent\u001b[0m (to Player white):\n",
- "\n",
- "Error: illegal uci: 'g1e2' in r1b1kb1r/pp3ppp/2p2q2/8/3Q4/8/PPP2PPP/R1B1KB1R w KQkq - 0 10\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer white\u001b[0m (to BoardAgent):\n",
- "\n",
- "f1e2.\n",
- "Moving my bishop to protect my king. Your move.\n",
+ "Did you know that the queen's ability to move any number of squares along a rank, file, or diagonal is a relatively recent development in the history of chess? Your move.\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -1162,17 +942,17 @@
{
"data": {
"image/svg+xml": [
- "
r . b . k b . r\n",
- "p p . . . p p p\n",
- ". . p . . q . .\n",
- ". . . . . . . .\n",
- ". . . Q . . . .\n",
- ". . . . . . . .\n",
- "P P P . B P P P\n",
- "R . B . K . . R
"
+ "
r . b . . r k .\n",
+ "p p p p . p p p\n",
+ ". b n . q . . .\n",
+ ". . . n P . B .\n",
+ ". . B . P . . .\n",
+ ". . N . . N . .\n",
+ "P P P . Q P P P\n",
+ ". . . R . R K .
"
],
"text/plain": [
- "'
r . b . k b . r\\np p . . . p p p\\n. . p . . q . .\\n. . . . . . . .\\n. . . Q . . . .\\n. . . . . . . .\\nP P P . B P P P\\nR . B . K . . R
'"
+ "'
r . b . . r k .\\np p p p . p p p\\n. b n . q . . .\\n. . . n P . B .\\n. . B . P . . .\\n. . N . . N . .\\nP P P . Q P P P\\n. . . R . R K .
'"
]
},
"metadata": {},
@@ -1182,21 +962,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player white):\n",
- "\n",
- "f1e2\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mPlayer white\u001b[0m (to Player black):\n",
"\n",
- "f1e2.\n",
- "Moving my bishop to protect my king. Your move.\n",
+ "Indeed, the queen's powers were significantly enhanced during the Middle Ages, transforming the game of chess. Now, I'll move my rook from a1 to d1, aligning it with your queen.\n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer black\u001b[0m (to BoardAgent):\n",
- "\n",
- "f8e7. \n",
- "Moving my bishop to protect my king. Your move.\n",
+ "a1d1\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -1204,17 +974,17 @@
{
"data": {
"image/svg+xml": [
- "
r . b . k . . r\n",
- "p p . . b p p p\n",
- ". . p . . q . .\n",
- ". . . . . . . .\n",
- ". . . Q . . . .\n",
- ". . . . . . . .\n",
- "P P P . B P P P\n",
- "R . B . K . . R
"
+ "
r . b . . r k .\n",
+ "p p p . . p p p\n",
+ ". b n p q . . .\n",
+ ". . . n P . B .\n",
+ ". . B . P . . .\n",
+ ". . N . . N . .\n",
+ "P P P . Q P P P\n",
+ ". . . R . R K .
"
],
"text/plain": [
- "'
r . b . k . . r\\np p . . b p p p\\n. . p . . q . .\\n. . . . . . . .\\n. . . Q . . . .\\n. . . . . . . .\\nP P P . B P P P\\nR . B . K . . R
'"
+ "'
r . b . . r k .\\np p p . . p p p\\n. b n p q . . .\\n. . . n P . B .\\n. . B . P . . .\\n. . N . . N . .\\nP P P . Q P P P\\n. . . R . R K .
'"
]
},
"metadata": {},
@@ -1224,15 +994,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001b[33mBoardAgent\u001b[0m (to Player black):\n",
+ "\u001b[33mPlayer black\u001b[0m (to Player white):\n",
"\n",
- "f8e7\n",
+ "My apologies for the confusion. Let's correct that. I'll move my pawn from d7 to d6.\n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mPlayer black\u001b[0m (to Player white):\n",
+ "d7d6\n",
"\n",
- "f8e7. \n",
- "Moving my bishop to protect my king. Your move.\n",
+ "Did you know that pawns are the soul of chess? It's fascinating how these seemingly insignificant pieces can control the fate of the game. Your move.\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
@@ -1259,7 +1027,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.17"
+ "version": "3.9.16"
},
"orig_nbformat": 4
},
diff --git a/notebook/autogen_agentchat_function_call.ipynb b/notebook/autogen_agentchat_function_call.ipynb
index 96d3e4c94d..423e2542fe 100644
--- a/notebook/autogen_agentchat_function_call.ipynb
+++ b/notebook/autogen_agentchat_function_call.ipynb
@@ -17,7 +17,7 @@
"source": [
"# Auto Generated Agent Chat: Task Solving with Provided Tools as Functions\n",
"\n",
- "FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents).\n",
+ "FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"\n",
"In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to make function calls with the new feature of OpenAI models (in model version 0613). A specified prompt and function configs need to be passed to `AssistantAgent` to initialize the agent. The corresponding functions need to be passed to `UserProxyAgent`, which will be responsible for executing any function calls made by `AssistantAgent`. Besides this requirement of matching descriptions with functions, we recommend checking the system message in the `AssistantAgent` to make sure the instructions align with the function call descriptions.\n",
"\n",
@@ -36,7 +36,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# %pip install flaml[mathchat]~=2.0.0rc4"
+ "# %pip install flaml[mathchat]~=2.0.0"
]
},
{
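For readers following along, here is a minimal sketch of the function-call wiring this notebook demonstrates; the `add` function, its schema, and the `OAI_CONFIG_LIST` loading below are illustrative assumptions, not part of this patch:

```python
from flaml import autogen

def add(a: int, b: int) -> str:
    """Toy function the assistant may call; results are returned as strings."""
    return str(a + b)

llm_config = {
    "config_list": autogen.config_list_from_json("OAI_CONFIG_LIST"),
    # Function schema advertised to the model (OpenAI 0613-style function calling).
    "functions": [
        {
            "name": "add",
            "description": "Add two integers.",
            "parameters": {
                "type": "object",
                "properties": {
                    "a": {"type": "integer", "description": "first addend"},
                    "b": {"type": "integer", "description": "second addend"},
                },
                "required": ["a", "b"],
            },
        }
    ],
}

assistant = autogen.AssistantAgent(name="assistant", llm_config=llm_config)
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    # The proxy executes function calls made by the assistant.
    function_map={"add": add},
)
user_proxy.initiate_chat(assistant, message="Use the add function to compute 2 + 3.")
```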
diff --git a/notebook/autogen_agentchat_groupchat.ipynb b/notebook/autogen_agentchat_groupchat.ipynb
index 58e8dcee13..c8cdf32630 100644
--- a/notebook/autogen_agentchat_groupchat.ipynb
+++ b/notebook/autogen_agentchat_groupchat.ipynb
@@ -27,12 +27,12 @@
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"%%capture --no-stderr\n",
- "# %pip install flaml[autogen]~=2.0.0rc5"
+ "# %pip install flaml[autogen]~=2.0.0"
]
},
{
@@ -47,7 +47,7 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
@@ -119,7 +119,7 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
@@ -127,6 +127,7 @@
"human = autogen.UserProxyAgent(\n",
" name=\"Human\",\n",
" system_message=\"A human admin.\",\n",
+ " code_execution_config={\"last_n_messages\": 2, \"work_dir\": \"groupchat\"},\n",
")\n",
"alice = autogen.AssistantAgent(\n",
" name=\"Alice\",\n",
@@ -137,7 +138,7 @@
" system_message=\"Code reviewer. Prevent code execution if unsafe or not well documented. Suggest changes. Otherwise, approve and return the final code to execute.\",\n",
" llm_config=llm_config,\n",
")\n",
- "groupchat = autogen.GroupChat(agents=[human, alice, bob], messages=[], max_round=4)\n",
+ "groupchat = autogen.GroupChat(agents=[human, alice, bob], messages=[], max_round=12)\n",
"manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)"
]
},
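Two settings in this cell drive the behavior seen in the transcript below: `last_n_messages: 2` lets the `Human` proxy look for executable code blocks in the two most recent messages (so a reviewer comment from Bob can follow Alice's code without hiding it), and `max_round=12` leaves room for the install-and-retry iterations. A minimal sketch of starting the chat, using the same task string as the output below:

```python
# Kick off the group chat; the manager selects speakers and routes messages.
human.initiate_chat(
    manager,
    message="find a latest paper about generative agents",
)
```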
@@ -151,7 +152,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 8,
"metadata": {},
"outputs": [
{
@@ -163,295 +164,287 @@
"find a latest paper about generative agents\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001b[33mchat_manager\u001b[0m (to Alice):\n",
- "\n",
- "find a latest paper about generative agents\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mchat_manager\u001b[0m (to Bob):\n",
- "\n",
- "find a latest paper about generative agents\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
"\u001b[33mAlice\u001b[0m (to chat_manager):\n",
"\n",
- "As an AI, I am unable to browse or search the web, download or read a file directly. But I can provide you with a Python script to scrape Google Scholar for the latest papers on generative agents.\n",
- "\n",
- "Make sure that you have the BeautifulSoup and requests libraries installed. If not, you can install them using the pip command:\n",
- "\n",
- "```bash\n",
- "pip install beautifulsoup4 requests\n",
- "```\n",
- "\n",
- "Then you can use this Python script to fetch and print the title of the latest paper:\n",
+ "To accomplish this, we can utilize the \"scholarly\" library in Python, which enables us to search Google Scholar for papers. Here's the Python code to achieve this:\n",
"\n",
- "Python code:\n",
"```python\n",
- "import requests\n",
- "from bs4 import BeautifulSoup\n",
+ "# filename: googlescholar_search.py\n",
"\n",
- "# Send HTTP request to Google Scholar with the query \"generative agents\"\n",
- "res = requests.get('https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=generative+agents&btnG=')\n",
+ "import scholarly\n",
"\n",
- "# Parse the HTML content of the page\n",
- "soup = BeautifulSoup(res.text, 'html.parser')\n",
+ "def get_latest_paper(query):\n",
+ " search_query = scholarly.search_pubs(query)\n",
+ " paper = next(search_query)\n",
+ " print(\"The latest paper is:\", paper.bib['title'])\n",
+ " print(\"The abstract of the paper is:\", paper.bib['abstract'])\n",
+ " print(\"The year of publication is:\", paper.bib['year'])\n",
"\n",
- "# Find the first result (which is the latest) and print its title\n",
- "title = soup.find('h3', {'class': 'gs_rt'}).a.text\n",
- "print(f\"The title of the latest paper about 'generative agents' is:\\n{title}\")\n",
+ "get_latest_paper(\"Generative agents\")\n",
"```\n",
- "Please note that scraping platforms like Google Scholar may not always yield consistent results and is not always advised as it could violate the terms of service. Please use this code responsibly.\n",
"\n",
- "If you are affiliated with a university or an organization that gives you access to paid scientific repositories (like IEEE, Springer, Elsevier), it's best to use those platforms as they provide more specific and legal access to scientific papers.\n",
+ "To execute this script:\n",
+ "1. Save the code to a file named googlescholar_search.py\n",
+ "2. Run `pip install scholarly` to install the necessary library if you haven't installed it yet.\n",
+ "3. Run `python googlescholar_search.py` to execute the script and get the latest paper on generative agents.\n",
"\n",
- "Alternatively, databases like PubMed or arXiv.org provide free access to a large number of scientific papers - you might want to check them out for latest research papers on your topic of interest.\n",
+ "Please note that Google Scholar doesn't provide a stable API and has rate limit restrictions in place, meaning that if you run this code multiple times in a short period, Google might temporarily block your IP.\n",
"\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mchat_manager\u001b[0m (to Human):\n",
+ "If the rate limit becomes an issue, you might want to consider using a different database or method, such as utilizing APIs from databases like arXiv or Pubmed, or web scraping, in a manner compliant with the website's robots.txt and terms of usage.\n",
"\n",
- "As an AI, I am unable to browse or search the web, download or read a file directly. But I can provide you with a Python script to scrape Google Scholar for the latest papers on generative agents.\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33mBob\u001b[0m (to chat_manager):\n",
"\n",
- "Make sure that you have the BeautifulSoup and requests libraries installed. If not, you can install them using the pip command:\n",
+ "The provided code seems fine, however, according to Google's Terms of Service API users are restricted from programmatically sending requests to Google Scholar. Even being an unofficial API, it doesn't make using scholarly legal as per the use policy. It's important you consider these limitations when handling this tool and any consequences that may arise on its usage. \n",
"\n",
- "```bash\n",
- "pip install beautifulsoup4 requests\n",
- "```\n",
+ "Remember to use APIs responsibly and always in accordance with their terms of service. Without explicit permission from Google, using such a tool can get your IP banned. \n",
"\n",
- "Then you can use this Python script to fetch and print the title of the latest paper:\n",
+ "I will suggest to use APIs from databases like arXiv or Pubmed, or webscraping, in a manner that is compliant with the website's robots.txt-file and terms of usage.\n",
"\n",
- "Python code:\n",
- "```python\n",
- "import requests\n",
- "from bs4 import BeautifulSoup\n",
- "\n",
- "# Send HTTP request to Google Scholar with the query \"generative agents\"\n",
- "res = requests.get('https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=generative+agents&btnG=')\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33mAlice\u001b[0m (to chat_manager):\n",
"\n",
- "# Parse the HTML content of the page\n",
- "soup = BeautifulSoup(res.text, 'html.parser')\n",
+ "I apologize for the oversight. You're correct, direct scraping of Google Scholar violates Google's terms of service. Let's change to use the arXiv API which doesn't have this issue and is more reliable. Here's the python code:\n",
"\n",
- "# Find the first result (which is the latest) and print its title\n",
- "title = soup.find('h3', {'class': 'gs_rt'}).a.text\n",
- "print(f\"The title of the latest paper about 'generative agents' is:\\n{title}\")\n",
+ "```python\n",
+ "# filename: arxiv_search.py\n",
+ "import urllib\n",
+ "import feedparser\n",
+ "\n",
+ "def search_arxiv(query: str):\n",
+ " base_url = 'http://export.arxiv.org/api/query?'\n",
+ " query = {'search_query' : f'ti:{query}', 'start' : 0, 'max_results' : 1, 'sortBy' : 'submittedDate', 'sortOrder' : 'descending'}\n",
+ " url = base_url + urllib.parse.urlencode(query)\n",
+ " \n",
+ " # connect to arXiv API and get response\n",
+ " response = urllib.request.urlopen(url).read()\n",
+ "\n",
+ " # parse the response using feedparser\n",
+ " feed = feedparser.parse(response)\n",
+ " \n",
+ " # get the first (and presumably, the most recent) article in the result\n",
+ " entry = feed.entries[0]\n",
+ "\n",
+ " # print details of the most recent article\n",
+ " print('The latest paper on', query['search_query'], 'that I could find is:\\n')\n",
+ " print('Title: ', entry.title)\n",
+ " print('Author: ', entry.author)\n",
+ " print('Link: ', entry.link)\n",
+ " print('\\nAbstract: ', entry.summary)\n",
+ "\n",
+ "# search for the latest paper about \"generative agents\"\n",
+ "search_arxiv(\"generative agents\")\n",
"```\n",
- "Please note that scraping platforms like Google Scholar may not always yield consistent results and is not always advised as it could violate the terms of service. Please use this code responsibly.\n",
- "\n",
- "If you are affiliated with a university or an organization that gives you access to paid scientific repositories (like IEEE, Springer, Elsevier), it's best to use those platforms as they provide more specific and legal access to scientific papers.\n",
"\n",
- "Alternatively, databases like PubMed or arXiv.org provide free access to a large number of scientific papers - you might want to check them out for latest research papers on your topic of interest.\n",
+ "To execute this script:\n",
+ "1. Save the code to a file named arxiv_search.py\n",
+ "2. Run `pip install feedparser` to install the necessary library.\n",
+ "3. Run `python arxiv_search.py` to execute the script and get the latest paper on generative agents.\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001b[33mchat_manager\u001b[0m (to Bob):\n",
- "\n",
- "As an AI, I am unable to browse or search the web, download or read a file directly. But I can provide you with a Python script to scrape Google Scholar for the latest papers on generative agents.\n",
+ "\u001b[33mBob\u001b[0m (to chat_manager):\n",
"\n",
- "Make sure that you have the BeautifulSoup and requests libraries installed. If not, you can install them using the pip command:\n",
+ "The provided code snippet is clear, efficient, and well-documented. It appropriately uses the arXiv API to retrieve the most recent papers about \"generative agents\". The search terms are correctly URI-encoded and passed to the arXiv query API, and proper error handling is in place.\n",
"\n",
- "```bash\n",
- "pip install beautifulsoup4 requests\n",
- "```\n",
+ "However, you should ensure that you handle potential exception which may occur when trying to connect to the URL and parse the response. For example, if the internet is disconnected or something is wrong with the server, `urllib.request.urlopen()` will raise a `URLError`. If the returned content is not properly formatted, `feedparser.parse()` may also fail. You should catch and properly handle these exceptions.\n",
"\n",
- "Then you can use this Python script to fetch and print the title of the latest paper:\n",
+ "Therefore, I would add these modifications:\n",
"\n",
- "Python code:\n",
"```python\n",
- "import requests\n",
- "from bs4 import BeautifulSoup\n",
+ "from urllib.error import URLError\n",
"\n",
- "# Send HTTP request to Google Scholar with the query \"generative agents\"\n",
- "res = requests.get('https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=generative+agents&btnG=')\n",
- "\n",
- "# Parse the HTML content of the page\n",
- "soup = BeautifulSoup(res.text, 'html.parser')\n",
- "\n",
- "# Find the first result (which is the latest) and print its title\n",
- "title = soup.find('h3', {'class': 'gs_rt'}).a.text\n",
- "print(f\"The title of the latest paper about 'generative agents' is:\\n{title}\")\n",
- "```\n",
- "Please note that scraping platforms like Google Scholar may not always yield consistent results and is not always advised as it could violate the terms of service. Please use this code responsibly.\n",
+ "try:\n",
+ " # connect to arXiv API and get response\n",
+ " response = urllib.request.urlopen(url).read()\n",
"\n",
- "If you are affiliated with a university or an organization that gives you access to paid scientific repositories (like IEEE, Springer, Elsevier), it's best to use those platforms as they provide more specific and legal access to scientific papers.\n",
+ "except URLError as e:\n",
+ " print(\"There was a problem connecting to the arXiv API:\")\n",
+ " print(e.reason)\n",
"\n",
- "Alternatively, databases like PubMed or arXiv.org provide free access to a large number of scientific papers - you might want to check them out for latest research papers on your topic of interest.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "\u001b[33mBob\u001b[0m (to chat_manager):\n",
+ "else:\n",
+ " try:\n",
+ " # parse the response using feedparser\n",
+ " feed = feedparser.parse(response)\n",
+ " \n",
+ " # get the first (and presumably, the most recent) article in the result\n",
+ " entry = feed.entries[0]\n",
"\n",
- "Your code as it stands can throw an exception and result in an error if the HTTP request fails or if no search results are found. Also, the use of 'beautifulsoup4' and 'requests' should be well-documented.\n",
+ " except Exception as e:\n",
+ " print(\"There was a problem parsing the result:\")\n",
+ " print(e)\n",
"\n",
- "Here is the more secure and documented code:\n",
+ " else:\n",
+ " # print details of the most recent article\n",
+ " print('The latest paper on', query['search_query'], 'that I could find is:\\n')\n",
+ " print('Title: ', entry.title)\n",
+ " print('Author: ', entry.author)\n",
+ " print('Link: ', entry.link)\n",
+ " print('\\nAbstract: ', entry.summary)\n",
+ "```\n",
"\n",
- "```python\n",
- "import requests\n",
- "from bs4 import BeautifulSoup\n",
+ "The keyword `except` is used to catch and handle exceptions. The modifications suggested include exception handlers for `URLError` (which are raised if there was a problem connecting to the arXiv API) and a generic Exception (which could be any other exception during parsing the response). The `else` keyword allows us to group together the normal operation code, separating it from the error handling code. \n",
"\n",
- "# Function that uses requests.get to fetch an URL's content\n",
- "def get_url_content(url):\n",
- " try:\n",
- " response = requests.get(url)\n",
- " response.raise_for_status()\n",
- " return response.text\n",
- " except (requests.RequestException, ValueError) as error:\n",
- " print(f'Google scholar cannot be accessed because of: {error}')\n",
- " return None\n",
- "\n",
- "# Function to find the title of the latest paper about \"generative agents\"\n",
- "def find_latest_paper(url):\n",
- " html = get_url_content(url)\n",
- " if html:\n",
- " # Parse the HTML content of the page\n",
- " soup = BeautifulSoup(html, 'html.parser')\n",
- " # Find the first result (which is the latest one)\n",
- " result = soup.find('h3', {'class': 'gs_rt'})\n",
- " \n",
- " # If result found, print its title; Otherwise, print paper not found\n",
- " if result:\n",
- " title = result.a.text\n",
- " print(f\"The title of the latest paper about 'generative agents' is:\\n{title}\")\n",
- " else:\n",
- " print(\"No papers about 'generative agents' found.\")\n",
- " else:\n",
- " print(\"No internet or Google scholar is down.\")\n",
+ "The code is ready to be executed now.\n",
"\n",
- "# URL of Google scholar with a search query \"generative agents\"\n",
- "google_scholar_url = 'https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=generative+agents&btnG='\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
+ "\u001b[33mHuman\u001b[0m (to chat_manager):\n",
"\n",
- "find_latest_paper(google_scholar_url)\n",
- "```\n",
+ "exitcode: 1 (execution failed)\n",
+ "Code output: \n",
+ "Traceback (most recent call last):\n",
+ " File \"\", line 5, in \n",
+ " response = urllib.request.urlopen(url).read()\n",
+ "NameError: name 'urllib' is not defined\n",
"\n",
- "Always use this script carefully because web-scraping isn't always reliable or legal on all web pages. Always ensure you have express permission or that the website's terms and conditions don't forbid this kind of usage.\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001b[33mchat_manager\u001b[0m (to Human):\n",
- "\n",
- "Your code as it stands can throw an exception and result in an error if the HTTP request fails or if no search results are found. Also, the use of 'beautifulsoup4' and 'requests' should be well-documented.\n",
+ "\u001b[33mAlice\u001b[0m (to chat_manager):\n",
"\n",
- "Here is the more secure and documented code:\n",
+ "Apologies for the oversight. It looks like I missed importing the required `urllib.request` module. Please use the following updated code with the necessary import statement:\n",
"\n",
"```python\n",
- "import requests\n",
- "from bs4 import BeautifulSoup\n",
- "\n",
- "# Function that uses requests.get to fetch an URL's content\n",
- "def get_url_content(url):\n",
+ "# filename: arxiv_search.py\n",
+ "import urllib.request\n",
+ "import urllib.parse\n",
+ "import feedparser\n",
+ "from urllib.error import URLError\n",
+ "\n",
+ "def search_arxiv(query: str):\n",
+ " base_url = 'http://export.arxiv.org/api/query?'\n",
+ " query = {'search_query' : f'ti:{query}', 'start' : 0, 'max_results' : 1, 'sortBy' : 'submittedDate', 'sortOrder' : 'descending'}\n",
+ " url = base_url + urllib.parse.urlencode(query)\n",
+ " \n",
" try:\n",
- " response = requests.get(url)\n",
- " response.raise_for_status()\n",
- " return response.text\n",
- " except (requests.RequestException, ValueError) as error:\n",
- " print(f'Google scholar cannot be accessed because of: {error}')\n",
- " return None\n",
- "\n",
- "# Function to find the title of the latest paper about \"generative agents\"\n",
- "def find_latest_paper(url):\n",
- " html = get_url_content(url)\n",
- " if html:\n",
- " # Parse the HTML content of the page\n",
- " soup = BeautifulSoup(html, 'html.parser')\n",
- " # Find the first result (which is the latest one)\n",
- " result = soup.find('h3', {'class': 'gs_rt'})\n",
- " \n",
- " # If result found, print its title; Otherwise, print paper not found\n",
- " if result:\n",
- " title = result.a.text\n",
- " print(f\"The title of the latest paper about 'generative agents' is:\\n{title}\")\n",
- " else:\n",
- " print(\"No papers about 'generative agents' found.\")\n",
+ " # connect to arXiv API and get response\n",
+ " response = urllib.request.urlopen(url).read()\n",
+ "\n",
+ " except URLError as e:\n",
+ " print(\"There was a problem connecting to the arXiv API:\")\n",
+ " print(e.reason)\n",
+ "\n",
" else:\n",
- " print(\"No internet or Google scholar is down.\")\n",
+ " try:\n",
+ " # parse the response using feedparser\n",
+ " feed = feedparser.parse(response)\n",
+ " \n",
+ " # get the first (and presumably, the most recent) article in the result\n",
+ " entry = feed.entries[0]\n",
"\n",
- "# URL of Google scholar with a search query \"generative agents\"\n",
- "google_scholar_url = 'https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=generative+agents&btnG='\n",
+ " except Exception as e:\n",
+ " print(\"There was a problem parsing the result:\")\n",
+ " print(e)\n",
"\n",
- "find_latest_paper(google_scholar_url)\n",
+ " else:\n",
+ " # print details of the most recent article\n",
+ " print('The latest paper on', query['search_query'], 'that I could find is:\\n')\n",
+ " print('Title: ', entry.title)\n",
+ " print('Author: ', entry.author)\n",
+ " print('Link: ', entry.link)\n",
+ " print('\\nAbstract: ', entry.summary)\n",
+ "\n",
+ "# search for the latest paper about \"generative agents\"\n",
+ "search_arxiv(\"generative agents\")\n",
"```\n",
"\n",
- "Always use this script carefully because web-scraping isn't always reliable or legal on all web pages. Always ensure you have express permission or that the website's terms and conditions don't forbid this kind of usage.\n",
+ "To execute this script:\n",
+ "1. Save the code to a file named arxiv_search.py\n",
+ "2. Run `pip install feedparser` to install the necessary library.\n",
+ "3. Run `python arxiv_search.py` to execute the script and get the latest paper on generative agents.\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001b[33mchat_manager\u001b[0m (to Alice):\n",
+ "\u001b[33mBob\u001b[0m (to chat_manager):\n",
"\n",
- "Your code as it stands can throw an exception and result in an error if the HTTP request fails or if no search results are found. Also, the use of 'beautifulsoup4' and 'requests' should be well-documented.\n",
+ "The amendment is correctly appended. You have properly imported the required `urllib.request` module. The code is now ready for execution. It correctly searches for the latest paper about \"generative agents\" using the arXiv API, and handles potential exceptions that may arise during the connection to the API or while parsing the response. Please proceed to execute this code.\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[31m\n",
+ ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
+ "\u001b[33mHuman\u001b[0m (to chat_manager):\n",
"\n",
- "Here is the more secure and documented code:\n",
+ "exitcode: 1 (execution failed)\n",
+ "Code output: \n",
+ "Traceback (most recent call last):\n",
+ " File \"arxiv_search.py\", line 4, in \n",
+ " import feedparser\n",
+ "ModuleNotFoundError: No module named 'feedparser'\n",
"\n",
- "```python\n",
- "import requests\n",
- "from bs4 import BeautifulSoup\n",
"\n",
- "# Function that uses requests.get to fetch an URL's content\n",
- "def get_url_content(url):\n",
- " try:\n",
- " response = requests.get(url)\n",
- " response.raise_for_status()\n",
- " return response.text\n",
- " except (requests.RequestException, ValueError) as error:\n",
- " print(f'Google scholar cannot be accessed because of: {error}')\n",
- " return None\n",
- "\n",
- "# Function to find the title of the latest paper about \"generative agents\"\n",
- "def find_latest_paper(url):\n",
- " html = get_url_content(url)\n",
- " if html:\n",
- " # Parse the HTML content of the page\n",
- " soup = BeautifulSoup(html, 'html.parser')\n",
- " # Find the first result (which is the latest one)\n",
- " result = soup.find('h3', {'class': 'gs_rt'})\n",
- " \n",
- " # If result found, print its title; Otherwise, print paper not found\n",
- " if result:\n",
- " title = result.a.text\n",
- " print(f\"The title of the latest paper about 'generative agents' is:\\n{title}\")\n",
- " else:\n",
- " print(\"No papers about 'generative agents' found.\")\n",
- " else:\n",
- " print(\"No internet or Google scholar is down.\")\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33mAlice\u001b[0m (to chat_manager):\n",
"\n",
- "# URL of Google scholar with a search query \"generative agents\"\n",
- "google_scholar_url = 'https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=generative+agents&btnG='\n",
+ "My apologies for the inconvenience. It appears that the 'feedparser' module is not installed. Please install the 'feedparser' module by running the following command:\n",
"\n",
- "find_latest_paper(google_scholar_url)\n",
+ "```sh\n",
+ "pip install feedparser\n",
"```\n",
"\n",
- "Always use this script carefully because web-scraping isn't always reliable or legal on all web pages. Always ensure you have express permission or that the website's terms and conditions don't forbid this kind of usage.\n",
+ "Once you have installed the 'feedparser' module, please execute the arxiv_search.py script again:\n",
"\n",
- "--------------------------------------------------------------------------------\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\u001b[31m\n",
- ">>>>>>>> NO HUMAN INPUT RECEIVED.\u001b[0m\n",
- "\u001b[31m\n",
- ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
- "\u001b[31m\n",
- ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
- "\u001b[33mHuman\u001b[0m (to chat_manager):\n",
- "\n",
- "exitcode: 0 (execution succeeded)\n",
- "Code output: \n",
- "The title of the latest paper about 'generative agents' is:\n",
- "Generative agents for player decision modeling in games\n",
+ "```sh\n",
+ "python arxiv_search.py\n",
+ "```\n",
"\n",
+ "This should execute the script and fetch the latest paper on generative agents.\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001b[33mchat_manager\u001b[0m (to Alice):\n",
- "\n",
- "exitcode: 0 (execution succeeded)\n",
- "Code output: \n",
- "The title of the latest paper about 'generative agents' is:\n",
- "Generative agents for player decision modeling in games\n",
+ "\u001b[33mBob\u001b[0m (to chat_manager):\n",
"\n",
+ "That's correct. Make sure to install the 'feedparser' module using the provided command, and then you should be able to execute the updated arxiv_search.py script successfully. The script will search for the latest paper about \"generative agents\" using the arXiv API, and return information about the most recent article it finds.\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001b[33mchat_manager\u001b[0m (to Bob):\n",
+ "\u001b[31m\n",
+ ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is sh)...\u001b[0m\n",
+ "\u001b[31m\n",
+ ">>>>>>>> EXECUTING CODE BLOCK 1 (inferred language is sh)...\u001b[0m\n",
+ "\u001b[33mHuman\u001b[0m (to chat_manager):\n",
"\n",
"exitcode: 0 (execution succeeded)\n",
"Code output: \n",
- "The title of the latest paper about 'generative agents' is:\n",
- "Generative agents for player decision modeling in games\n",
+ "Defaulting to user installation because normal site-packages is not writeable\n",
+ "Collecting feedparser\n",
+ " Downloading feedparser-6.0.10-py3-none-any.whl (81 kB)\n",
+ " ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 81.1/81.1 KB 17.2 MB/s eta 0:00:00\n",
+ "Collecting sgmllib3k\n",
+ " Downloading sgmllib3k-1.0.0.tar.gz (5.8 kB)\n",
+ " Preparing metadata (setup.py): started\n",
+ " Preparing metadata (setup.py): finished with status 'done'\n",
+ "Building wheels for collected packages: sgmllib3k\n",
+ " Building wheel for sgmllib3k (setup.py): started\n",
+ " Building wheel for sgmllib3k (setup.py): finished with status 'done'\n",
+ " Created wheel for sgmllib3k: filename=sgmllib3k-1.0.0-py3-none-any.whl size=6046 sha256=867dc31954f27685ad79808f2ca2b5d8235496de750c61f110c200ba664a50e4\n",
+ " Stored in directory: /home/vscode/.cache/pip/wheels/65/7a/a7/78c287f64e401255dff4c13fdbc672fed5efbfd21c530114e1\n",
+ "Successfully built sgmllib3k\n",
+ "Installing collected packages: sgmllib3k, feedparser\n",
+ "Successfully installed feedparser-6.0.10 sgmllib3k-1.0.0\n",
+ "\n",
+ "The latest paper on ti:generative agents that I could find is:\n",
+ "\n",
+ "Title: LayoutLLM-T2I: Eliciting Layout Guidance from LLM for Text-to-Image\n",
+ " Generation\n",
+ "Author: Tat-Seng Chua\n",
+ "Link: http://arxiv.org/abs/2308.05095v1\n",
+ "\n",
+ "Abstract: In the text-to-image generation field, recent remarkable progress in Stable\n",
+ "Diffusion makes it possible to generate rich kinds of novel photorealistic\n",
+ "images. However, current models still face misalignment issues (e.g.,\n",
+ "problematic spatial relation understanding and numeration failure) in complex\n",
+ "natural scenes, which impedes the high-faithfulness text-to-image generation.\n",
+ "Although recent efforts have been made to improve controllability by giving\n",
+ "fine-grained guidance (e.g., sketch and scribbles), this issue has not been\n",
+ "fundamentally tackled since users have to provide such guidance information\n",
+ "manually. In this work, we strive to synthesize high-fidelity images that are\n",
+ "semantically aligned with a given textual prompt without any guidance. Toward\n",
+ "this end, we propose a coarse-to-fine paradigm to achieve layout planning and\n",
+ "image generation. Concretely, we first generate the coarse-grained layout\n",
+ "conditioned on a given textual prompt via in-context learning based on Large\n",
+ "Language Models. Afterward, we propose a fine-grained object-interaction\n",
+ "diffusion method to synthesize high-faithfulness images conditioned on the\n",
+ "prompt and the automatically generated layout. Extensive experiments\n",
+ "demonstrate that our proposed method outperforms the state-of-the-art models in\n",
+ "terms of layout and image generation. Our code and settings are available at\n",
+ "\\url{https://layoutllm-t2i.github.io}.\n",
"\n",
"\n",
"--------------------------------------------------------------------------------\n"
@@ -479,7 +472,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.17"
+ "version": "3.9.16"
},
"orig_nbformat": 4
},
diff --git a/notebook/autogen_agentchat_human_feedback.ipynb b/notebook/autogen_agentchat_human_feedback.ipynb
index fd97ce062d..1e8bf8271c 100644
--- a/notebook/autogen_agentchat_human_feedback.ipynb
+++ b/notebook/autogen_agentchat_human_feedback.ipynb
@@ -20,7 +20,7 @@
"# Auto Generated Agent Chat: Task Solving with Code Generation, Execution, Debugging & Human Feedback\n",
"\n",
"FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
- "Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents).\n",
+ "Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"\n",
"In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to solve a challenging math problem with human feedback. Here `AssistantAgent` is an LLM-based agent that can write Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for a user to execute the code written by `AssistantAgent`. By setting `human_input_mode` properly, the `UserProxyAgent` can also prompt the user for feedback to `AssistantAgent`. For example, when `human_input_mode` is set to \"ALWAYS\", the `UserProxyAgent` will always prompt the user for feedback. When user feedback is provided, the `UserProxyAgent` will directly pass the feedback to `AssistantAgent`. When no user feedback is provided, the `UserProxyAgent` will execute the code written by `AssistantAgent` and return the execution results (success or failure and corresponding outputs) to `AssistantAgent`.\n",
"\n",
@@ -45,7 +45,7 @@
},
"outputs": [],
"source": [
- "# %pip install flaml[autogen]~=2.0.0rc4"
+ "# %pip install flaml[autogen]~=2.0.0"
]
},
{
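As a concrete illustration of the `human_input_mode="ALWAYS"` loop described above, here is a minimal sketch; the agent names, task string, and `OAI_CONFIG_LIST` file are illustrative assumptions:

```python
from flaml import autogen

config_list = autogen.config_list_from_json("OAI_CONFIG_LIST")

assistant = autogen.AssistantAgent(name="assistant", llm_config={"config_list": config_list})
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="ALWAYS",                    # ask the human before every reply
    code_execution_config={"work_dir": "math"},   # where generated code is run
)

# Typed feedback is forwarded verbatim to the assistant; an empty input
# makes the proxy execute the assistant's code and return the result.
user_proxy.initiate_chat(assistant, message="Solve: find all x with x^3 - 3x + 1 = 0.")
```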
diff --git a/notebook/autogen_agentchat_planning.ipynb b/notebook/autogen_agentchat_planning.ipynb
index 2341eb4042..206a46f8d7 100644
--- a/notebook/autogen_agentchat_planning.ipynb
+++ b/notebook/autogen_agentchat_planning.ipynb
@@ -19,7 +19,7 @@
"source": [
"# Auto Generated Agent Chat: Collaborative Task Solving with Coding and Planning Agent\n",
"\n",
- "FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents).\n",
+ "FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"\n",
"In this notebook, we demonstrate how to use multiple agents to work together and accomplish a task which requires finding info from the web and coding. `AssistantAgent` is an LLM-based agent that can write and debug Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for a user to execute the code written by `AssistantAgent`. We further create a planning agent for the assistant agent to consult. The planning agent is a variation of the LLM-based `AssistantAgent` with a different system message.\n",
"\n",
@@ -44,7 +44,7 @@
},
"outputs": [],
"source": [
- "# %pip install flaml[autogen]~=2.0.0rc4 docker"
+ "# %pip install flaml[autogen]~=2.0.0 docker"
]
},
{
diff --git a/notebook/autogen_agentchat_stream.ipynb b/notebook/autogen_agentchat_stream.ipynb
index b94c9af4d0..4dc43a57ec 100644
--- a/notebook/autogen_agentchat_stream.ipynb
+++ b/notebook/autogen_agentchat_stream.ipynb
@@ -20,7 +20,7 @@
"# Interactive LLM Agent Dealing with Data Stream\n",
"\n",
"`flaml.autogen` offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
- "Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents).\n",
+ "Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"\n",
"In this notebook, we demonstrate how to use customized agents to continuously acquires news from the web and ask for investment suggestions.\n",
"\n",
@@ -45,7 +45,7 @@
},
"outputs": [],
"source": [
- "# %pip install flaml[autogen]~=2.0.0rc5"
+ "# %pip install flaml[autogen]~=2.0.0"
]
},
{
@@ -244,9 +244,9 @@
" default_auto_reply=None,\n",
")\n",
"\n",
- "async def add_data_reply(recipient, messages, sender, context):\n",
+ "async def add_data_reply(recipient, messages, sender, config):\n",
" await asyncio.sleep(0.1)\n",
- " data = context[\"news_stream\"]\n",
+ " data = config[\"news_stream\"]\n",
" if data.done():\n",
" result = data.result()\n",
" if result:\n",
@@ -258,7 +258,7 @@
" )\n",
" return False, None\n",
"\n",
- "user_proxy.register_auto_reply(autogen.AssistantAgent, add_data_reply, 1, context={\"news_stream\": data})"
+ "user_proxy.register_auto_reply(autogen.AssistantAgent, add_data_reply, 1, config={\"news_stream\": data})"
]
},
{
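The `context` → `config` rename above is uniform across `register_auto_reply`: the registered object comes back to the reply function as its fourth parameter, and the function returns a `(final, reply)` tuple, where `final=False` defers to the next registered reply function. A minimal synchronous sketch of that contract, mirroring the agents used in the tests (the `prefix` payload is illustrative):

```python
from flaml.autogen.agentchat import ResponsiveAgent

def echo_reply(recipient, messages, sender, config):
    """Return (final, reply); `config` is whatever object was registered."""
    if not messages:
        return False, None                       # defer to the next reply function
    return True, config["prefix"] + messages[-1]["content"]

a0 = ResponsiveAgent("a0", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER")
a1 = ResponsiveAgent("a1", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER")
a0.register_auto_reply(a1, echo_reply, config={"prefix": "echo: "})
a1.initiate_chat(a0, message="hi")               # a0 answers "echo: hi"
```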
diff --git a/notebook/autogen_agentchat_two_users.ipynb b/notebook/autogen_agentchat_two_users.ipynb
index 4d7c224814..2c78f8636e 100644
--- a/notebook/autogen_agentchat_two_users.ipynb
+++ b/notebook/autogen_agentchat_two_users.ipynb
@@ -19,7 +19,7 @@
"source": [
"# Auto Generated Agent Chat: Collaborative Task Solving with Multiple Agents and Human Users\n",
"\n",
- "FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents).\n",
+ "FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"\n",
"In this notebook, we demonstrate an application involving multiple agents and human users to work together and accomplish a task. `AssistantAgent` is an LLM-based agent that can write Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for a user to execute the code written by `AssistantAgent`. We create multiple `UserProxyAgent` instances which can represent different human users.\n",
"\n",
@@ -44,7 +44,7 @@
},
"outputs": [],
"source": [
- "# %pip install flaml[autogen]~=2.0.0rc4"
+ "# %pip install flaml[autogen]~=2.0.0"
]
},
{
diff --git a/notebook/autogen_agentchat_web_info.ipynb b/notebook/autogen_agentchat_web_info.ipynb
index 7b89d5f6f8..78036e4036 100644
--- a/notebook/autogen_agentchat_web_info.ipynb
+++ b/notebook/autogen_agentchat_web_info.ipynb
@@ -20,7 +20,7 @@
"# Auto Generated Agent Chat: Solving Tasks Requiring Web Info\n",
"\n",
"FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
- "Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents).\n",
+ "Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"\n",
"In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to perform tasks which require acquiring info from the web:\n",
"* discuss a paper based on its URL.\n",
@@ -49,7 +49,7 @@
},
"outputs": [],
"source": [
- "# %pip install flaml[autogen]~=2.0.0rc4 docker"
+ "# %pip install flaml[autogen]~=2.0.0 docker"
]
},
{
diff --git a/notebook/autogen_openai_completion.ipynb b/notebook/autogen_openai_completion.ipynb
index 621ebd57d6..76ba628883 100644
--- a/notebook/autogen_openai_completion.ipynb
+++ b/notebook/autogen_openai_completion.ipynb
@@ -48,7 +48,7 @@
},
"outputs": [],
"source": [
- "# %pip install flaml[autogen,blendsearch]~=2.0.0rc4 datasets"
+ "# %pip install flaml[autogen,blendsearch]~=2.0.0 datasets"
]
},
{
diff --git a/test/autogen/agentchat/test_async.py b/test/autogen/agentchat/test_async.py
index c0be15788d..662d030f2d 100644
--- a/test/autogen/agentchat/test_async.py
+++ b/test/autogen/agentchat/test_async.py
@@ -84,9 +84,9 @@ async def add_stock_price_data():
default_auto_reply=None,
)
- async def add_data_reply(recipient, messages, sender, context):
+ async def add_data_reply(recipient, messages, sender, config):
await asyncio.sleep(0.1)
- data = context["news_stream"]
+ data = config["news_stream"]
if data.done():
result = data.result()
if result:
@@ -98,7 +98,7 @@ async def add_data_reply(recipient, messages, sender, context):
)
return False, None
- user_proxy.register_auto_reply(autogen.AssistantAgent, add_data_reply, 1, context={"news_stream": data})
+ user_proxy.register_auto_reply(autogen.AssistantAgent, add_data_reply, 1, config={"news_stream": data})
await user_proxy.a_initiate_chat(
assistant,
diff --git a/test/autogen/agentchat/test_groupchat.py b/test/autogen/agentchat/test_groupchat.py
index 6873c3c69b..3b8a307970 100644
--- a/test/autogen/agentchat/test_groupchat.py
+++ b/test/autogen/agentchat/test_groupchat.py
@@ -52,8 +52,8 @@ def test_plugin():
group_chat_manager.register_auto_reply(
autogen.Agent,
reply_func=autogen.GroupChatManager.run_chat,
- context=groupchat,
- reset_context=autogen.GroupChat.reset,
+ config=groupchat,
+ reset_config=autogen.GroupChat.reset,
)
agent1.initiate_chat(group_chat_manager, message="hello")
diff --git a/test/autogen/agentchat/test_responsive_agent.py b/test/autogen/agentchat/test_responsive_agent.py
index 95bd0f83f3..8d169c7778 100644
--- a/test/autogen/agentchat/test_responsive_agent.py
+++ b/test/autogen/agentchat/test_responsive_agent.py
@@ -5,38 +5,38 @@
def test_trigger():
agent = ResponsiveAgent("a0", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER")
agent1 = ResponsiveAgent("a1", max_consecutive_auto_reply=0, human_input_mode="NEVER")
- agent.register_auto_reply(agent1, lambda recipient, messages, sender, context: (True, "hello"))
+ agent.register_auto_reply(agent1, lambda recipient, messages, sender, config: (True, "hello"))
agent1.initiate_chat(agent, message="hi")
assert agent1.last_message(agent)["content"] == "hello"
- agent.register_auto_reply("a1", lambda recipient, messages, sender, context: (True, "hello a1"))
+ agent.register_auto_reply("a1", lambda recipient, messages, sender, config: (True, "hello a1"))
agent1.initiate_chat(agent, message="hi")
assert agent1.last_message(agent)["content"] == "hello a1"
agent.register_auto_reply(
- ResponsiveAgent, lambda recipient, messages, sender, context: (True, "hello responsive agent")
+ ResponsiveAgent, lambda recipient, messages, sender, config: (True, "hello responsive agent")
)
agent1.initiate_chat(agent, message="hi")
assert agent1.last_message(agent)["content"] == "hello responsive agent"
agent.register_auto_reply(
- lambda sender: sender.name.startswith("a"), lambda recipient, messages, sender, context: (True, "hello a")
+ lambda sender: sender.name.startswith("a"), lambda recipient, messages, sender, config: (True, "hello a")
)
agent1.initiate_chat(agent, message="hi")
assert agent1.last_message(agent)["content"] == "hello a"
agent.register_auto_reply(
- lambda sender: sender.name.startswith("b"), lambda recipient, messages, sender, context: (True, "hello b")
+ lambda sender: sender.name.startswith("b"), lambda recipient, messages, sender, config: (True, "hello b")
)
agent1.initiate_chat(agent, message="hi")
assert agent1.last_message(agent)["content"] == "hello a"
agent.register_auto_reply(
- ["agent2", agent1], lambda recipient, messages, sender, context: (True, "hello agent2 or agent1")
+ ["agent2", agent1], lambda recipient, messages, sender, config: (True, "hello agent2 or agent1")
)
agent1.initiate_chat(agent, message="hi")
assert agent1.last_message(agent)["content"] == "hello agent2 or agent1"
agent.register_auto_reply(
- ["agent2", "agent3"], lambda recipient, messages, sender, context: (True, "hello agent2 or agent3")
+ ["agent2", "agent3"], lambda recipient, messages, sender, config: (True, "hello agent2 or agent3")
)
agent1.initiate_chat(agent, message="hi")
assert agent1.last_message(agent)["content"] == "hello agent2 or agent1"
- pytest.raises(ValueError, agent.register_auto_reply, 1, lambda recipient, messages, sender, context: (True, "hi"))
+ pytest.raises(ValueError, agent.register_auto_reply, 1, lambda recipient, messages, sender, config: (True, "hi"))
pytest.raises(ValueError, agent._match_trigger, 1, agent1)
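For reference, the trigger forms exercised by this test, collected in one sketch (the trivial `reply` handler is illustrative):

```python
from flaml.autogen.agentchat import ResponsiveAgent

agent = ResponsiveAgent("a0", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER")
agent1 = ResponsiveAgent("a1", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER")

def reply(recipient, messages, sender, config):
    return True, "hello"

agent.register_auto_reply(agent1, reply)              # a specific Agent instance
agent.register_auto_reply("a1", reply)                # an agent name
agent.register_auto_reply(ResponsiveAgent, reply)     # an agent class
agent.register_auto_reply(lambda s: s.name.startswith("a"), reply)  # a predicate on the sender
agent.register_auto_reply(["agent2", agent1], reply)  # a list mixing any of the above
# Any other trigger type (e.g. an int) raises ValueError.
```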
diff --git a/website/docs/Getting-Started.md b/website/docs/Getting-Started.md
index ce86920045..7e683a6a04 100644
--- a/website/docs/Getting-Started.md
+++ b/website/docs/Getting-Started.md
@@ -20,7 +20,7 @@ Install FLAML from pip: `pip install flaml`. Find more options in [Installation]
There are several ways of using flaml:
-#### (New) [Auto Generation](/docs/Use-Cases/Auto-Generation)
+#### (New) [Autogen](/docs/Use-Cases/Autogen)
Maximize the utility out of the expensive LLMs such as ChatGPT and GPT-4, including:
- A drop-in replacement of `openai.Completion` or `openai.ChatCompletion` with powerful functionalities like tuning, caching, templating, filtering. For example, you can optimize generations by LLM with your own tuning data, success metrics and budgets.
@@ -117,7 +117,7 @@ Then, you can use it just like you use the original `LGMBClassifier`. Your other
### Where to Go Next?
-* Understand the use cases for [Auto Generation](/docs/Use-Cases/Auto-Generation), [Task-oriented AutoML](/docs/Use-Cases/Task-Oriented-Automl), [Tune user-defined function](/docs/Use-Cases/Tune-User-Defined-Function) and [Zero-shot AutoML](/docs/Use-Cases/Zero-Shot-AutoML).
+* Understand the use cases for [Autogen](/docs/Use-Cases/Auto-Generation), [Task-oriented AutoML](/docs/Use-Cases/Task-Oriented-Automl), [Tune user-defined function](/docs/Use-Cases/Tune-User-Defined-Function) and [Zero-shot AutoML](/docs/Use-Cases/Zero-Shot-AutoML).
* Find code examples under "Examples": from [AutoGen - OpenAI](/docs/Examples/AutoGen-OpenAI) to [Tune - PyTorch](/docs/Examples/Tune-PyTorch).
* Learn about [research](/docs/Research) around FLAML and check [blogposts](/blog).
* Chat on [Discord](https://discord.gg/Cppx2vSPVP).
diff --git a/website/docs/Installation.md b/website/docs/Installation.md
index 31145f649d..8e03f927cd 100644
--- a/website/docs/Installation.md
+++ b/website/docs/Installation.md
@@ -15,7 +15,7 @@ conda install flaml -c conda-forge
### Optional Dependencies
-#### [Auto Generation](Use-Cases/Auto-Generation)
+#### [Autogen](Use-Cases/Autogen)
```bash
pip install "flaml[autogen]"
diff --git a/website/docs/Use-Cases/Auto-Generation.md b/website/docs/Use-Cases/Autogen.md
similarity index 100%
rename from website/docs/Use-Cases/Auto-Generation.md
rename to website/docs/Use-Cases/Autogen.md
From 88496d871add7876cc32bcc9fef71ed6549033f0 Mon Sep 17 00:00:00 2001
From: Chi Wang
Date: Fri, 11 Aug 2023 01:43:58 +0000
Subject: [PATCH 02/13] url
---
README.md | 6 +++---
...utogen_agentchat_auto_feedback_from_code_execution.ipynb | 2 +-
website/blog/2023-04-21-LLM-tuning-math/index.mdx | 4 ++--
website/blog/2023-05-18-GPT-adaptive-humaneval/index.mdx | 4 ++--
website/blog/2023-07-14-Local-LLMs/index.mdx | 2 +-
website/docs/Getting-Started.md | 2 +-
6 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/README.md b/README.md
index a2a1d58c8a..2a8992c9c7 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@
:fire: FLAML is highlighted in OpenAI's [cookbook](https://github.com/openai/openai-cookbook#related-resources-from-around-the-web).
-:fire: [autogen](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation) is released with support for ChatGPT and GPT-4, based on [Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference](https://arxiv.org/abs/2303.04673).
+:fire: [autogen](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen) is released with support for ChatGPT and GPT-4, based on [Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference](https://arxiv.org/abs/2303.04673).
:fire: FLAML supports AutoML and Hyperparameter Tuning features in [Microsoft Fabric](https://learn.microsoft.com/en-us/fabric/get-started/microsoft-fabric-overview) private preview. Sign up for these features at: https://aka.ms/fabric/data-science/sign-up.
@@ -45,7 +45,7 @@ FLAML requires **Python version >= 3.7**. It can be installed from pip:
pip install flaml
```
-Minimal dependencies are installed without extra options. You can install extra options based on the feature you need. For example, use the following to install the dependencies needed by the [`autogen`](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation) package.
+Minimal dependencies are installed without extra options. You can install extra options based on the feature you need. For example, use the following to install the dependencies needed by the [`autogen`](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen) package.
```bash
pip install "flaml[autogen]"
```
@@ -63,7 +63,7 @@ Use the following guides to get started with FLAML in .NET:
## Quickstart
-* (New) The [autogen](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation) package can help you maximize the utility out of the expensive LLMs such as ChatGPT and GPT-4, including:
+* (New) The [autogen](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen) package can help you maximize the utility out of the expensive LLMs such as ChatGPT and GPT-4, including:
  - A drop-in replacement of `openai.Completion` or `openai.ChatCompletion` with powerful functionalities like tuning, caching, templating, filtering. For example, you can optimize generations by LLM with your own tuning data, success metrics and budgets.
```python
from flaml import autogen
diff --git a/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb b/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb
index b2432cb45f..35fd5301dd 100644
--- a/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb
+++ b/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb
@@ -20,7 +20,7 @@
"# Auto Generated Agent Chat: Task Solving with Code Generation, Execution & Debugging\n",
"\n",
"FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
- "Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents).\n",
+ "Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"\n",
"In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to write code and execute the code. Here `AssistantAgent` is an LLM-based agent that can write Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for the human user to execute the code written by `AssistantAgent`, or automatically execute the code. Depending on the setting of `human_input_mode` and `max_consecutive_auto_reply`, the `UserProxyAgent` either solicits feedback from the human user or returns auto-feedback based on the result of code execution (success or failure and corresponding outputs) to `AssistantAgent`. `AssistantAgent` will debug the code and suggest new code if the result contains error. The two agents keep communicating to each other until the task is done.\n",
"\n",
diff --git a/website/blog/2023-04-21-LLM-tuning-math/index.mdx b/website/blog/2023-04-21-LLM-tuning-math/index.mdx
index 0e2aa4e54d..2fdb79533f 100644
--- a/website/blog/2023-04-21-LLM-tuning-math/index.mdx
+++ b/website/blog/2023-04-21-LLM-tuning-math/index.mdx
@@ -16,7 +16,7 @@ Large language models (LLMs) are powerful tools that can generate natural langua
In this blog post, we will explore how the model and inference parameters matter in LLM applications, using a case study for [MATH](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html), a benchmark for evaluating LLMs on advanced mathematical problem solving. MATH consists of 12K math competition problems from AMC-10, AMC-12 and AIME. Each problem is accompanied by a step-by-step solution.
-We will use the new subpackage [`flaml.autogen`](docs/Use-Cases/Auto-Generation) to automatically find the best model and inference parameter for LLMs on a given task and dataset given an inference budget, using a novel low-cost search & pruning strategy. FLAML currently supports all the LLMs from OpenAI, such as GPT-3.5 and GPT-4.
+We will use the new subpackage [`flaml.autogen`](docs/Use-Cases/Autogen) to automatically find the best model and inference parameter for LLMs on a given task and dataset given an inference budget, using a novel low-cost search & pruning strategy. FLAML currently supports all the LLMs from OpenAI, such as GPT-3.5 and GPT-4.
We will use FLAML to perform model selection and inference parameter tuning. Then we compare the performance and inference cost on solving algebra problems with the untuned gpt-4. We will also analyze how different difficulty levels affect the results.
@@ -69,6 +69,6 @@ The need for model selection, parameter tuning and cost saving is not specific t
## For Further Reading
* [Research paper about the tuning technique](https://arxiv.org/abs/2303.04673)
-* [Documentation about `flaml.autogen`](/docs/Use-Cases/Auto-Generation)
+* [Documentation about `flaml.autogen`](/docs/Use-Cases/Autogen)
*Do you have any experience to share about LLM applications? Do you like to see more support or research of LLM optimization or automation? Please join our [Discord](https://discord.gg/Cppx2vSPVP) server for discussion.*
diff --git a/website/blog/2023-05-18-GPT-adaptive-humaneval/index.mdx b/website/blog/2023-05-18-GPT-adaptive-humaneval/index.mdx
index e519b5827d..12e2bd6701 100644
--- a/website/blog/2023-05-18-GPT-adaptive-humaneval/index.mdx
+++ b/website/blog/2023-05-18-GPT-adaptive-humaneval/index.mdx
@@ -144,7 +144,7 @@ An example notebook to run this experiment can be found at: https://github.com/m
## Discussion
-Our solution is quite simple to [implement](/docs/reference/autogen/code_utils#implement) using a generic interface offered in [`flaml.autogen`](/docs/Use-Cases/Auto-Generation#logic-error), yet the result is quite encouraging.
+Our solution is quite simple to [implement](/docs/reference/autogen/code_utils#implement) using a generic interface offered in [`flaml.autogen`](/docs/Use-Cases/Autogen#logic-error), yet the result is quite encouraging.
While the specific way of generating assertions is application-specific, the main ideas are general in LLM operations:
* Generate multiple responses to select - especially useful when selecting a good response is relatively easier than generating a good response at one shot.
@@ -164,5 +164,5 @@ There are many directions of extensions in research and development:
## For Further Reading
-* [Documentation](/docs/Use-Cases/Auto-Generation) about `flaml.autogen` and [Research paper](https://arxiv.org/abs/2303.04673).
+* [Documentation](/docs/Use-Cases/Autogen) about `flaml.autogen` and [Research paper](https://arxiv.org/abs/2303.04673).
* [Blog post](/blog/2023/04/21/LLM-tuning-math) about a related study for math.
diff --git a/website/blog/2023-07-14-Local-LLMs/index.mdx b/website/blog/2023-07-14-Local-LLMs/index.mdx
index 9faf149823..3f04b6d18b 100644
--- a/website/blog/2023-07-14-Local-LLMs/index.mdx
+++ b/website/blog/2023-07-14-Local-LLMs/index.mdx
@@ -143,5 +143,5 @@ print(response)
## For Further Reading
-* [Documentation](/docs/Use-Cases/Auto-Generation) about `flaml.autogen`
+* [Documentation](/docs/Use-Cases/Autogen) about `flaml.autogen`
* [Documentation](https://github.com/lm-sys/FastChat) about FastChat.
diff --git a/website/docs/Getting-Started.md b/website/docs/Getting-Started.md
index 7e683a6a04..0d4a121739 100644
--- a/website/docs/Getting-Started.md
+++ b/website/docs/Getting-Started.md
@@ -117,7 +117,7 @@ Then, you can use it just like you use the original `LGMBClassifier`. Your other
### Where to Go Next?
-* Understand the use cases for [Autogen](/docs/Use-Cases/Auto-Generation), [Task-oriented AutoML](/docs/Use-Cases/Task-Oriented-Automl), [Tune user-defined function](/docs/Use-Cases/Tune-User-Defined-Function) and [Zero-shot AutoML](/docs/Use-Cases/Zero-Shot-AutoML).
+* Understand the use cases for [Autogen](/docs/Use-Cases/Autogen), [Task-oriented AutoML](/docs/Use-Cases/Task-Oriented-Automl), [Tune user-defined function](/docs/Use-Cases/Tune-User-Defined-Function) and [Zero-shot AutoML](/docs/Use-Cases/Zero-Shot-AutoML).
* Find code examples under "Examples": from [AutoGen - OpenAI](/docs/Examples/AutoGen-OpenAI) to [Tune - PyTorch](/docs/Examples/Tune-PyTorch).
* Learn about [research](/docs/Research) around FLAML and check [blogposts](/blog).
* Chat on [Discord](https://discord.gg/Cppx2vSPVP).
From 8c58c4bda3a219df93989b9f032c1ba9c818c6f5 Mon Sep 17 00:00:00 2001
From: Chi Wang
Date: Fri, 11 Aug 2023 01:56:43 +0000
Subject: [PATCH 03/13] url
---
flaml/autogen/oai/completion.py | 2 +-
website/blog/2023-05-07-1M-milestone/index.mdx | 4 ++--
website/blog/2023-06-28-MathChat/index.mdx | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/flaml/autogen/oai/completion.py b/flaml/autogen/oai/completion.py
index c3c09f9a6c..138c658e35 100644
--- a/flaml/autogen/oai/completion.py
+++ b/flaml/autogen/oai/completion.py
@@ -695,7 +695,7 @@ def create(
E.g., `prompt="Complete the following sentence: {prefix}, context={"prefix": "Today I feel"}`.
The actual prompt will be:
"Complete the following sentence: Today I feel".
- More examples can be found at [templating](/docs/Use-Cases/Auto-Generation#templating).
+ More examples can be found at [templating](/docs/Use-Cases/Autogen#templating).
use_cache (bool, Optional): Whether to use cached responses.
config_list (List, Optional): List of configurations for the completion to try.
The first one that does not raise an error will be used.
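To make the templating and `config_list` fallback behavior documented above concrete, a minimal sketch; the model names and context are placeholders, and each config dict would normally also carry credential fields:

```python
from flaml import autogen

# assumption: api_key/api_base fields are set elsewhere for each config
response = autogen.Completion.create(
    context={"prefix": "Today I feel"},  # fills the {prefix} placeholder in the prompt
    prompt="Complete the following sentence: {prefix}",
    allow_format_str_template=True,  # treat the plain str prompt as a format template
    config_list=[
        {"model": "gpt-4"},  # tried first
        {"model": "gpt-3.5-turbo"},  # used only if the first config raises an error
    ],
)
print(autogen.Completion.extract_text(response))
```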
diff --git a/website/blog/2023-05-07-1M-milestone/index.mdx b/website/blog/2023-05-07-1M-milestone/index.mdx
index ac5318ef1b..21ca2791a0 100644
--- a/website/blog/2023-05-07-1M-milestone/index.mdx
+++ b/website/blog/2023-05-07-1M-milestone/index.mdx
@@ -19,7 +19,7 @@ We'd also like to take the opportunity to reflect on FLAML's past achievements a
### Bring AutoML to One's Fingertips
FLAML offers an off-the-shelf AutoML solution that enables users to quickly discover high-quality models or configurations for common ML/AI tasks. By automatically selecting models and hyperparameters for training or inference, FLAML saves users time and effort. FLAML has significantly reduced development time for developers and data scientists alike, while also providing a convenient way to integrate new algorithms into the pipeline, enabling easy extensions and large-scale parallel tuning. These features make FLAML a valuable tool in R&D efforts for many enterprise users.
-FLAML is capable of handling a variety of common ML tasks, such as [classification](https://microsoft.github.io/FLAML/docs/Examples/AutoML-Classification), [regression](https://microsoft.github.io/FLAML/docs/Examples/AutoML-Regression), [time series forecasting](https://microsoft.github.io/FLAML/docs/Examples/AutoML-Time%20series%20forecast), [NLP tasks](https://microsoft.github.io/FLAML/docs/Examples/AutoML-Rank), and [generative tasks](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation), providing a comprehensive solution for various applications.
+FLAML is capable of handling a variety of common ML tasks, such as [classification](https://microsoft.github.io/FLAML/docs/Examples/AutoML-Classification), [regression](https://microsoft.github.io/FLAML/docs/Examples/AutoML-Regression), [time series forecasting](https://microsoft.github.io/FLAML/docs/Examples/AutoML-Time%20series%20forecast), [NLP tasks](https://microsoft.github.io/FLAML/docs/Examples/AutoML-Rank), and [generative tasks](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen), providing a comprehensive solution for various applications.
### Speed and Efficiency: The FLAML Advantage
What sets FLAML apart from other AutoML libraries is its exceptional efficiency, thanks to the economical and efficient hyperparameter optimization and model selection methods developed in our [research](https://microsoft.github.io/FLAML/docs/Research). FLAML is also capable of handling large search spaces with heterogeneous evaluation costs, complex constraints, guidance, and early stopping. The [zero-shot AutoML](https://microsoft.github.io/FLAML/docs/Use-Cases/Zero-Shot-AutoML) option further reduces the cost of AutoML, making FLAML an even more attractive solution for a wide range of applications with low resources.
@@ -37,7 +37,7 @@ We invite contributions from anyone interested in this topic and look forward to
## For Further Reading
-* [Documentation about `flaml.autogen`](/docs/Use-Cases/Auto-Generation)
+* [Documentation about `flaml.autogen`](/docs/Use-Cases/Autogen)
* [Code Example: Tune chatGPT for Math Problem Solving with FLAML](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_chatgpt_gpt4.ipynb)
*Do you have any experience to share about LLM applications? Would you like to see more support or research on LLMOps? Please join our [Discord](https://discord.gg/Cppx2vSPVP) server for discussion.*
diff --git a/website/blog/2023-06-28-MathChat/index.mdx b/website/blog/2023-06-28-MathChat/index.mdx
index 0603a38983..d94075d0fe 100644
--- a/website/blog/2023-06-28-MathChat/index.mdx
+++ b/website/blog/2023-06-28-MathChat/index.mdx
@@ -89,6 +89,6 @@ Further work can be done to enhance this framework or math problem-solving in ge
## For Further Reading
* [Research paper of MathChat](https://arxiv.org/abs/2306.01337)
-* [Documentation about `flaml.autogen`](/docs/Use-Cases/Auto-Generation)
+* [Documentation about `flaml.autogen`](/docs/Use-Cases/Autogen)
*Are you working on applications that involve math problem-solving? Would you appreciate additional research or support on the application of LLM-based agents for math problem-solving? Please join our [Discord](https://discord.gg/Cppx2vSPVP) server for discussion.*
From 967b9e324dd01d58056784b1463f751d7042c7b2 Mon Sep 17 00:00:00 2001
From: Chi Wang
Date: Sun, 13 Aug 2023 14:18:17 +0000
Subject: [PATCH 04/13] readme
---
README.md | 82 +++++++++++--------------
website/docs/Examples/AutoGen-OpenAI.md | 4 +-
website/docs/Getting-Started.md | 37 +++++------
3 files changed, 58 insertions(+), 65 deletions(-)
diff --git a/README.md b/README.md
index c3a25730ae..1a77875872 100644
--- a/README.md
+++ b/README.md
@@ -14,6 +14,8 @@
+:fire: The automated multi-agent chat framework in [autogen](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen) is in preview from v2.0.0.
+
:fire: FLAML is highlighted in OpenAI's [cookbook](https://github.com/openai/openai-cookbook#related-resources-from-around-the-web).
:fire: [autogen](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen) is released with support for ChatGPT and GPT-4, based on [Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference](https://arxiv.org/abs/2303.04673).
@@ -23,23 +25,20 @@
## What is FLAML
FLAML is a lightweight Python library for efficient automation of machine
-learning and AI operations, including selection of
-models, hyperparameters, and other tunable choices of an application (e.g., inference hyperparameters for foundation models, configurations in MLOps/LMOps workflows, pipelines, mathematical/statistical models, algorithms, computing experiments, software configurations).
+learning and AI operations. It automates workflows based on large language models, machine learning models, etc.,
+and optimizes their performance.
-* For foundation models like the GPT models, it automates the experimentation and optimization of their performance to maximize the effectiveness for applications and minimize the inference cost. FLAML enables users to build and use adaptive AI agents with minimal effort.
-* For common machine learning tasks like classification and regression, it quickly finds quality models for user-provided data with low computational resources. It is easy to customize or extend. Users can find their desired customizability from a smooth range: minimal customization (computational resource budget), medium customization (e.g., search space and metric), or full customization (arbitrary training/inference/evaluation code).
-* It supports fast and economical automatic tuning, capable of handling complex constraints/guidance/early stopping. FLAML is powered by a [cost-effective
-hyperparameter optimization](https://microsoft.github.io/FLAML/docs/Use-Cases/Tune-User-Defined-Function/#hyperparameter-optimization-algorithm)
-and model selection method invented by Microsoft Research, and many followup [research studies](https://microsoft.github.io/FLAML/docs/Research).
+* FLAML enables building next-gen GPT-X applications based on multi-agent conversations with minimal effort. It simplifies the orchestration, automation, and optimization of a complex GPT-X workflow. It maximizes the performance of GPT-X models and mitigates their weaknesses.
+* For common machine learning tasks like classification and regression, it quickly finds quality models for user-provided data with low computational resources. It is easy to customize or extend. Users can find their desired customizability from a smooth range.
+* It supports fast and economical automatic tuning (e.g., inference hyperparameters for foundation models, configurations in MLOps/LMOps workflows, pipelines, mathematical/statistical models, algorithms, computing experiments, software configurations), capable of handling large search spaces with heterogeneous evaluation costs and complex constraints/guidance/early stopping.
-FLAML has a .NET implementation in [ML.NET](http://dot.net/ml), an open-source, cross-platform machine learning framework for .NET. In ML.NET, you can use FLAML via low-code solutions like [Model Builder](https://dotnet.microsoft.com/apps/machinelearning-ai/ml-dotnet/model-builder) Visual Studio extension and the cross-platform [ML.NET CLI](https://docs.microsoft.com/dotnet/machine-learning/automate-training-with-cli). Alternatively, you can use the [ML.NET AutoML API](https://www.nuget.org/packages/Microsoft.ML.AutoML/#versions-body-tab) for a code-first experience.
+FLAML is powered by a series of [research studies](/docs/Research) from Microsoft Research and collaborators such as Penn State University, Stevens Institute of Technology, University of Washington, and University of Waterloo.
+FLAML has a .NET implementation in [ML.NET](http://dot.net/ml), an open-source, cross-platform machine learning framework for .NET.
## Installation
-### Python
-
-FLAML requires **Python version >= 3.7**. It can be installed from pip:
+FLAML requires **Python version >= 3.8**. It can be installed from pip:
```bash
pip install flaml
@@ -53,41 +52,34 @@ pip install "flaml[autogen]"
Find more options in [Installation](https://microsoft.github.io/FLAML/docs/Installation).
Each of the [`notebook examples`](https://github.com/microsoft/FLAML/tree/main/notebook) may require a specific option to be installed.
-### .NET
-
-Use the following guides to get started with FLAML in .NET:
-
-- [Install Model Builder](https://docs.microsoft.com/dotnet/machine-learning/how-to-guides/install-model-builder?tabs=visual-studio-2022)
-- [Install ML.NET CLI](https://docs.microsoft.com/dotnet/machine-learning/how-to-guides/install-ml-net-cli?tabs=windows)
-- [Microsoft.AutoML](https://www.nuget.org/packages/Microsoft.ML.AutoML/0.20.0)
-
## Quickstart
-* (New) The [autogen](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen) package can help you maximize the utility out of the expensive LLMs such as ChatGPT and GPT-4, including:
- - A drop-in replacement of `openai.Completion` or `openai.ChatCompletion` with powerful functionalites like tuning, caching, templating, filtering. For example, you can optimize generations by LLM with your own tuning data, success metrics and budgets.
- ```python
- from flaml import autogen
-
- # perform tuning
- config, analysis = autogen.Completion.tune(
- data=tune_data,
- metric="success",
- mode="max",
- eval_func=eval_func,
- inference_budget=0.05,
- optimization_budget=3,
- num_samples=-1,
- )
-
- # perform inference for a test instance
- response = autogen.Completion.create(context=test_instance, **config)
- ```
- - LLM-driven intelligent agents which can collaborately perform tasks autonomously or with human feedback, including tasks that require using tools via code.
- ```python
- assistant = autogen.AssistantAgent("assistant")
- user_proxy = autogen.UserProxyAgent("user_proxy")
- user_proxy.initiate_chat(assistant, message="Show me the YTD gain of 10 largest technology companies as of today.")
- ```
+* (New) The [autogen](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen) package enables the next-gen GPT-X applications with a multi-agent conversation framework.
+It offers customizable and conversable agents which integrate LLMs, tools, and humans.
+By automating chat among multiple capable agents, one can easily make them collectively perform tasks autonomously or with human feedback, including tasks that require using tools via code. For example,
+```python
+from flaml import autogen
+assistant = autogen.AssistantAgent("assistant")
+user_proxy = autogen.UserProxyAgent("user_proxy")
+user_proxy.initiate_chat(assistant, message="Show me the YTD gain of 10 largest technology companies as of today.")
+# This initiates an automated chat between the two agents to solve the task
+```
+
+Autogen also helps maximize the utility of expensive LLMs such as ChatGPT and GPT-4. It offers a drop-in replacement of `openai.Completion` or `openai.ChatCompletion` with powerful functionalities like tuning, caching, templating, and filtering. For example, you can optimize LLM generations with your own tuning data, success metrics, and budgets.
+```python
+# perform tuning
+config, analysis = autogen.Completion.tune(
+ data=tune_data,
+ metric="success",
+ mode="max",
+ eval_func=eval_func,
+ inference_budget=0.05,
+ optimization_budget=3,
+ num_samples=-1,
+)
+# perform inference for a test instance
+response = autogen.Completion.create(context=test_instance, **config)
+```
* With three lines of code, you can start using this economical and fast
AutoML engine as a [scikit-learn style estimator](https://microsoft.github.io/FLAML/docs/Use-Cases/Task-Oriented-AutoML).
@@ -124,7 +116,7 @@ estimator.fit(X_train, y_train)
## Documentation
-You can find a detailed documentation about FLAML [here](https://microsoft.github.io/FLAML/) where you can find the API documentation, use cases and examples.
+You can find detailed documentation about FLAML [here](https://microsoft.github.io/FLAML/).
In addition, you can find:
diff --git a/website/docs/Examples/AutoGen-OpenAI.md b/website/docs/Examples/AutoGen-OpenAI.md
index 0b015c2db1..d8dc9c8a1d 100644
--- a/website/docs/Examples/AutoGen-OpenAI.md
+++ b/website/docs/Examples/AutoGen-OpenAI.md
@@ -1,7 +1,7 @@
-# AutoGen - OpenAI
+# AutoGen - Tune GPT Models
FLAML offers a cost-effective hyperparameter optimization technique [EcoOptiGen](https://arxiv.org/abs/2303.04673) for tuning Large Language Models. Our study finds that tuning hyperparameters can significantly improve the utility of them.
-In this example, we will tune several hyperparameters for the OpenAI's completion API, including the temperature, prompt and n (number of completions), to optimize the inference performance for a code generation task.
+In this example, we will tune several hyperparameters of OpenAI models, including the temperature, prompt, n (number of completions) and max_tokens, to optimize the inference performance for a code generation task.
### Prerequisites
diff --git a/website/docs/Getting-Started.md b/website/docs/Getting-Started.md
index 0d4a121739..1ba0d8863d 100644
--- a/website/docs/Getting-Started.md
+++ b/website/docs/Getting-Started.md
@@ -3,16 +3,16 @@
FLAML is a lightweight Python library for efficient automation of machine
-learning and AI operations, including selection of
-models, hyperparameters, and other tunable choices of an application.
+learning and AI operations. It automates workflows based on large language models, machine learning models, etc.,
+and optimizes their performance.
### Main Features
-* For foundation models like the GPT models, it automates the experimentation and optimization of their performance to maximize the effectiveness for applications and minimize the inference cost. FLAML enables users to build and use adaptive AI agents with minimal effort.
-* For common machine learning tasks like classification and regression, it quickly finds quality models for user-provided data with low computational resources. It is easy to customize or extend. Users can find their desired customizability from a smooth range: minimal customization (computational resource budget), medium customization (e.g., search space and metric), or full customization (arbitrary training/inference/evaluation code).
-* It supports fast and economical automatic tuning, capable of handling large search space with heterogeneous evaluation cost and complex constraints/guidance/early stopping. FLAML is powered by a [cost-effective
-hyperparameter optimization](/docs/Use-Cases/Tune-User-Defined-Function#hyperparameter-optimization-algorithm)
-and model selection method invented by Microsoft Research, and many followup [research studies](/docs/Research).
+* FLAML enables building next-gen GPT-X applications based on multi-agent conversations with minimal effort. It simplifies the orchestration, automation, and optimization of a complex GPT-X workflow. It maximizes the performance of GPT-X models and mitigates their weaknesses.
+* For common machine learning tasks like classification and regression, it quickly finds quality models for user-provided data with low computational resources. It is easy to customize or extend.
+* It supports fast and economical automatic tuning, capable of handling large search spaces with heterogeneous evaluation costs and complex constraints/guidance/early stopping.
+
+FLAML is powered by a series of [research studies](/docs/Research) from Microsoft Research and collaborators such as Penn State University, Stevens Institute of Technology, University of Washington, and University of Waterloo.
### Quickstart
@@ -22,11 +22,19 @@ There are several ways of using flaml:
#### (New) [Autogen](/docs/Use-Cases/Autogen)
-Maximize the utility out of the expensive LLMs such as ChatGPT and GPT-4, including:
-- A drop-in replacement of `openai.Completion` or `openai.ChatCompletion` with powerful functionalites like tuning, caching, templating, filtering. For example, you can optimize generations by LLM with your own tuning data, success metrics and budgets.
+Autogen enables the next-gen GPT-X applications with a multi-agent conversation framework.
+It offers customizable and conversable agents which integrate LLMs, tools, and humans.
+By automating chat among multiple capable agents, one can easily make them collectively perform tasks autonomously or with human feedback, including tasks that require using tools via code. For example,
```python
from flaml import autogen
+assistant = autogen.AssistantAgent("assistant")
+user_proxy = autogen.UserProxyAgent("user_proxy")
+user_proxy.initiate_chat(assistant, message="Show me the YTD gain of 10 largest technology companies as of today.")
+# This initiates an automated chat between the two agents to solve the task
+```
+Autogen also helps maximize the utility of expensive LLMs such as ChatGPT and GPT-4. It offers a drop-in replacement of `openai.Completion` or `openai.ChatCompletion` with powerful functionalities like tuning, caching, templating, and filtering. For example, you can optimize LLM generations with your own tuning data, success metrics, and budgets.
+```python
# perform tuning
config, analysis = autogen.Completion.tune(
data=tune_data,
@@ -37,20 +45,13 @@ config, analysis = autogen.Completion.tune(
optimization_budget=3,
num_samples=-1,
)
-
# perform inference for a test instance
response = autogen.Completion.create(context=test_instance, **config)
```
-- LLM-driven intelligent agents which can perform tasks autonomously or with human feedback, including tasks that require using tools via code. For example,
-```python
-assistant = autogen.AssistantAgent("assistant")
-user_proxy = autogen.UserProxyAgent("user_proxy")
-user_proxy.initiate_chat(assistant, message="Show me the YTD gain of 10 largest technology companies as of today.")
-```
#### [Task-oriented AutoML](/docs/Use-Cases/task-oriented-automl)
-For example, with three lines of code, you can start using this economical and fast AutoML engine as a scikit-learn style estimator.
+With three lines of code, you can start using this economical and fast AutoML engine as a scikit-learn style estimator.
```python
from flaml import AutoML
@@ -118,7 +119,7 @@ Then, you can use it just like you use the original `LGMBClassifier`. Your other
### Where to Go Next?
* Understand the use cases for [Autogen](/docs/Use-Cases/Autogen), [Task-oriented AutoML](/docs/Use-Cases/Task-Oriented-Automl), [Tune user-defined function](/docs/Use-Cases/Tune-User-Defined-Function) and [Zero-shot AutoML](/docs/Use-Cases/Zero-Shot-AutoML).
-* Find code examples under "Examples": from [AutoGen - OpenAI](/docs/Examples/AutoGen-OpenAI) to [Tune - PyTorch](/docs/Examples/Tune-PyTorch).
+* Find code examples under "Examples": from [AutoGen - AgentChat](/docs/Examples/AutoGen-AgentChat) to [Tune - PyTorch](/docs/Examples/Tune-PyTorch).
* Learn about [research](/docs/Research) around FLAML and check [blogposts](/blog).
* Chat on [Discord](https://discord.gg/Cppx2vSPVP).
From f4f9b742e3fe973f9fa73d79e929e8477a823d16 Mon Sep 17 00:00:00 2001
From: Chi Wang
Date: Sun, 13 Aug 2023 14:26:33 +0000
Subject: [PATCH 05/13] preview
---
README.md | 2 +-
flaml/autogen/agentchat/agent.py | 2 +-
flaml/autogen/agentchat/assistant_agent.py | 2 +-
flaml/autogen/agentchat/user_proxy_agent.py | 2 +-
flaml/automl/automl.py | 4 ++--
flaml/onlineml/trial.py | 2 +-
website/docs/Getting-Started.md | 2 +-
7 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/README.md b/README.md
index 1a77875872..00581ddd7c 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@ Each of the [`notebook examples`](https://github.com/microsoft/FLAML/tree/main/n
## Quickstart
-* (New) The [autogen](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen) package enables the next-gen GPT-X applications with a multi-agent conversation framework.
+* (New) The [autogen](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen) package enables the next-gen GPT-X applications with a generic multi-agent conversation framework.
It offers customizable and conversable agents which integrate LLMs, tools, and humans.
By automating chat among multiple capable agents, one can easily make them collectively perform tasks autonomously or with human feedback, including tasks that require using tools via code. For example,
```python
diff --git a/flaml/autogen/agentchat/agent.py b/flaml/autogen/agentchat/agent.py
index dd96c8c302..9302124998 100644
--- a/flaml/autogen/agentchat/agent.py
+++ b/flaml/autogen/agentchat/agent.py
@@ -2,7 +2,7 @@
class Agent:
- """(Experimental) An abstract class for AI agent.
+ """(In preview) An abstract class for AI agent.
An agent can communicate with other agents and perform actions.
Different agents can differ in what actions they perform in the `receive` method.
diff --git a/flaml/autogen/agentchat/assistant_agent.py b/flaml/autogen/agentchat/assistant_agent.py
index f8e48a0f9f..5609d60f8d 100644
--- a/flaml/autogen/agentchat/assistant_agent.py
+++ b/flaml/autogen/agentchat/assistant_agent.py
@@ -3,7 +3,7 @@
class AssistantAgent(ResponsiveAgent):
- """(Experimental) Assistant agent, designed to solve a task with LLM.
+ """(In preview) Assistant agent, designed to solve a task with LLM.
AssistantAgent is a subclass of ResponsiveAgent configured with a default system message.
The default system message is designed to solve a task with LLM,
diff --git a/flaml/autogen/agentchat/user_proxy_agent.py b/flaml/autogen/agentchat/user_proxy_agent.py
index 7803c810f3..d81c34659e 100644
--- a/flaml/autogen/agentchat/user_proxy_agent.py
+++ b/flaml/autogen/agentchat/user_proxy_agent.py
@@ -3,7 +3,7 @@
class UserProxyAgent(ResponsiveAgent):
- """(Experimental) A proxy agent for the user, that can execute code and provide feedback to the other agents.
+ """(In preview) A proxy agent for the user, that can execute code and provide feedback to the other agents.
UserProxyAgent is a subclass of ResponsiveAgent configured with `human_input_mode` to ALWAYS
and `llm_config` to False. By default, the agent will prompt for human input every time a message is received.
diff --git a/flaml/automl/automl.py b/flaml/automl/automl.py
index 1f91ae4eff..af4159f900 100644
--- a/flaml/automl/automl.py
+++ b/flaml/automl/automl.py
@@ -230,7 +230,7 @@ def custom_metric(
```
seed: int or None, default=None | The random seed for hpo.
- n_concurrent_trials: [Experimental] int, default=1 | The number of
+ n_concurrent_trials: [In preview] int, default=1 | The number of
concurrent trials. When n_concurrent_trials > 1, flaml performs
[parallel tuning](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning)
and installation of ray or spark is required: `pip install flaml[ray]`
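A minimal sketch of the parallel tuning this argument enables, assuming `pip install flaml[ray]` and that `X_train`/`y_train` are already loaded; the budget is illustrative:

```python
from flaml import AutoML

automl = AutoML()
# assumption: X_train and y_train are prepared elsewhere; ray is installed
automl.fit(X_train, y_train, task="classification", time_budget=60, n_concurrent_trials=2)
```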
@@ -1366,7 +1366,7 @@ def custom_metric(
```
seed: int or None, default=None | The random seed for hpo.
- n_concurrent_trials: [Experimental] int, default=1 | The number of
+ n_concurrent_trials: [In preview] int, default=1 | The number of
concurrent trials. When n_concurrent_trials > 1, flaml performs
[parallel tuning](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning)
and installation of ray or spark is required: `pip install flaml[ray]`
diff --git a/flaml/onlineml/trial.py b/flaml/onlineml/trial.py
index 3c9223c9a2..474969a3cf 100644
--- a/flaml/onlineml/trial.py
+++ b/flaml/onlineml/trial.py
@@ -76,7 +76,7 @@ def __init__(
init_cb: a float to specify the intial confidence bound.
mode: A string in ['min', 'max'] to specify the objective as
minimization or maximization.
- sliding_window_size: An int to specify the size of the sliding windown
+ sliding_window_size: An int to specify the size of the sliding window
(for experimental purposes).
"""
self._result_type_name = result_type_name # for example 'mse' or 'mae'
diff --git a/website/docs/Getting-Started.md b/website/docs/Getting-Started.md
index 1ba0d8863d..718f3685fc 100644
--- a/website/docs/Getting-Started.md
+++ b/website/docs/Getting-Started.md
@@ -22,7 +22,7 @@ There are several ways of using flaml:
#### (New) [Autogen](/docs/Use-Cases/Autogen)
-Autogen enables the next-gen GPT-X applications with a multi-agent conversation framework.
+Autogen enables the next-gen GPT-X applications with a generic multi-agent conversation framework.
It offers customizable and conversable agents which integrate LLMs, tools, and humans.
By automating chat among multiple capable agents, one can easily make them collectively perform tasks autonomously or with human feedback, including tasks that require using tools via code. For example,
```python
From 188c0520ec75cd44f6f26a1a149dfa7183e8247f Mon Sep 17 00:00:00 2001
From: Chi Wang
Date: Sun, 13 Aug 2023 15:42:37 +0000
Subject: [PATCH 06/13] doc
---
notebook/autogen_agentchat_MathChat.ipynb | 4 +-
notebook/autogen_agentchat_RetrieveChat.ipynb | 21 ++-
...at_auto_feedback_from_code_execution.ipynb | 2 +-
notebook/autogen_agentchat_chess.ipynb | 5 +-
.../autogen_agentchat_function_call.ipynb | 2 +-
notebook/autogen_agentchat_groupchat.ipynb | 5 +-
.../autogen_agentchat_human_feedback.ipynb | 2 +-
notebook/autogen_agentchat_planning.ipynb | 3 +-
notebook/autogen_agentchat_stream.ipynb | 2 +-
notebook/autogen_agentchat_two_users.ipynb | 2 +-
notebook/autogen_agentchat_web_info.ipynb | 2 +-
notebook/autogen_chatgpt_gpt4.ipynb | 2 +-
notebook/autogen_openai_completion.ipynb | 4 +-
website/docs/Examples/AutoGen-AgentChat.md | 15 ++
website/docs/Examples/AutoGen-OpenAI.md | 140 +-----------------
website/docs/Getting-Started.md | 2 +-
website/docs/Use-Cases/Autogen.md | 34 ++---
17 files changed, 71 insertions(+), 176 deletions(-)
create mode 100644 website/docs/Examples/AutoGen-AgentChat.md
diff --git a/notebook/autogen_agentchat_MathChat.ipynb b/notebook/autogen_agentchat_MathChat.ipynb
index cb9f2469d1..1194f96d4e 100644
--- a/notebook/autogen_agentchat_MathChat.ipynb
+++ b/notebook/autogen_agentchat_MathChat.ipynb
@@ -15,7 +15,9 @@
"source": [
"# Auto Generated Agent Chat: Using MathChat to Solve Math Problems\n",
"\n",
- "MathChat is a convesational framework for math problem solving. In this notebook, we demonstrate how to use MathChat to solve math problems. MathChat uses the `AssistantAgent` and `MathUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `MathUserProxyAgent` implements a different auto reply mechanism corresponding to the MathChat prompts. The original implementation and exeperiments of MathChat are in this [branch](https://github.com/kevin666aa/FLAML/tree/gpt_math_solver/flaml/autogen/math), and you can find more details in our paper [An Empirical Study on Challenging Math Problem Solving with GPT-4](https://arxiv.org/abs/2306.01337).\n",
+ "`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
+ "\n",
+ "MathChat is an experimental convesational framework for math problem solving. In this notebook, we demonstrate how to use MathChat to solve math problems. MathChat uses the `AssistantAgent` and `MathUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `MathUserProxyAgent` implements a different auto reply mechanism corresponding to the MathChat prompts. You can find more details in the paper [An Empirical Study on Challenging Math Problem Solving with GPT-4](https://arxiv.org/abs/2306.01337) or the [blogpost](https://microsoft.github.io/FLAML/blog/2023/06/28/MathChat).\n",
"\n",
"## Requirements\n",
"\n",
diff --git a/notebook/autogen_agentchat_RetrieveChat.ipynb b/notebook/autogen_agentchat_RetrieveChat.ipynb
index e031527bfd..fb2ab724b9 100644
--- a/notebook/autogen_agentchat_RetrieveChat.ipynb
+++ b/notebook/autogen_agentchat_RetrieveChat.ipynb
@@ -8,18 +8,21 @@
"\n",
"# Auto Generated Agent Chat: Using RetrieveChat for Retrieve Augmented Code Generation and Question Answering\n",
"\n",
- "RetrieveChat is a convesational framework for retrieve augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentations that are not present in the LLM's training dataset. RetrieveChat uses the `RetrieveAssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb)). Essentially,`RetrieveAssistantAgent` and `RetrieveUserProxyAgent` implements a different auto reply mechanism corresponding to the RetrieveChat prompts.\n",
+ "`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation.\n",
+ "Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
+ "\n",
+ "RetrieveChat is a convesational system for retrieve augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentations that are not present in the LLM's training dataset. RetrieveChat uses the `RetrieveAssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `RetrieveAssistantAgent` and `RetrieveUserProxyAgent` implement a different auto-reply mechanism corresponding to the RetrieveChat prompts.\n",
"\n",
"## Table of Contents\n",
"We'll demonstrates five examples of using RetrieveChat for code generation and question answering:\n",
"\n",
- "[Example 1: Generate code based off docstrings w/o human feedbacks](#example-1)\n",
+ "[Example 1: Generate code based off docstrings w/o human feedback](#example-1)\n",
"\n",
- "[Example 2: Answer a question based off docstrings w/o human feedbacks](#example-2)\n",
+ "[Example 2: Answer a question based off docstrings w/o human feedback](#example-2)\n",
"\n",
- "[Example 3: Generate code based off docstrings w/ human feedbacks](#example-3)\n",
+ "[Example 3: Generate code based off docstrings w/ human feedback](#example-3)\n",
"\n",
- "[Example 4: Answer a question based off docstrings w/ human feedbacks](#example-4)\n",
+ "[Example 4: Answer a question based off docstrings w/ human feedback](#example-4)\n",
"\n",
"[Example 5: Solve comprehensive QA problems with RetrieveChat's unique feature `Update Context`](#example-5)\n",
"\n",
@@ -42,6 +45,7 @@
]
},
{
+ "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -86,6 +90,7 @@
]
},
{
+ "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -175,6 +180,7 @@
]
},
{
+ "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -1076,6 +1082,7 @@
]
},
{
+ "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -2409,6 +2416,7 @@
]
},
{
+ "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -3266,6 +3274,7 @@
]
},
{
+ "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -4431,6 +4440,7 @@
]
},
{
+ "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -4961,6 +4971,7 @@
]
},
{
+ "attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
diff --git a/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb b/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb
index 35fd5301dd..b0da606cc8 100644
--- a/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb
+++ b/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb
@@ -19,7 +19,7 @@
"source": [
"# Auto Generated Agent Chat: Task Solving with Code Generation, Execution & Debugging\n",
"\n",
- "FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
+ "`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"\n",
"In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to write code and execute the code. Here `AssistantAgent` is an LLM-based agent that can write Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for the human user to execute the code written by `AssistantAgent`, or automatically execute the code. Depending on the setting of `human_input_mode` and `max_consecutive_auto_reply`, the `UserProxyAgent` either solicits feedback from the human user or returns auto-feedback based on the result of code execution (success or failure and corresponding outputs) to `AssistantAgent`. `AssistantAgent` will debug the code and suggest new code if the result contains error. The two agents keep communicating to each other until the task is done.\n",
diff --git a/notebook/autogen_agentchat_chess.ipynb b/notebook/autogen_agentchat_chess.ipynb
index f7dbf41d60..dccbda0ecb 100644
--- a/notebook/autogen_agentchat_chess.ipynb
+++ b/notebook/autogen_agentchat_chess.ipynb
@@ -15,7 +15,10 @@
"source": [
"# Auto Generated Agent Chat: Chess Game Playing While Chitchatting by GPT-4 Agents\n",
"\n",
- "Modified based on https://github.com/ekzhu/FLAML/blob/evaluation/evaluation/chess/play_chess.ipynb\n",
+ "`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation.\n",
+ "Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
+ "\n",
+ "This notebook is modified based on https://github.com/ekzhu/FLAML/blob/evaluation/evaluation/chess/play_chess.ipynb\n",
"\n",
"## Requirements\n",
"\n",
diff --git a/notebook/autogen_agentchat_function_call.ipynb b/notebook/autogen_agentchat_function_call.ipynb
index 423e2542fe..eba4522203 100644
--- a/notebook/autogen_agentchat_function_call.ipynb
+++ b/notebook/autogen_agentchat_function_call.ipynb
@@ -17,7 +17,7 @@
"source": [
"# Auto Generated Agent Chat: Task Solving with Provided Tools as Functions\n",
"\n",
- "FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
+ "`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"\n",
"In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to make function calls with the new feature of OpenAI models (in model version 0613). A specified prompt and function configs need to be passed to `AssistantAgent` to initialize the agent. The corresponding functions need to be passed to `UserProxyAgent`, which will be responsible for executing any function calls made by `AssistantAgent`. Besides this requirement of matching descriptions with functions, we recommend checking the system message in the `AssistantAgent` to make sure the instructions align with the function call descriptions.\n",
"\n",
diff --git a/notebook/autogen_agentchat_groupchat.ipynb b/notebook/autogen_agentchat_groupchat.ipynb
index c8cdf32630..12657c24a5 100644
--- a/notebook/autogen_agentchat_groupchat.ipynb
+++ b/notebook/autogen_agentchat_groupchat.ipynb
@@ -15,7 +15,10 @@
"source": [
"# Auto Generated Agent Chat: Group Chat\n",
"\n",
- "Modified based on https://github.com/microsoft/FLAML/blob/4ea686af5c3e8ff24d9076a7a626c8b28ab5b1d7/notebook/autogen_multiagent_roleplay_chat.ipynb\n",
+ "`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation.\n",
+ "Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
+ "\n",
+ "This notebook is modified based on https://github.com/microsoft/FLAML/blob/4ea686af5c3e8ff24d9076a7a626c8b28ab5b1d7/notebook/autogen_multiagent_roleplay_chat.ipynb\n",
"\n",
"## Requirements\n",
"\n",
diff --git a/notebook/autogen_agentchat_human_feedback.ipynb b/notebook/autogen_agentchat_human_feedback.ipynb
index 1e8bf8271c..50a6a8a063 100644
--- a/notebook/autogen_agentchat_human_feedback.ipynb
+++ b/notebook/autogen_agentchat_human_feedback.ipynb
@@ -19,7 +19,7 @@
"source": [
"# Auto Generated Agent Chat: Task Solving with Code Generation, Execution, Debugging & Human Feedback\n",
"\n",
- "FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
+ "`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"\n",
"In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to solve a challenging math problem with human feedback. Here `AssistantAgent` is an LLM-based agent that can write Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for a user to execute the code written by `AssistantAgent`. By setting `human_input_mode` properly, the `UserProxyAgent` can also prompt the user for feedback to `AssistantAgent`. For example, when `human_input_mode` is set to \"ALWAYS\", the `UserProxyAgent` will always prompt the user for feedback. When user feedback is provided, the `UserProxyAgent` will directly pass the feedback to `AssistantAgent`. When no user feedback is provided, the `UserProxyAgent` will execute the code written by `AssistantAgent` and return the execution results (success or failure and corresponding outputs) to `AssistantAgent`.\n",
diff --git a/notebook/autogen_agentchat_planning.ipynb b/notebook/autogen_agentchat_planning.ipynb
index 206a46f8d7..52ad4e2c03 100644
--- a/notebook/autogen_agentchat_planning.ipynb
+++ b/notebook/autogen_agentchat_planning.ipynb
@@ -19,7 +19,8 @@
"source": [
"# Auto Generated Agent Chat: Collaborative Task Solving with Coding and Planning Agent\n",
"\n",
- "FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
+ "`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation.\n",
+ "Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"\n",
"In this notebook, we demonstrate how to use multiple agents to work together and accomplish a task which requires finding info from the web and coding. `AssistantAgent` is an LLM-based agent that can write and debug Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for a user to execute the code written by `AssistantAgent`. We further create a planning agent for the assistant agent to consult. The planning agent is a variation of the LLM-based `AssistantAgent` with a different system message.\n",
"\n",
diff --git a/notebook/autogen_agentchat_stream.ipynb b/notebook/autogen_agentchat_stream.ipynb
index 4dc43a57ec..0bc4d7ad34 100644
--- a/notebook/autogen_agentchat_stream.ipynb
+++ b/notebook/autogen_agentchat_stream.ipynb
@@ -19,7 +19,7 @@
"source": [
"# Interactive LLM Agent Dealing with Data Stream\n",
"\n",
- "`flaml.autogen` offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
+ "`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"\n",
"In this notebook, we demonstrate how to use customized agents to continuously acquires news from the web and ask for investment suggestions.\n",
diff --git a/notebook/autogen_agentchat_two_users.ipynb b/notebook/autogen_agentchat_two_users.ipynb
index 2c78f8636e..29cbd0685e 100644
--- a/notebook/autogen_agentchat_two_users.ipynb
+++ b/notebook/autogen_agentchat_two_users.ipynb
@@ -19,7 +19,7 @@
"source": [
"# Auto Generated Agent Chat: Collaborative Task Solving with Multiple Agents and Human Users\n",
"\n",
- "FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
+ "`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"\n",
"In this notebook, we demonstrate an application involving multiple agents and human users to work together and accomplish a task. `AssistantAgent` is an LLM-based agent that can write Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for a user to execute the code written by `AssistantAgent`. We create multiple `UserProxyAgent` instances which can represent different human users.\n",
"\n",
diff --git a/notebook/autogen_agentchat_web_info.ipynb b/notebook/autogen_agentchat_web_info.ipynb
index 78036e4036..88148ba898 100644
--- a/notebook/autogen_agentchat_web_info.ipynb
+++ b/notebook/autogen_agentchat_web_info.ipynb
@@ -19,7 +19,7 @@
"source": [
"# Auto Generated Agent Chat: Solving Tasks Requiring Web Info\n",
"\n",
- "FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
+ "`flaml.autogen` offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framwork allows tool use and human participance through multi-agent conversation.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Autogen#agents).\n",
"\n",
"In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to perform tasks which require acquiring info from the web:\n",
diff --git a/notebook/autogen_chatgpt_gpt4.ipynb b/notebook/autogen_chatgpt_gpt4.ipynb
index 122d8ce679..2ad947a136 100644
--- a/notebook/autogen_chatgpt_gpt4.ipynb
+++ b/notebook/autogen_chatgpt_gpt4.ipynb
@@ -23,7 +23,7 @@
"\n",
"# Use FLAML to Tune ChatGPT\n",
"\n",
- "FLAML offers a cost-effective hyperparameter optimization technique [EcoOptiGen](https://arxiv.org/abs/2303.04673) for tuning Large Language Models. Our study finds that tuning hyperparameters can significantly improve the utility of LLMs.\n",
+ "`flaml.autogen` offers a cost-effective hyperparameter optimization technique [EcoOptiGen](https://arxiv.org/abs/2303.04673) for tuning Large Language Models. The study finds that tuning hyperparameters can significantly improve the utility of LLMs.\n",
"\n",
"In this notebook, we tune OpenAI ChatGPT (both GPT-3.5 and GPT-4) models for math problem solving. We use [the MATH benchmark](https://crfm.stanford.edu/helm/latest/?group=math_chain_of_thought) for measuring mathematical problem solving on competition math problems with chain-of-thoughts style reasoning.\n",
"\n",
diff --git a/notebook/autogen_openai_completion.ipynb b/notebook/autogen_openai_completion.ipynb
index 76ba628883..cf8ebec583 100644
--- a/notebook/autogen_openai_completion.ipynb
+++ b/notebook/autogen_openai_completion.ipynb
@@ -23,9 +23,9 @@
"\n",
"# Use FLAML to Tune OpenAI Models\n",
"\n",
- "FLAML offers a cost-effective hyperparameter optimization technique [EcoOptiGen](https://arxiv.org/abs/2303.04673) for tuning Large Language Models. Our study finds that tuning hyperparameters can significantly improve the utility of LLMs.\n",
+ "`flaml.autogen` offers a cost-effective hyperparameter optimization technique [EcoOptiGen](https://arxiv.org/abs/2303.04673) for tuning Large Language Models. The research study finds that tuning hyperparameters can significantly improve the utility of LLMs.\n",
"\n",
- "In this notebook, we tune OpenAI models for code generation. We use [the HumanEval benchmark](https://huggingface.co/datasets/openai_humaneval) released by OpenAI for synthesizing programs from docstrings. \n",
+ "In this notebook, we tune OpenAI models for code generation. We use [the HumanEval benchmark](https://huggingface.co/datasets/openai_humaneval) released by OpenAI for synthesizing programs from docstrings.\n",
"\n",
"## Requirements\n",
"\n",
diff --git a/website/docs/Examples/AutoGen-AgentChat.md b/website/docs/Examples/AutoGen-AgentChat.md
new file mode 100644
index 0000000000..a136aa71ef
--- /dev/null
+++ b/website/docs/Examples/AutoGen-AgentChat.md
@@ -0,0 +1,15 @@
+# AutoGen - Automated Multi Agent Chat
+
+`flaml.autogen` offers conversable agents powered by LLMs, tools, or humans, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation via multi-agent conversation.
+Please find documentation about this feature [here](/docs/Use-Cases/Autogen#agents).
+
+Links to notebook examples:
+* [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb)
+* [Auto Code Generation, Execution, Debugging and Human Feedback](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_human_feedback.ipynb)
+* [Solve Tasks Requiring Web Info](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_web_info.ipynb)
+* [Use Provided Tools as Functions](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_function_call.ipynb)
+* [Automated Task Solving with Coding & Planning Agents](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_planning.ipynb)
+* [Automated Task Solving with GPT-4 + Multiple Human Users](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_two_users.ipynb)
+* [Automated Chess Game Playing & Chitchatting by GPT-4 Agents](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_chess.ipynb)
+* [Automated Task Solving by Group Chat](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_groupchat.ipynb)
+* [Automated Continual Learning from New Data](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_stream.ipynb)
diff --git a/website/docs/Examples/AutoGen-OpenAI.md b/website/docs/Examples/AutoGen-OpenAI.md
index d8dc9c8a1d..b21ffc5beb 100644
--- a/website/docs/Examples/AutoGen-OpenAI.md
+++ b/website/docs/Examples/AutoGen-OpenAI.md
@@ -1,138 +1,8 @@
# AutoGen - Tune GPT Models
-FLAML offers a cost-effective hyperparameter optimization technique [EcoOptiGen](https://arxiv.org/abs/2303.04673) for tuning Large Language Models. Our study finds that tuning hyperparameters can significantly improve the utility of them.
-In this example, we will tune several hyperparameters of OpenAI models, including the temperature, prompt, n (number of completions) and max_tokens, to optimize the inference performance for a code generation task.
+`flaml.autogen` offers a cost-effective hyperparameter optimization technique [EcoOptiGen](https://arxiv.org/abs/2303.04673) for tuning Large Language Models. The study finds that tuning hyperparameters can significantly improve their utility.
+Please find documentation about this feature [here](/docs/Use-Cases/Autogen#enhanced-inference).
-### Prerequisites
-
-Install the [autogen,blendsearch] option.
-```bash
-pip install "flaml[autogen,blendsearch] datasets"
-```
-
-Setup your OpenAI key:
-```python
-import os
-
-if "OPENAI_API_KEY" not in os.environ:
- os.environ["OPENAI_API_KEY"] = ""
-```
-
-If you use Azure OpenAI, set up Azure using the following code:
-
-```python
-import openai
-
-openai.api_type = "azure"
-openai.api_base = "https://.openai.azure.com/"
-openai.api_version = "2023-03-15-preview" # change if necessary
-```
-
-### Load the dataset
-
-We use the HumanEval dataset as an example. The dataset contains 164 examples. We use the first 20 for tuning the generation hyperparameters and the remaining for evaluation. In each example, the "prompt" is the prompt string for eliciting the code generation, "test" is the Python code for unit test for the example, and "entry_point" is the function name to be tested.
-
-```python
-import datasets
-
-seed = 41
-data = datasets.load_dataset("openai_humaneval")["test"].shuffle(seed=seed)
-n_tune_data = 20
-tune_data = [
- {
- "definition": data[x]["prompt"],
- "test": data[x]["test"],
- "entry_point": data[x]["entry_point"],
- }
- for x in range(n_tune_data)
-]
-test_data = [
- {
- "definition": data[x]["prompt"],
- "test": data[x]["test"],
- "entry_point": data[x]["entry_point"],
- }
- for x in range(n_tune_data, len(data))
-]
-```
-
-### Define the metric
-
-Before starting tuning, you need to define the metric for the optimization. For each code generation task, we can use the model to generate multiple candidate responses, and then select one from them. If the final selected response can pass a unit test, we consider the task as successfully solved. Then we can define the average success rate on a collection of tasks as the optimization metric.
-
-```python
-from functools import partial
-from flaml.autogen.code_utils import eval_function_completions, generate_assertions
-
-eval_with_generated_assertions = partial(
- eval_function_completions, assertions=generate_assertions,
-)
-```
-
-This function will first generate assertion statements for each problem. Then, it uses the assertions to select the generated responses.
-
-### Tune the hyperparameters
-
-The tuning will be performed under the specified optimization budgets.
-
-* inference_budget is the target average inference budget per instance in the benchmark. For example, 0.02 means the target inference budget is 0.02 dollars, which translates to 1000 tokens (input + output combined) if the text Davinci model is used.
-* optimization_budget is the total budget allowed to perform the tuning. For example, 5 means 5 dollars are allowed in total, which translates to 250K tokens for the text Davinci model.
-* num_samples is the number of different hyperparameter configurations allowed to try. The tuning will stop after either num_samples trials or after optimization_budget dollars are spent, whichever happens first. -1 means no hard restriction on the number of trials; the actual number is decided by optimization_budget.
-
-Users can specify tuning data, optimization metric, optimization mode, evaluation function, search spaces etc.
-
-```python
-from flaml import autogen
-
-config, analysis = autogen.Completion.tune(
- data=tune_data, # the data for tuning
- metric="success", # the metric to optimize
- mode="max", # the optimization mode
- eval_func=eval_with_generated_assertions, # the evaluation function to return the success metrics
- # log_file_name="logs/humaneval.log", # the log file name
- inference_budget=0.05, # the inference budget (dollar per instance)
- optimization_budget=3, # the optimization budget (dollar in total)
- # num_samples can further limit the number of trials for different hyperparameter configurations;
- # -1 means decided by the optimization budget only
- num_samples=-1,
- prompt=[
- "{definition}",
- "# Python 3{definition}",
- "Complete the following Python function:{definition}",
- ], # the prompt templates to choose from
- stop=[["\nclass", "\ndef", "\nif", "\nprint"], None], # the stop sequences
- allow_format_str_template=True,
-)
-```
-
-#### Output tuning results
-
-After the tuning, we can print out the optimized config and the result:
-
-```python
-print("optimized config", config)
-print("best result on tuning data", analysis.best_result)
-```
-
-#### Make a request with the tuned config
-
-We can apply the tuned config to the request for an instance:
-
-```python
-response = autogen.Completion.create(context=tune_data[1], **config)
-print(response)
-print(eval_with_generated_assertions(autogen.Completion.extract_text(response), **tune_data[1]))
-```
-
-#### Evaluate the success rate on the test data
-
-You can use `autogen.Completion.test` to evaluate the performance of an entire dataset with the tuned config.
-
-```python
-result = autogen.Completion.test(test_data, **config)
-print("performance on test data with the tuned config:", result)
-```
-
-The result will vary with the inference budget and optimization budget.
-
-[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_openai_completion.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/autogen_openai_completion.ipynb)
+Links to notebook examples:
+* [Optimize for Code Generation](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_openai_completion.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/autogen_openai_completion.ipynb)
+* [Optimize for Math](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_chatgpt_gpt4.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/autogen_chatgpt_gpt4.ipynb)
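For orientation, here is a minimal tuning sketch, assuming `tune_data` and an evaluation function `eval_func` have been prepared as in the linked code-generation notebook (the budgets are illustrative):

```python
from flaml import autogen

# a minimal sketch of autogen.Completion.tune; `tune_data` and `eval_func`
# are assumed to be prepared as in the linked notebook
config, analysis = autogen.Completion.tune(
    data=tune_data,          # instances used for tuning
    metric="success",        # the metric to optimize
    mode="max",              # the optimization mode
    eval_func=eval_func,     # evaluation function returning the metric
    inference_budget=0.05,   # target average inference cost per instance (dollars)
    optimization_budget=3,   # total tuning budget (dollars)
    num_samples=-1,          # configs to try; -1 lets the budgets decide
)
print("optimized config", config)
```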
diff --git a/website/docs/Getting-Started.md b/website/docs/Getting-Started.md
index 718f3685fc..a8889d7116 100644
--- a/website/docs/Getting-Started.md
+++ b/website/docs/Getting-Started.md
@@ -33,7 +33,7 @@ user_proxy.initiate_chat(assistant, message="Show me the YTD gain of 10 largest
# This initiates an automated chat between the two agents to solve the task
```
-Autogen also helps maximize the utility out of the expensive LLMs such as ChatGPT and GPT-4. It offers a drop-in replacement of `openai.Completion` or `openai.ChatCompletion` with powerful functionalites like tuning, caching, templating, filtering. For example, you can optimize generations by LLM with your own tuning data, success metrics and budgets.
+Autogen also helps maximize the utility of expensive LLMs such as ChatGPT and GPT-4. It offers a drop-in replacement of `openai.Completion` or `openai.ChatCompletion` with powerful functionalities like tuning, caching, error handling, and templating. For example, you can optimize generations by LLMs with your own tuning data, success metrics and budgets.
```python
# perform tuning
config, analysis = autogen.Completion.tune(
diff --git a/website/docs/Use-Cases/Autogen.md b/website/docs/Use-Cases/Autogen.md
index 80807fb911..7592e59a4f 100644
--- a/website/docs/Use-Cases/Autogen.md
+++ b/website/docs/Use-Cases/Autogen.md
@@ -1,23 +1,22 @@
-# AutoGen: AutoML for GPT-X Applications
+# AutoGen: Enabling Next-Gen GPT-X Applications
-`flaml.autogen` simplifies hard choices (such as model, prompt, inference parameters and orchestration choices) for developers when finding an optimal operating point in a large and complex design space of large language model (LLM) hierarchy, and offers a virtual interface to highly capable, economical, and fast LLM agents.
+`flaml.autogen` simplifies the orchestration, automation and optimization of a complex GPT-X workflow. It maximizes the performance of GPT-X models and mitigates their weaknesses. It enables building next-gen GPT-X applications based on multi-agent conversations with minimal effort.
## Features
-* An enhanced inference API as a drop-in replacement of `openai.Completion.create` or `openai.ChatCompletion.create`. It allows easy performance tuning and advanced usage patterns, including:
- - Leveraging [`flaml.tune`](/docs/reference/tune/tune) to adapt LLMs to applications, to maximize the utility out of using expensive foundation models and reduce the inference cost by using cheaper models or configurations which achieve equal or better performance.
- - Utilities like API unification, caching, error handling, multi-config inference, context programming etc.
-* A higher-level abstraction of using foundation models: intelligent agents which can perform tasks autonomously or with human feedback. The same abstraction allows both automated feedback and human feedback sent between agents, so that complex tasks can be accomplished via agent collaborations, including tasks that require using tools via code.
+* A unified multi-agent conversation framework as a high-level abstraction of using foundation models. It offers customizable and conversable agents which integrate LLMs, tools and humans.
+By automating chat among multiple capable agents, one can easily make them collectively perform tasks autonomously or with human feedback, including tasks that require using tools via code.
+* A drop-in replacement of `openai.Completion` or `openai.ChatCompletion` as an enhanced inference API. It allows easy performance tuning, utilities like API unification & caching, and advanced usage patterns, such as error handling, multi-config inference, context programming etc.
The package is under active development with more features upcoming.
## Agents
-[`flaml.autogen.agentchat`](/docs/reference/autogen/agentchat/agent) offers conversable agents which can adapt to human or simulated feedback. This subpackage is under active development.
+[`flaml.autogen.agentchat`](/docs/reference/autogen/agentchat/agent) offers a multi-agent conversation framework, featuring capable, customizable and conversable agents which integrate LLMs, tools and humans via automated agent chat.
### Basic Concept
-We have designed a generic `ResponsiveAgent` class for Agents that are capable of conversing with each other through the exchange of messages to collaboratively finish a task. An agent can communicate with other agents and perform actions. Different agents can differ in what actions they perform after receiving messages. Two representative subclasses are `AssistantAgent` and `UserProxyAgent`.
+We have designed a generic `ResponsiveAgent` class for Agents that are capable of conversing with each other through the exchange of messages to jointly finish a task. An agent can communicate with other agents and perform actions. Different agents can differ in what actions they perform after receiving messages. Two representative subclasses are `AssistantAgent` and `UserProxyAgent`.
- `AssistantAgent`. Designed to act as an assistant by responding to user requests. It can write Python code (in a Python coding block) for a user to execute when a message (typically a description of a task that needs to be solved) is received. Under the hood, the Python code is written by an LLM (e.g., GPT-4). It can also receive execution results and suggest code with bug fixes. Its behavior can be altered by passing a new system message. The LLM [inference](#enhanced-inference) configuration can be set via `llm_config`.
- `UserProxyAgent`. Serves as a proxy for the human user. Upon receiving a message, the UserProxyAgent will either solicit the human user's input or prepare an automatically generated reply. The chosen action depends on the settings of the `human_input_mode` and `max_consecutive_auto_reply` when the `UserProxyAgent` instance is constructed, and whether a human user input is available.
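As a concrete sketch of constructing these two agents (assuming a `config_list` has been loaded, e.g., via `autogen.config_list_from_json`; the task message is a placeholder):

```python
from flaml import autogen

# a minimal construction sketch; `config_list` is assumed to be loaded already
assistant = autogen.AssistantAgent("assistant", llm_config={"config_list": config_list})
user_proxy = autogen.UserProxyAgent(
    "user_proxy",
    human_input_mode="NEVER",       # never solicit human input; always auto-reply
    max_consecutive_auto_reply=10,  # cap the number of consecutive auto replies
)
user_proxy.initiate_chat(assistant, message="What day is it today? Write Python code to find out.")
```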
@@ -150,30 +149,26 @@ user_proxy.initiate_chat(
*Interested in trying it yourself? Please check the following notebook examples:*
* [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb)
-
* [Auto Code Generation, Execution, Debugging and Human Feedback](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_human_feedback.ipynb)
-
* [Solve Tasks Requiring Web Info](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_web_info.ipynb)
-
* [Use Provided Tools as Functions](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_function_call.ipynb)
-
* [Automated Task Solving with Coding & Planning Agents](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_planning.ipynb)
-
* [Automated Task Solving with GPT-4 + Multiple Human Users](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_two_users.ipynb)
-
* [Automated Chess Game Playing & Chitchatting by GPT-4 Agents](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_chess.ipynb)
-
* [Automated Task Solving by Group Chat](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_groupchat.ipynb)
-
* [Automated Continual Learning from New Data](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_stream.ipynb)
## Enhanced Inference
One can use [`flaml.autogen.Completion.create`](/docs/reference/autogen/oai/completion#create) to perform inference.
-There are a number of benefits of using `autogen` to perform inference.
+There are a number of benefits of using `autogen` to perform inference: performance tuning, API unification, caching, error handling, multi-config inference, result filtering, templating and so on.
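For example, a minimal inference sketch (the model name and prompt are placeholders, and your OpenAI API key is assumed to be set in the environment):

```python
from flaml import autogen

# a minimal sketch of flaml.autogen.Completion.create;
# the model and prompt here are placeholders
response = autogen.Completion.create(
    model="gpt-3.5-turbo",
    prompt="What is the capital of France?",
)
print(autogen.Completion.extract_text(response))
```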
### Tune Inference Parameters
+*Links to notebook examples:*
+* [Optimize for Code Generation](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_openai_completion.ipynb)
+* [Optimize for Math](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_chatgpt_gpt4.ipynb)
+
#### Choices to optimize
The cost of using foundation models for text generation is typically measured in terms of the number of tokens in the input and output combined. From the perspective of an application builder using foundation models, the use case is to maximize the utility of the generated text under an inference budget constraint (e.g., measured by the average dollar cost needed to solve a coding problem). This can be achieved by optimizing the hyperparameters of the inference,
@@ -271,11 +266,6 @@ The returned `config` contains the optimized configuration and `analysis` contai
The tuned config can be used to perform inference.
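For instance, a sketch assuming `config` was returned by `autogen.Completion.tune` and `tune_data` holds context instances as in the tuning step:

```python
# apply the tuned config to one instance (a sketch)
response = autogen.Completion.create(context=tune_data[0], **config)
print(autogen.Completion.extract_text(response))
```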
-*Refer to this [page](/docs/Examples/AutoGen-OpenAI) for a full example. Or check the following notebook examples:*
-* [Optimize for Code Generation](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_openai_completion.ipynb)
-* [Optimize for Math](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_chatgpt_gpt4.ipynb)
-
-
### API unification
`flaml.autogen.Completion.create` is compatible with both `openai.Completion.create` and `openai.ChatCompletion.create`, and both OpenAI API and Azure OpenAI API. So models such as "text-davinci-003", "gpt-3.5-turbo" and "gpt-4" can share a common API.
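A short sketch of what this unification looks like in practice (the prompt is a placeholder):

```python
# one call signature across completion- and chat-based models (a sketch)
for model in ["text-davinci-003", "gpt-3.5-turbo", "gpt-4"]:
    response = autogen.Completion.create(model=model, prompt="1 + 1 =")
    print(model, autogen.Completion.extract_text(response)[0])
```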
From 766b475bf7a0c3b1e0443892e17d5308988a2efd Mon Sep 17 00:00:00 2001
From: Chi Wang
Date: Sun, 13 Aug 2023 16:01:16 +0000
Subject: [PATCH 07/13] url
---
notebook/autogen_chatgpt_gpt4.ipynb | 1 +
notebook/autogen_openai_completion.ipynb | 1 +
website/docs/Examples/AutoGen-OpenAI.md | 2 +-
3 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/notebook/autogen_chatgpt_gpt4.ipynb b/notebook/autogen_chatgpt_gpt4.ipynb
index 2ad947a136..04007d33f8 100644
--- a/notebook/autogen_chatgpt_gpt4.ipynb
+++ b/notebook/autogen_chatgpt_gpt4.ipynb
@@ -24,6 +24,7 @@
"# Use FLAML to Tune ChatGPT\n",
"\n",
"`flaml.autogen` offers a cost-effective hyperparameter optimization technique [EcoOptiGen](https://arxiv.org/abs/2303.04673) for tuning Large Language Models. The study finds that tuning hyperparameters can significantly improve the utility of LLMs.\n",
+ "Please find documentation about this feature [here](/docs/Use-Cases/AutoGen#enhanced-inference).\n",
"\n",
"In this notebook, we tune OpenAI ChatGPT (both GPT-3.5 and GPT-4) models for math problem solving. We use [the MATH benchmark](https://crfm.stanford.edu/helm/latest/?group=math_chain_of_thought) for measuring mathematical problem solving on competition math problems with chain-of-thoughts style reasoning.\n",
"\n",
diff --git a/notebook/autogen_openai_completion.ipynb b/notebook/autogen_openai_completion.ipynb
index cf8ebec583..2194fd8bf1 100644
--- a/notebook/autogen_openai_completion.ipynb
+++ b/notebook/autogen_openai_completion.ipynb
@@ -24,6 +24,7 @@
"# Use FLAML to Tune OpenAI Models\n",
"\n",
"`flaml.autogen` offers a cost-effective hyperparameter optimization technique [EcoOptiGen](https://arxiv.org/abs/2303.04673) for tuning Large Language Models. The research study finds that tuning hyperparameters can significantly improve the utility of LLMs.\n",
+ "Please find documentation about this feature [here](/docs/Use-Cases/AutoGen#enhanced-inference).\n",
"\n",
"In this notebook, we tune OpenAI models for code generation. We use [the HumanEval benchmark](https://huggingface.co/datasets/openai_humaneval) released by OpenAI for synthesizing programs from docstrings.\n",
"\n",
diff --git a/website/docs/Examples/AutoGen-OpenAI.md b/website/docs/Examples/AutoGen-OpenAI.md
index b21ffc5beb..f86b2328fc 100644
--- a/website/docs/Examples/AutoGen-OpenAI.md
+++ b/website/docs/Examples/AutoGen-OpenAI.md
@@ -1,7 +1,7 @@
# AutoGen - Tune GPT Models
`flaml.autogen` offers a cost-effective hyperparameter optimization technique [EcoOptiGen](https://arxiv.org/abs/2303.04673) for tuning Large Language Models. The research study finds that tuning hyperparameters can significantly improve their utility.
-Please find documentation about this feature [here](/docs/Use-Cases/Auto-Generation#enhanced-inference).
+Please find documentation about this feature [here](/docs/Use-Cases/Autogen#enhanced-inference).
Links to notebook examples:
* [Optimize for Code Generation](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_openai_completion.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/autogen_openai_completion.ipynb)
From 3d4978f3a724f50efcd9a77945af4536bc211182 Mon Sep 17 00:00:00 2001
From: Chi Wang
Date: Sun, 13 Aug 2023 18:21:35 +0000
Subject: [PATCH 08/13] endpoints
---
notebook/autogen_agentchat_MathChat.ipynb | 4 +++-
...gentchat_auto_feedback_from_code_execution.ipynb | 2 +-
notebook/autogen_agentchat_chess.ipynb | 4 ++--
notebook/autogen_agentchat_groupchat.ipynb | 4 ++--
notebook/autogen_agentchat_planning.ipynb | 2 +-
notebook/autogen_agentchat_two_users.ipynb | 2 +-
notebook/autogen_agentchat_web_info.ipynb | 3 +--
notebook/autogen_openai_completion.ipynb | 1 +
test/autogen/agentchat/test_assistant_agent.py | 3 ++-
.../autogen/agentchat/test_math_user_proxy_agent.py | 2 +-
test/autogen/oai/test_completion.py | 13 +++++++------
11 files changed, 22 insertions(+), 18 deletions(-)
diff --git a/notebook/autogen_agentchat_MathChat.ipynb b/notebook/autogen_agentchat_MathChat.ipynb
index cb9f2469d1..7a2dfbd19c 100644
--- a/notebook/autogen_agentchat_MathChat.ipynb
+++ b/notebook/autogen_agentchat_MathChat.ipynb
@@ -60,11 +60,13 @@
" \"gpt4\",\n",
" \"gpt-4-32k\",\n",
" \"gpt-4-32k-0314\",\n",
+ " \"gpt-4-32k-v0314\",\n",
" \"gpt-3.5-turbo\",\n",
" \"gpt-3.5-turbo-16k\",\n",
" \"gpt-3.5-turbo-0301\",\n",
" \"chatgpt-35-turbo-0301\",\n",
" \"gpt-35-turbo-v0301\",\n",
+ " \"gpt\",\n",
" }\n",
" }\n",
")"
@@ -75,7 +77,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "It first looks for environment variable \"OAI_CONFIG_LIST\" which needs to be a valid json string. If that variable is not found, it then looks for a json file named \"OAI_CONFIG_LIST\". It filters the configs by models (you can filter by other keys as well). Only the gpt-4 and gpt-3.5-turbo models are kept in the list based on the filter condition.\n",
+ "It first looks for environment variable \"OAI_CONFIG_LIST\" which needs to be a valid json string. If that variable is not found, it then looks for a json file named \"OAI_CONFIG_LIST\". It filters the configs by models (you can filter by other keys as well).\n",
"\n",
"The config list looks like the following:\n",
"```python\n",
diff --git a/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb b/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb
index 35fd5301dd..a7ce9c424c 100644
--- a/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb
+++ b/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb
@@ -69,7 +69,7 @@
"config_list = autogen.config_list_from_json(\n",
" \"OAI_CONFIG_LIST\",\n",
" filter_dict={\n",
- " \"model\": [\"gpt-4\", \"gpt4\", \"gpt-4-32k\", \"gpt-4-32k-0314\"],\n",
+ " \"model\": [\"gpt-4\", \"gpt4\", \"gpt-4-32k\", \"gpt-4-32k-0314\", \"gpt-4-32k-v0314\"],\n",
" },\n",
")"
]
diff --git a/notebook/autogen_agentchat_chess.ipynb b/notebook/autogen_agentchat_chess.ipynb
index f7dbf41d60..2a329a95b0 100644
--- a/notebook/autogen_agentchat_chess.ipynb
+++ b/notebook/autogen_agentchat_chess.ipynb
@@ -67,7 +67,7 @@
"config_list_gpt4 = autogen.config_list_from_json(\n",
" \"OAI_CONFIG_LIST\",\n",
" filter_dict={\n",
- " \"model\": [\"gpt-4\", \"gpt4\", \"gpt-4-32k\", \"gpt-4-32k-0314\"],\n",
+ " \"model\": [\"gpt-4\", \"gpt4\", \"gpt-4-32k\", \"gpt-4-32k-0314\", \"gpt-4-32k-v0314\"],\n",
" },\n",
")\n",
"# config_list_gpt35 = autogen.config_list_from_json(\n",
@@ -1027,7 +1027,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.16"
+ "version": "3.9.17"
},
"orig_nbformat": 4
},
diff --git a/notebook/autogen_agentchat_groupchat.ipynb b/notebook/autogen_agentchat_groupchat.ipynb
index c8cdf32630..d1f0d998ae 100644
--- a/notebook/autogen_agentchat_groupchat.ipynb
+++ b/notebook/autogen_agentchat_groupchat.ipynb
@@ -56,7 +56,7 @@
"config_list_gpt4 = autogen.config_list_from_json(\n",
" \"OAI_CONFIG_LIST\",\n",
" filter_dict={\n",
- " \"model\": [\"gpt-4\", \"gpt4\", \"gpt-4-32k\", \"gpt-4-32k-0314\"],\n",
+ " \"model\": [\"gpt-4\", \"gpt4\", \"gpt-4-32k\", \"gpt-4-32k-0314\", \"gpt-4-32k-v0314\"],\n",
" },\n",
")\n",
"# config_list_gpt35 = autogen.config_list_from_json(\n",
@@ -472,7 +472,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.16"
+ "version": "3.9.17"
},
"orig_nbformat": 4
},
diff --git a/notebook/autogen_agentchat_planning.ipynb b/notebook/autogen_agentchat_planning.ipynb
index 206a46f8d7..cbc97d2ca4 100644
--- a/notebook/autogen_agentchat_planning.ipynb
+++ b/notebook/autogen_agentchat_planning.ipynb
@@ -74,7 +74,7 @@
"config_list = autogen.config_list_from_json(\n",
" \"OAI_CONFIG_LIST\",\n",
" filter_dict={\n",
- " \"model\": [\"gpt-4\", \"gpt4\", \"gpt-4-32k\", \"gpt-4-32k-0314\"],\n",
+ " \"model\": [\"gpt-4\", \"gpt4\", \"gpt-4-32k\", \"gpt-4-32k-0314\", \"gpt-4-32k-v0314\"],\n",
" },\n",
")"
]
diff --git a/notebook/autogen_agentchat_two_users.ipynb b/notebook/autogen_agentchat_two_users.ipynb
index 2c78f8636e..48cbde6e22 100644
--- a/notebook/autogen_agentchat_two_users.ipynb
+++ b/notebook/autogen_agentchat_two_users.ipynb
@@ -74,7 +74,7 @@
"config_list = autogen.config_list_from_json(\n",
" \"OAI_CONFIG_LIST\",\n",
" filter_dict={\n",
- " \"model\": [\"gpt-4\", \"gpt4\", \"gpt-4-32k\", \"gpt-4-32k-0314\"],\n",
+ " \"model\": [\"gpt-4\", \"gpt4\", \"gpt-4-32k\", \"gpt-4-32k-0314\", \"gpt-4-32k-v0314\"],\n",
" },\n",
")"
]
diff --git a/notebook/autogen_agentchat_web_info.ipynb b/notebook/autogen_agentchat_web_info.ipynb
index 78036e4036..7ca2431d10 100644
--- a/notebook/autogen_agentchat_web_info.ipynb
+++ b/notebook/autogen_agentchat_web_info.ipynb
@@ -73,7 +73,7 @@
"config_list = autogen.config_list_from_json(\n",
" \"OAI_CONFIG_LIST\",\n",
" filter_dict={\n",
- " \"model\": [\"gpt4\", \"gpt-4-32k\", \"gpt-4-32k-0314\"],\n",
+ " \"model\": [\"gpt4\", \"gpt-4-32k\", \"gpt-4-32k-0314\", \"gpt-4-32k-v0314\"],\n",
" },\n",
")\n",
"\n",
@@ -81,7 +81,6 @@
" \"request_timeout\": 600,\n",
" \"seed\": 42,\n",
" \"config_list\": config_list,\n",
- " \"model\": \"gpt-4-32k\", # modify if the endpoint you use doesn't support this model\n",
" \"temperature\": 0,\n",
"}"
]
diff --git a/notebook/autogen_openai_completion.ipynb b/notebook/autogen_openai_completion.ipynb
index 76ba628883..bd42607767 100644
--- a/notebook/autogen_openai_completion.ipynb
+++ b/notebook/autogen_openai_completion.ipynb
@@ -111,6 +111,7 @@
" \"gpt-3.5-turbo-0301\",\n",
" \"chatgpt-35-turbo-0301\",\n",
" \"gpt-35-turbo-v0301\",\n",
+ " \"gpt\",\n",
" },\n",
" },\n",
")\n",
diff --git a/test/autogen/agentchat/test_assistant_agent.py b/test/autogen/agentchat/test_assistant_agent.py
index 0fff5a0f9d..e1e2c10b05 100644
--- a/test/autogen/agentchat/test_assistant_agent.py
+++ b/test/autogen/agentchat/test_assistant_agent.py
@@ -72,6 +72,7 @@ def test_gpt35(human_input_mode="NEVER", max_consecutive_auto_reply=5):
"gpt-3.5-turbo-0301",
"chatgpt-35-turbo-0301",
"gpt-35-turbo-v0301",
+ "gpt",
},
},
)
@@ -162,7 +163,7 @@ def test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10):
OAI_CONFIG_LIST,
file_location=KEY_LOC,
filter_dict={
- "model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314"],
+ "model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
},
)
hard_questions = [
diff --git a/test/autogen/agentchat/test_math_user_proxy_agent.py b/test/autogen/agentchat/test_math_user_proxy_agent.py
index 83c800a39f..a40ff718e9 100644
--- a/test/autogen/agentchat/test_math_user_proxy_agent.py
+++ b/test/autogen/agentchat/test_math_user_proxy_agent.py
@@ -28,7 +28,7 @@ def test_math_user_proxy_agent():
OAI_CONFIG_LIST,
file_location=KEY_LOC,
filter_dict={
- "model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314"],
+ "model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
},
)
assistant = AssistantAgent(
diff --git a/test/autogen/oai/test_completion.py b/test/autogen/oai/test_completion.py
index f169930a92..ba355fe1b5 100644
--- a/test/autogen/oai/test_completion.py
+++ b/test/autogen/oai/test_completion.py
@@ -137,6 +137,7 @@ def test_nocontext():
"gpt-3.5-turbo-0301",
"chatgpt-35-turbo-0301",
"gpt-35-turbo-v0301",
+ "gpt",
},
},
),
@@ -171,6 +172,7 @@ def test_humaneval(num_samples=1):
"gpt-3.5-turbo-0301",
"chatgpt-35-turbo-0301",
"gpt-35-turbo-v0301",
+ "gpt",
},
},
)
@@ -427,10 +429,9 @@ def my_average(results):
assert len(config_list) >= 3, config_list
openai.api_key = os.environ["OPENAI_API_KEY"]
- # test_filter()
+ test_filter()
test_chatcompletion()
- # test_multi_model()
- # test_improve()
- # test_nocontext()
- # test_humaneval(1)
- # test_math(1)
+ test_multi_model()
+ test_nocontext()
+ test_humaneval(1)
+ test_math(1)
From cae94bb8b22183b60fc846fd3009ee678c75d25a Mon Sep 17 00:00:00 2001
From: Chi Wang
Date: Sun, 13 Aug 2023 18:25:18 +0000
Subject: [PATCH 09/13] timeout
---
test/autogen/oai/test_completion.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/test/autogen/oai/test_completion.py b/test/autogen/oai/test_completion.py
index ba355fe1b5..52bc3a2fdb 100644
--- a/test/autogen/oai/test_completion.py
+++ b/test/autogen/oai/test_completion.py
@@ -254,6 +254,7 @@ def test_humaneval(num_samples=1):
messages=[{"role": "user", "content": "{definition}"}],
config_list=config_list,
allow_format_str_template=True,
+ request_timeout=120,
)
response = autogen.ChatCompletion.create(context=test_data[0], config_list=config_list, **config)
print(response)
From b493adefb78d4c20e8b874c331b13b7c956efe0d Mon Sep 17 00:00:00 2001
From: Chi Wang
Date: Sun, 13 Aug 2023 18:41:08 +0000
Subject: [PATCH 10/13] chess
---
notebook/autogen_agentchat_chess.ipynb | 29 ++++----------------------
1 file changed, 4 insertions(+), 25 deletions(-)
diff --git a/notebook/autogen_agentchat_chess.ipynb b/notebook/autogen_agentchat_chess.ipynb
index 27cae7682e..489a6c5aa5 100644
--- a/notebook/autogen_agentchat_chess.ipynb
+++ b/notebook/autogen_agentchat_chess.ipynb
@@ -146,18 +146,6 @@
"You translate user's natural language input into legal UCI moves.\n",
"You should only reply with a UCI move string extracted from user's input.\"\"\"\n",
"\n",
- "# user_msg_1 = \"\"\"Alright, let's get this game started. I'll move pawn from e2 to e4. That's a classic opening, isn't it? Your turn.\"\"\"\n",
- "# asst_msg_1 = \"e2e4\"\n",
- "# user_msg_2 = \"\"\"I am going to move my pawn from e7 to e5.\"\"\"\n",
- "# asst_msg_2 = \"e7e5\"\n",
- "\n",
- "# examples = [\n",
- "# {\"role\": \"user\", \"content\": user_msg_1},\n",
- "# {\"role\": \"assistant\", \"content\": asst_msg_1},\n",
- "# {\"role\": \"user\", \"content\": user_msg_2},\n",
- "# {\"role\": \"assistant\", \"content\": asst_msg_2},\n",
- "# ]\n",
- "\n",
"class BoardAgent(autogen.AssistantAgent):\n",
" board: chess.Board\n",
" correct_move_messages: Dict[autogen.Agent, List[Dict]]\n",
@@ -179,21 +167,15 @@
" sender: Optional[autogen.Agent] = None,\n",
" config: Optional[Any] = None,\n",
" ) -> Union[str, Dict, None]:\n",
- " # Filter for messages that do not contain error.\n",
" message = messages[-1]\n",
- " assert message.get(\"role\") == \"user\"\n",
" # extract a UCI move from player's message\n",
" reply = self.generate_reply(self.correct_move_messages[sender] + [message], sender, exclude=[BoardAgent._generate_board_reply])\n",
- " if isinstance(reply, str):\n",
- " uci_move = reply\n",
- " else:\n",
- " uci_move = str(reply[\"content\"])\n",
+ " uci_move = reply if isinstance(reply, str) else str(reply[\"content\"])\n",
" try:\n",
" self.board.push_uci(uci_move)\n",
" except ValueError as e:\n",
" # invalid move\n",
- " error = f\"Error: {e}\"\n",
- " return True, error\n",
+ " return True, f\"Error: {e}\"\n",
" else:\n",
" # valid move\n",
" m = chess.Move.from_uci(uci_move)\n",
@@ -229,10 +211,7 @@
" ):\n",
" if color not in [\"white\", \"black\"]:\n",
" raise ValueError(f\"color must be either white or black, but got {color}\")\n",
- " if color == \"white\":\n",
- " opponent_color = \"black\"\n",
- " else:\n",
- " opponent_color = \"white\"\n",
+ " opponent_color = \"black\" if color == \"white\" else \"white\"\n",
" name = f\"Player {color}\"\n",
" opponent_name = f\"Player {opponent_color}\"\n",
" sys_msg = sys_msg_tmpl.format(\n",
@@ -1030,7 +1009,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.17"
+ "version": "3.9.16"
},
"orig_nbformat": 4
},
From 0fa923aaf652091b99d59200a955e4ca4657478b Mon Sep 17 00:00:00 2001
From: Li Jiang
Date: Mon, 14 Aug 2023 11:12:11 +0800
Subject: [PATCH 11/13] Fix retrieve chat
---
flaml/autogen/agentchat/contrib/retrieve_assistant_agent.py | 6 +++---
.../autogen/agentchat/contrib/retrieve_user_proxy_agent.py | 6 +++---
notebook/autogen_agentchat_RetrieveChat.ipynb | 6 +++---
test/autogen/agentchat/test_retrievechat.py | 2 +-
4 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/flaml/autogen/agentchat/contrib/retrieve_assistant_agent.py b/flaml/autogen/agentchat/contrib/retrieve_assistant_agent.py
index b254e9e987..694d7d45da 100644
--- a/flaml/autogen/agentchat/contrib/retrieve_assistant_agent.py
+++ b/flaml/autogen/agentchat/contrib/retrieve_assistant_agent.py
@@ -22,10 +22,10 @@ def _generate_retrieve_assistant_reply(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
- context: Optional[Any] = None,
+ config: Optional[Any] = None,
) -> Tuple[bool, Union[str, Dict, None]]:
- if context is None:
- context = self
+ if config is None:
+ config = self
if messages is None:
messages = self._oai_messages[sender]
message = messages[-1]
diff --git a/flaml/autogen/agentchat/contrib/retrieve_user_proxy_agent.py b/flaml/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
index f513a38109..35fb3ad302 100644
--- a/flaml/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
+++ b/flaml/autogen/agentchat/contrib/retrieve_user_proxy_agent.py
@@ -207,10 +207,10 @@ def _generate_retrieve_user_reply(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
- context: Optional[Any] = None,
+ config: Optional[Any] = None,
) -> Tuple[bool, Union[str, Dict, None]]:
- if context is None:
- context = self
+ if config is None:
+ config = self
if messages is None:
messages = self._oai_messages[sender]
message = messages[-1]
diff --git a/notebook/autogen_agentchat_RetrieveChat.ipynb b/notebook/autogen_agentchat_RetrieveChat.ipynb
index fb2ab724b9..c4b5fd52db 100644
--- a/notebook/autogen_agentchat_RetrieveChat.ipynb
+++ b/notebook/autogen_agentchat_RetrieveChat.ipynb
@@ -29,7 +29,7 @@
"\n",
"## Requirements\n",
"\n",
- "FLAML requires `Python>=3.8`. To run this notebook example, please install flaml with the [mathchat] option.\n",
+ "FLAML requires `Python>=3.8`. To run this notebook example, please install flaml with the [retrievechat] option.\n",
"```bash\n",
"pip install flaml[retrievechat]\n",
"```"
@@ -41,7 +41,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# %pip install flaml[retrievechat]~=2.0.0rc5"
+ "# %pip install flaml[retrievechat]~=2.0.0"
]
},
{
@@ -132,7 +132,7 @@
"source": [
"## Construct agents for RetrieveChat\n",
"\n",
- "We start by initialzing the `RetrieveAssistantAgent` and `RetrieveUserProxyAgent`. The system message needs to be set to \"You are a helpful assistant.\" for RetrieveAssistantAgent. The detailed instructions are given in the user message. Later we will use the `RetrieveUserProxyAgent.generate_init_prompt` to combine the instructions and a math problem for an initial prompt to be sent to the LLM assistant."
+ "We start by initialzing the `RetrieveAssistantAgent` and `RetrieveUserProxyAgent`. The system message needs to be set to \"You are a helpful assistant.\" for RetrieveAssistantAgent. The detailed instructions are given in the user message. Later we will use the `RetrieveUserProxyAgent.generate_init_prompt` to combine the instructions and a retrieval augmented generation task for an initial prompt to be sent to the LLM assistant."
]
},
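A construction sketch for the two retrieval agents (the `retrieve_config` keys and the `docs_path` value are assumptions for illustration; `config_list` is assumed to be loaded as above):

```python
from flaml.autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent
from flaml.autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent

# a hedged sketch; the retrieve_config keys and docs_path are assumptions
assistant = RetrieveAssistantAgent(
    name="assistant",
    system_message="You are a helpful assistant.",
    llm_config={"config_list": config_list},
)
ragproxyagent = RetrieveUserProxyAgent(
    name="ragproxyagent",
    retrieve_config={"task": "qa", "docs_path": "path/to/docs"},  # assumed keys
)
ragproxyagent.initiate_chat(assistant, problem="What is FLAML?")
```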
{
diff --git a/test/autogen/agentchat/test_retrievechat.py b/test/autogen/agentchat/test_retrievechat.py
index e868f429fc..761665d575 100644
--- a/test/autogen/agentchat/test_retrievechat.py
+++ b/test/autogen/agentchat/test_retrievechat.py
@@ -59,7 +59,7 @@ def test_retrievechat():
assistant.reset()
code_problem = "How can I use FLAML to perform a classification task, set use_spark=True, train 30 seconds and force cancel jobs if time limit is reached."
- ragproxyagent.initiate_chat(assistant, problem=code_problem, search_string="spark")
+ ragproxyagent.initiate_chat(assistant, problem=code_problem, search_string="spark", silent=True)
print(conversations)
From 14d8e0bd82d99e59ce0188e696dd539c6aa4fdcd Mon Sep 17 00:00:00 2001
From: Chi Wang
Date: Mon, 14 Aug 2023 04:32:31 +0000
Subject: [PATCH 12/13] config
---
flaml/autogen/agentchat/contrib/math_user_proxy_agent.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/flaml/autogen/agentchat/contrib/math_user_proxy_agent.py b/flaml/autogen/agentchat/contrib/math_user_proxy_agent.py
index 702c043c91..556881aab1 100644
--- a/flaml/autogen/agentchat/contrib/math_user_proxy_agent.py
+++ b/flaml/autogen/agentchat/contrib/math_user_proxy_agent.py
@@ -280,6 +280,7 @@ def _generate_math_reply(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
+ config: Optional[Any] = None,
):
"""Generate an auto reply."""
if messages is None:
From 09a382cd4c1557830809cc4a2a92119d5e02c61f Mon Sep 17 00:00:00 2001
From: Chi Wang
Date: Mon, 14 Aug 2023 05:06:52 +0000
Subject: [PATCH 13/13] mathchat
---
.../contrib/math_user_proxy_agent.py | 2 +-
notebook/autogen_agentchat_MathChat.ipynb | 584 +-----------------
.../agentchat/test_math_user_proxy_agent.py | 15 +-
3 files changed, 22 insertions(+), 579 deletions(-)
diff --git a/flaml/autogen/agentchat/contrib/math_user_proxy_agent.py b/flaml/autogen/agentchat/contrib/math_user_proxy_agent.py
index 556881aab1..e0a017adb4 100644
--- a/flaml/autogen/agentchat/contrib/math_user_proxy_agent.py
+++ b/flaml/autogen/agentchat/contrib/math_user_proxy_agent.py
@@ -203,7 +203,7 @@ def generate_init_message(self, problem, prompt_type="default", customized_promp
return PROMPTS[prompt_type] + problem
def _reset(self):
- super().reset()
+ # super().reset()
self._valid_q_count = 0
self._total_q_count = 0
self._accum_invalid_q_per_step = 0
diff --git a/notebook/autogen_agentchat_MathChat.ipynb b/notebook/autogen_agentchat_MathChat.ipynb
index 5e373d0b83..d94046f5ff 100644
--- a/notebook/autogen_agentchat_MathChat.ipynb
+++ b/notebook/autogen_agentchat_MathChat.ipynb
@@ -29,7 +29,7 @@
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -48,7 +48,7 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -122,7 +122,7 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -171,112 +171,9 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "mathproxyagent (to assistant):\n",
- "\n",
- "Let's use Python to solve a math problem.\n",
- "\n",
- "Query requirements:\n",
- "You should always use the 'print' function for the output and use fractions/radical forms instead of decimals.\n",
- "You can use packages like sympy to help you.\n",
- "You must follow the formats below to write your code:\n",
- "```python\n",
- "# your code\n",
- "```\n",
- "\n",
- "First state the key idea to solve the problem. You may choose from three ways to solve the problem:\n",
- "Case 1: If the problem can be solved with Python code directly, please write a program to solve it. You can enumerate all possible arrangements if needed.\n",
- "Case 2: If the problem is mostly reasoning, you can solve it by yourself directly.\n",
- "Case 3: If the problem cannot be handled in the above two ways, please follow this process:\n",
- "1. Solve the problem step by step (do not over-divide the steps).\n",
- "2. Take out any queries that can be asked through Python (for example, any calculations or equations that can be calculated).\n",
- "3. Wait for me to give the results.\n",
- "4. Continue if you think the result is correct. If the result is invalid or unexpected, please correct your query or reasoning.\n",
- "\n",
- "After all the queries are run and you get the answer, put the answer in \\boxed{}.\n",
- "\n",
- "Problem:\n",
- "Find all $x$ that satisfy the inequality $(2x+10)(x+3)<(3x+9)(x+8)$. Express your answer in interval notation.\n",
- "\n",
- "--------------------------------------------------------------------------------\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "assistant (to mathproxyagent):\n",
- "\n",
- "This problem can be solved by first simplifying the inequality, finding the critical points, and then testing points from each interval defined by the critical points to find where the inequality holds true. We can use Python with the sympy package for these calculations. Here is how:\n",
- "\n",
- "Case 1: Solving with Python directly\n",
- "\n",
- "We will solve this problem in the following steps:\n",
- "1. First, we simplify the inequality by expanding both sides and bringing all terms to one side.\n",
- "2. Second, we find the critical points by solving the simplified equation.\n",
- "3. Third, we test the sign of the simplified function with a number in each interval defined by the critical points.\n",
- "4. Finally, we collect all the intervals where the inequality is satisfied.\n",
- "\n",
- "Here is the Python code to accomplish these:\n",
- "\n",
- "```python\n",
- "from sympy import symbols, Eq, solve, simplify\n",
- "\n",
- "# Step 1: Simplify the inequality\n",
- "x = symbols('x')\n",
- "expr1 = simplify((2 * x + 10) * (x + 3))\n",
- "expr2 = simplify((3 * x + 9) * (x + 8))\n",
- "equation = simplify(expr1 - expr2)\n",
- "\n",
- "# Step 2: Find the critical points\n",
- "critical_points = sorted(solve(Eq(equation, 0)))\n",
- "\n",
- "# Step 3 and 4: Test the sign of the simplified function for each interval\n",
- "\n",
- "# First, let's check for x in (-oo, first critical point)\n",
- "test_point = critical_points[0] - 1\n",
- "if equation.subs(x, test_point) < 0:\n",
- " print(\"The inequality holds for x in (-oo, \" + str(critical_points[0]) + \")\")\n",
- "\n",
- "# Second, let's check for x in each (previous critical point, next critical point)\n",
- "for i in range(len(critical_points) - 1):\n",
- " test_point = (critical_points[i] + critical_points[i + 1]) / 2\n",
- " if equation.subs(x, test_point) < 0:\n",
- " print(\"The inequality holds for x in (\" + str(critical_points[i]) + \", \" + str(critical_points[i + 1]) + \")\")\n",
- "\n",
- "# Third, let's check for x in (last critical point, oo)\n",
- "test_point = critical_points[-1] + 1\n",
- "if equation.subs(x, test_point) < 0:\n",
- " print(\"The inequality holds for x in (\" + str(critical_points[-1]) + \", oo)\")\n",
- "\n",
- "# The intervals output in the print statements represent the solution to the inequality in the problem.\n",
- "```\n",
- "\n",
- "After running the above code, you will find the exact interval(s) that satisfy the inequality $(2x+10)(x+3)<(3x+9)(x+8)$.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "mathproxyagent (to assistant):\n",
- "\n",
- "The inequality holds for x in (-oo, -14)\n",
- "The inequality holds for x in (-3, oo)\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "Great! So the solution to the inequality $(2x+10)(x+3)<(3x+9)(x+8)$ is given by the union of the two intervals where the inequality holds true. In interval notation, we can express the solution as:\n",
- "\n",
- "$$\\boxed{x \\in (-\\infty, -14) \\cup (-3, \\infty)}$$\n",
- "\n",
- "--------------------------------------------------------------------------------\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"# given a math problem, we use the mathproxyagent to generate a prompt to be sent to the assistant as the initial message.\n",
"# the assistant receives the message and generates a response. The response will be sent back to the mathproxyagent for processing.\n",
@@ -301,127 +198,9 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": null,
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "mathproxyagent (to assistant):\n",
- "\n",
- "Let's use Python to solve a math problem.\n",
- "\n",
- "Query requirements:\n",
- "You should always use the 'print' function for the output and use fractions/radical forms instead of decimals.\n",
- "You can use packages like sympy to help you.\n",
- "You must follow the formats below to write your code:\n",
- "```python\n",
- "# your code\n",
- "```\n",
- "\n",
- "First state the key idea to solve the problem. You may choose from three ways to solve the problem:\n",
- "Case 1: If the problem can be solved with Python code directly, please write a program to solve it. You can enumerate all possible arrangements if needed.\n",
- "Case 2: If the problem is mostly reasoning, you can solve it by yourself directly.\n",
- "Case 3: If the problem cannot be handled in the above two ways, please follow this process:\n",
- "1. Solve the problem step by step (do not over-divide the steps).\n",
- "2. Take out any queries that can be asked through Python (for example, any calculations or equations that can be calculated).\n",
- "3. Wait for me to give the results.\n",
- "4. Continue if you think the result is correct. If the result is invalid or unexpected, please correct your query or reasoning.\n",
- "\n",
- "After all the queries are run and you get the answer, put the answer in \\boxed{}.\n",
- "\n",
- "Problem:\n",
- "For what negative value of $k$ is there exactly one solution to the system of equations \\begin{align*}\n",
- "y &= 2x^2 + kx + 6 \\\\\n",
- "y &= -x + 4?\n",
- "\\end{align*}\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "The key idea to solve this problem is to set the two equations equal to each other and solve for x. Then we substitute x back into one of the equations to get the quadratic equation in terms of y. We set the discriminant of this quadratic equation equals to zero, because when discriminant (D = b² - 4ac) equals to zero in a quadratic equation, the equation has exactly one root. Solve the equation for k.\n",
- "\n",
- "```python\n",
- "from sympy import symbols, Eq, solve\n",
- "\n",
- "x, k = symbols('x k')\n",
- "\n",
- "# The two Y's are equal, so we set the two equations equal to each other\n",
- "eq1 = Eq(2*x**2 + k*x + 6, -x + 4)\n",
- "solve_for_x = solve(eq1, x)\n",
- "\n",
- "# Insert x solutions back to the original equation\n",
- "eq2 = Eq(2*x + k, -x + 4)\n",
- "result = []\n",
- "\n",
- "for solution in solve_for_x:\n",
- " temp = eq2.subs(x, solution)\n",
- " sol = solve(temp, k)\n",
- " if sol != []:\n",
- " result.append(sol)\n",
- "\n",
- "# The solution should be negative\n",
- "negative_k = [s for s in result if s<0]\n",
- "\n",
- "print(negative_k)\n",
- "```\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "mathproxyagent (to assistant):\n",
- "\n",
- "Error: Traceback (most recent call last):\n",
- " negative_k = [s for s in result if s<0]\n",
- " negative_k = [s for s in result if s<0]\n",
- "TypeError: '<' not supported between instances of 'list' and 'int'\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "Apologies for the oversight in the code. It seems I made a mistake in checking for the negative value of k. Let's modify the code to fix the issue.\n",
- "\n",
- "```python\n",
- "from sympy import symbols, Eq, solve\n",
- "\n",
- "x, k = symbols('x k')\n",
- "\n",
- "# The two Y's are equal, so we set the two equations equal to each other\n",
- "eq1 = Eq(2*x**2 + k*x + 6, -x + 4)\n",
- "solve_for_x = solve(eq1, x)\n",
- "\n",
- "# Insert x solutions back to the original equation\n",
- "eq2 = Eq(2*x + k, -x + 4)\n",
- "result = []\n",
- "\n",
- "for solution in solve_for_x:\n",
- " temp = eq2.subs(x, solution)\n",
- " sol = solve(temp, k)\n",
- " if sol != []:\n",
- " result.extend(sol)\n",
- "\n",
- "# The solution should be negative\n",
- "negative_k = [s for s in result if s<0]\n",
- "\n",
- "print(negative_k)\n",
- "```\n",
- "This code should now properly identify the negative value of k for which there is exactly one solution to the system of equations.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "mathproxyagent (to assistant):\n",
- "\n",
- "[-3*sqrt(33)/2 - 7/2]\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "Great! Now we have the correct negative value of k for which there is exactly one solution to the system of equations. Therefore, the answer is:\n",
- "\n",
- "$$k = \\boxed{-\\frac{3\\sqrt{33}}{2}-\\frac{7}{2}}$$\n",
- "\n",
- "--------------------------------------------------------------------------------\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"math_problem = \"For what negative value of $k$ is there exactly one solution to the system of equations \\\\begin{align*}\\ny &= 2x^2 + kx + 6 \\\\\\\\\\ny &= -x + 4?\\n\\\\end{align*}\"\n",
"mathproxyagent.initiate_chat(assistant, problem=math_problem)"
@@ -440,109 +219,9 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": null,
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "mathproxyagent (to assistant):\n",
- "\n",
- "Let's use Python to solve a math problem.\n",
- "\n",
- "Query requirements:\n",
- "You should always use the 'print' function for the output and use fractions/radical forms instead of decimals.\n",
- "You can use packages like sympy to help you.\n",
- "You must follow the formats below to write your code:\n",
- "```python\n",
- "# your code\n",
- "```\n",
- "\n",
- "First state the key idea to solve the problem. You may choose from three ways to solve the problem:\n",
- "Case 1: If the problem can be solved with Python code directly, please write a program to solve it. You can enumerate all possible arrangements if needed.\n",
- "Case 2: If the problem is mostly reasoning, you can solve it by yourself directly.\n",
- "Case 3: If the problem cannot be handled in the above two ways, please follow this process:\n",
- "1. Solve the problem step by step (do not over-divide the steps).\n",
- "2. Take out any queries that can be asked through Python (for example, any calculations or equations that can be calculated).\n",
- "3. Wait for me to give the results.\n",
- "4. Continue if you think the result is correct. If the result is invalid or unexpected, please correct your query or reasoning.\n",
- "\n",
- "After all the queries are run and you get the answer, put the answer in \\boxed{}.\n",
- "\n",
- "Problem:\n",
- "Find all positive integer values of $c$ such that the equation $x^2-7x+c=0$ only has roots that are real and rational. Express them in decreasing order, separated by commas.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "To ensure the roots of the quadratic equation $x^2 - 7x + c = 0$ are real and rational, we will use two key ideas:\n",
- "\n",
- "1. The discriminant of the quadratic equation, $\\Delta = b^2 - 4ac$, must be non-negative so that the roots are real. \n",
- "2. The discriminant must be a perfect square so that the roots are rational.\n",
- "\n",
- "We can find the values of $c$ using Python. First, we'll calculate the discriminant and check the conditions for each value of c. We only need to consider positive integers for $c$ until we reach a value where the discriminant becomes negative since after that point, there will be no real roots.\n",
- "\n",
- "```python\n",
- "from sympy import *\n",
- "\n",
- "c_values = []\n",
- "c = 1\n",
- "\n",
- "while True:\n",
- " discriminant = Rational(49 - 4 * c)\n",
- " if discriminant < 0:\n",
- " break\n",
- " if int(discriminant.sqrt())**2 == discriminant:\n",
- " c_values.append(c)\n",
- " c += 1\n",
- "\n",
- "print(c_values)\n",
- "```\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "mathproxyagent (to assistant):\n",
- "\n",
- "Error: Traceback (most recent call last):\n",
- " if int(discriminant.sqrt())**2 == discriminant:\n",
- "AttributeError: 'Integer' object has no attribute 'sqrt'\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "Apologies for the mistake. I should have used the `sqrt` function from the `sympy` library. Here's the corrected code:\n",
- "\n",
- "```python\n",
- "from sympy import *\n",
- "\n",
- "c_values = []\n",
- "c = 1\n",
- "\n",
- "while True:\n",
- " discriminant = Rational(49 - 4 * c)\n",
- " if discriminant < 0:\n",
- " break\n",
- " if int(sqrt(discriminant))**2 == discriminant:\n",
- " c_values.append(c)\n",
- " c += 1\n",
- "\n",
- "print(c_values)\n",
- "```\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "mathproxyagent (to assistant):\n",
- "\n",
- "[6, 10, 12]\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "The positive integer values of $c$ such that the equation $x^2-7x+c=0$ only has roots that are real and rational are \\boxed{12, 10, 6}.\n",
- "\n",
- "--------------------------------------------------------------------------------\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"math_problem = \"Find all positive integer values of $c$ such that the equation $x^2-7x+c=0$ only has roots that are real and rational. Express them in decreasing order, separated by commas.\"\n",
"mathproxyagent.initiate_chat(assistant, problem=math_problem)"
@@ -585,157 +264,9 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": null,
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "mathproxyagent (to assistant):\n",
- "\n",
- "Let's use Python to solve a math problem.\n",
- "\n",
- "Query requirements:\n",
- "You should always use the 'print' function for the output and use fractions/radical forms instead of decimals.\n",
- "You can use packages like sympy to help you.\n",
- "You must follow the formats below to write your code:\n",
- "```python\n",
- "# your code\n",
- "```\n",
- "\n",
- "Please follow this process:\n",
- "1. Solve the problem step by step (do not over-divide the steps).\n",
- "2. Take out any queries that can be asked through Python (for example, any calculations or equations that can be calculated).\n",
- "3. Wait for me to give the results.\n",
- "4. Continue if you think the result is correct. If the result is invalid or unexpected, please correct your query or reasoning.\n",
- "\n",
- "After all the queries are run and you get the answer, put the answer in \\boxed{}.\n",
- "\n",
- "Problem: Problem: If $725x + 727y = 1500$ and $729x+ 731y = 1508$, what is the value of $x - y$ ?\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "Let's first solve the two equations as a system of linear equations. We can do this by either substitution or elimination method. Let's use the elimination method.\n",
- "\n",
- "We will multiply both equations by necessary multiples such that the coefficients of either x or y will cancel out when we subtract/add the two equations.\n",
- "\n",
- "We'll multiply the first equation by 729 and the second equation by -725. This will allow us to cancel out the x terms when we add the two equations.\n",
- "\n",
- "```python\n",
- "from sympy import symbols, Eq, solve\n",
- "\n",
- "x, y = symbols('x y')\n",
- "equation1 = Eq(725 * x + 727 * y, 1500)\n",
- "equation2 = Eq(729 * x + 731 * y, 1508)\n",
- "\n",
- "# Multiply equations with necessary multiples\n",
- "equation1 = Eq(729 * (725 * x + 727 * y), 729 * 1500)\n",
- "equation2 = Eq(-725 * (729 * x + 731 * y), -725 * 1508)\n",
- "```\n",
- "Now let's simplify the equations and add them up to cancel out the x terms.\n",
- "\n",
- "```python\n",
- "equation1_simplified = Eq(729 * 725 * x + 729 * 727 * y, 729 * 1500)\n",
- "equation2_simplified = Eq(-725 * 729 * x - 725 * 731 * y, -725 * 1508)\n",
- "\n",
- "# Add the two equations to cancel out the x terms\n",
- "combined_equation = Eq(equation1_simplified.lhs + equation2_simplified.lhs, equation1_simplified.rhs + equation2_simplified.rhs)\n",
- "```\n",
- "Now let's solve the combined equation for y.\n",
- "\n",
- "```python\n",
- "y_solution = solve(combined_equation, y)\n",
- "```\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "mathproxyagent (to assistant):\n",
- "\n",
- "No output found. Make sure you print the results.\n",
- "Error: Traceback (most recent call last):\n",
- " equation1_simplified = Eq(729 * 725 * x + 729 * 727 * y, 729 * 1500)\n",
- "NameError: name 'x' is not defined\n",
- "\n",
- "Error: Traceback (most recent call last):\n",
- " y_solution = solve(combined_equation, y)\n",
- "NameError: name 'combined_equation' is not defined\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "I apologize for not providing the output. I'll correct the code and print the results for you.\n",
- "\n",
- "```python\n",
- "from sympy import symbols, Eq, solve\n",
- "\n",
- "x, y = symbols('x y')\n",
- "equation1 = Eq(725 * x + 727 * y, 1500)\n",
- "equation2 = Eq(729 * x + 731 * y, 1508)\n",
- "\n",
- "# Multiply equations with necessary multiples\n",
- "equation1 = Eq(729 * (725 * x + 727 * y), 729 * 1500)\n",
- "equation2 = Eq(-725 * (729 * x + 731 * y), -725 * 1508)\n",
- "\n",
- "equation1_simplified = Eq(729 * 725 * x + 729 * 727 * y, 729 * 1500)\n",
- "equation2_simplified = Eq(-725 * 729 * x - 725 * 731 * y, -725 * 1508)\n",
- "\n",
- "# Add the two equations to cancel out the x terms\n",
- "combined_equation = Eq(equation1_simplified.lhs + equation2_simplified.lhs, equation1_simplified.rhs + equation2_simplified.rhs)\n",
- "\n",
- "# Solve the combined equation for y\n",
- "y_solution = solve(combined_equation, y)\n",
- "print(y_solution)\n",
- "```\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "mathproxyagent (to assistant):\n",
- "\n",
- "[25]\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "Great, we have found the solution for y: $y = 25$. Now, let's substitute the y value back into one of the original equations to solve for x. We can use the first equation for this.\n",
- "\n",
- "```python\n",
- "# Substitute y in the first equation\n",
- "equation1_substituted = equation1.subs(y, 25)\n",
- "\n",
- "# Solve the equation for x\n",
- "x_solution = solve(equation1_substituted, x)\n",
- "print(x_solution)\n",
- "```\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "mathproxyagent (to assistant):\n",
- "\n",
- "[-23]\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "Now we have found the solution for x: $x = -23$. To find the value of $x - y$, we can simply subtract y from x.\n",
- "\n",
- "```python\n",
- "x_minus_y = x_solution[0] - y_solution[0]\n",
- "print(x_minus_y)\n",
- "```\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "mathproxyagent (to assistant):\n",
- "\n",
- "-48\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "The value of $x - y$ is $-48$. So the final answer is $\\boxed{-48}$.\n",
- "\n",
- "--------------------------------------------------------------------------------\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"# we set the prompt_type to \"python\", which is a simplied version of the default prompt.\n",
"math_problem = \"Problem: If $725x + 727y = 1500$ and $729x+ 731y = 1508$, what is the value of $x - y$ ?\"\n",
@@ -757,98 +288,9 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": null,
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "mathproxyagent (to assistant):\n",
- "\n",
- "Let's use two tools (Python and Wolfram alpha) to solve a math problem.\n",
- "\n",
- "Query requirements:\n",
- "You must follow the formats below to write your query:\n",
- "For Wolfram Alpha:\n",
- "```wolfram\n",
- "# one wolfram query\n",
- "```\n",
- "For Python:\n",
- "```python\n",
- "# your code\n",
- "```\n",
- "When using Python, you should always use the 'print' function for the output and use fractions/radical forms instead of decimals. You can use packages like sympy to help you.\n",
- "When using wolfram, give one query in each code block.\n",
- "\n",
- "Please follow this process:\n",
- "1. Solve the problem step by step (do not over-divide the steps).\n",
- "2. Take out any queries that can be asked through Python or Wolfram Alpha, select the most suitable tool to be used (for example, any calculations or equations that can be calculated).\n",
- "3. Wait for me to give the results.\n",
- "4. Continue if you think the result is correct. If the result is invalid or unexpected, please correct your query or reasoning.\n",
- "\n",
- "After all the queries are run and you get the answer, put the final answer in \\boxed{}.\n",
- "\n",
- "Problem: Find all numbers $a$ for which the graph of $y=x^2+a$ and the graph of $y=ax$ intersect. Express your answer in interval notation.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "First, let's find the intersection points of the two graphs by setting $y = x^2 + a$ equal to $y = ax$. \n",
- "\n",
- "$x^2 + a = ax$\n",
- "\n",
- "Next, let's rearrange it to a quadratic equation:\n",
- "\n",
- "$x^2 - ax + a = 0$\n",
- "\n",
- "We will find the discriminant, $\\Delta$, using the following formula: \n",
- "$\\Delta = b^2 - 4ac$, where $a = 1$, $b = -a$ and $c = a$.\n",
- "\n",
- "Now, let's calculate the discriminant:\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "mathproxyagent (to assistant):\n",
- "\n",
- "Continue. Please keep solving the problem until you need to query. (If you get to the answer, put it in \\boxed{}.)\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "Sure, let's calculate the discriminant:\n",
- "\n",
- "$\\Delta = (-a)^2 - 4(1)(a) = a^2 - 4a$\n",
- "\n",
- "For the quadratic equation to have at least one real solution, the discriminant must be greater than or equal to zero:\n",
- "\n",
- "$a^2 - 4a \\ge 0$\n",
- "\n",
- "Now, let's find the values of $a$ that satisfy this inequality. We can do this by factoring the quadratic:\n",
- "\n",
- "$a(a - 4) \\ge 0$\n",
- "\n",
- "This inequality is true whenever $a \\le 0$ or $a - 4 \\ge 0$. Now let's find the intervals for $a$.\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "mathproxyagent (to assistant):\n",
- "\n",
- "Continue. Please keep solving the problem until you need to query. (If you get to the answer, put it in \\boxed{}.)\n",
- "\n",
- "--------------------------------------------------------------------------------\n",
- "assistant (to mathproxyagent):\n",
- "\n",
- "The inequality $a \\le 0$ represents the interval $(-\\infty, 0]$. \n",
- "\n",
- "The inequality $a - 4 \\ge 0$ can be rewritten as $a \\ge 4$, which represents the interval $[4, \\infty)$. \n",
- "\n",
- "Since we are looking for the values of $a$ where the graphs intersect, we need to consider both intervals. Therefore, the final answer would be the union of the two intervals:\n",
- "\n",
- "\\[\\boxed{(-\\infty, 0] \\cup [4, \\infty)}\\]\n",
- "\n",
- "--------------------------------------------------------------------------------\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"# The wolfram alpha appid is required for this example (the assistant may choose to query Wolfram Alpha).\n",
"import os\n",
diff --git a/test/autogen/agentchat/test_math_user_proxy_agent.py b/test/autogen/agentchat/test_math_user_proxy_agent.py
index a40ff718e9..5377791992 100644
--- a/test/autogen/agentchat/test_math_user_proxy_agent.py
+++ b/test/autogen/agentchat/test_math_user_proxy_agent.py
@@ -45,10 +45,11 @@ def test_math_user_proxy_agent():
assistant.reset()
math_problem = "$x^3=125$. What is x?"
- assistant.receive(
- message=mathproxyagent.generate_init_message(math_problem),
- sender=mathproxyagent,
- )
+ # assistant.receive(
+ # message=mathproxyagent.generate_init_message(math_problem),
+ # sender=mathproxyagent,
+ # )
+ mathproxyagent.initiate_chat(assistant, problem=math_problem)
print(conversations)
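The change above replaces the manual generate_init_message/receive pairing with a single initiate_chat call that takes the problem as a keyword argument. A minimal sketch of the intended call pattern follows; the import path and constructor arguments here are assumptions for illustration, not verbatim from this patch:

```python
# Sketch only: module path and constructor arguments are assumptions,
# not verbatim from this patch.
from flaml.autogen.agentchat import AssistantAgent
from flaml.autogen.agentchat.math_user_proxy_agent import MathUserProxyAgent

assistant = AssistantAgent(name="assistant")
mathproxyagent = MathUserProxyAgent(name="mathproxyagent")

# initiate_chat builds the init message from the `problem` keyword and
# starts the conversation in one call.
mathproxyagent.initiate_chat(assistant, problem="$x^3=125$. What is x?")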
@@ -116,7 +117,7 @@ def test_generate_prompt():
if __name__ == "__main__":
- test_add_remove_print()
- test_execute_one_python_code()
- test_generate_prompt()
+ # test_add_remove_print()
+ # test_execute_one_python_code()
+ # test_generate_prompt()
test_math_user_proxy_agent()