From a2d4b475034f27a8c8116be7464666c28658d8c2 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Wed, 31 Jan 2024 16:30:55 +0100 Subject: [PATCH] Function calling upgrade (#1443) * function calling upgraded: async/sync mixing works now for all combinations and register_function added to simplify registration of functions without decorators * polishing * fixing tests --------- Co-authored-by: Eric Zhu Co-authored-by: Chi Wang --- .gitignore | 3 + autogen/agentchat/__init__.py | 7 +- autogen/agentchat/conversable_agent.py | 64 ++- notebook/agentchat_function_call.ipynb | 213 ++++++---- notebook/agentchat_function_call_async.ipynb | 211 ++++++---- ...at_function_call_currency_calculator.ipynb | 62 +-- test/agentchat/test_conversable_agent.py | 58 ++- .../test_function_and_tool_calling.py | 378 ++++++++++++++++++ website/docs/Use-Cases/agent_chat.md | 30 +- 9 files changed, 816 insertions(+), 210 deletions(-) create mode 100644 test/agentchat/test_function_and_tool_calling.py diff --git a/.gitignore b/.gitignore index fbcc60d1172d..66ccc528dccc 100644 --- a/.gitignore +++ b/.gitignore @@ -175,3 +175,6 @@ test/test_files/agenteval-in-out/out/ # Files created by tests *tmp_code_* test/agentchat/test_agent_scripts/* + +# test cache +.cache_test diff --git a/autogen/agentchat/__init__.py b/autogen/agentchat/__init__.py index 3db1db73a556..52cf15b050c0 100644 --- a/autogen/agentchat/__init__.py +++ b/autogen/agentchat/__init__.py @@ -1,14 +1,15 @@ from .agent import Agent from .assistant_agent import AssistantAgent -from .conversable_agent import ConversableAgent +from .conversable_agent import ConversableAgent, register_function from .groupchat import GroupChat, GroupChatManager from .user_proxy_agent import UserProxyAgent -__all__ = [ +__all__ = ( "Agent", "ConversableAgent", "AssistantAgent", "UserProxyAgent", "GroupChat", "GroupChatManager", -] + "register_function", +) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 
cbe7e621e392..44115d2580fe 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -914,9 +914,20 @@ def generate_function_call_reply( func_call = message["function_call"] func = self._function_map.get(func_call.get("name", None), None) if inspect.iscoroutinefunction(func): - return False, None - - _, func_return = self.execute_function(message["function_call"]) + try: + # get the running loop if it was already created + loop = asyncio.get_running_loop() + close_loop = False + except RuntimeError: + # create a loop if there is no running loop + loop = asyncio.new_event_loop() + close_loop = True + + _, func_return = loop.run_until_complete(self.a_execute_function(func_call)) + if close_loop: + loop.close() + else: + _, func_return = self.execute_function(message["function_call"]) return True, func_return return False, None @@ -943,7 +954,9 @@ async def a_generate_function_call_reply( func = self._function_map.get(func_name, None) if func and inspect.iscoroutinefunction(func): _, func_return = await self.a_execute_function(func_call) - return True, func_return + else: + _, func_return = self.execute_function(func_call) + return True, func_return return False, None @@ -968,8 +981,20 @@ def generate_tool_calls_reply( function_call = tool_call.get("function", {}) func = self._function_map.get(function_call.get("name", None), None) if inspect.iscoroutinefunction(func): - continue - _, func_return = self.execute_function(function_call) + try: + # get the running loop if it was already created + loop = asyncio.get_running_loop() + close_loop = False + except RuntimeError: + # create a loop if there is no running loop + loop = asyncio.new_event_loop() + close_loop = True + + _, func_return = loop.run_until_complete(self.a_execute_function(function_call)) + if close_loop: + loop.close() + else: + _, func_return = self.execute_function(function_call) tool_returns.append( { "tool_call_id": id, @@ -1986,3 +2011,30 @@ def 
get_total_usage(self) -> Union[None, Dict[str, int]]: return None else: return self.client.total_usage_summary + + +def register_function( + f: Callable[..., Any], + *, + caller: ConversableAgent, + executor: ConversableAgent, + name: Optional[str] = None, + description: str, +) -> None: + """Register a function to be proposed by an agent and executed for an executor. + + This function can be used instead of function decorators `@ConversationAgent.register_for_llm` and + `@ConversationAgent.register_for_execution`. + + Args: + f: the function to be registered. + caller: the agent calling the function, typically an instance of ConversableAgent. + executor: the agent executing the function, typically an instance of UserProxy. + name: name of the function. If None, the function name will be used (default: None). + description: description of the function. The description is used by LLM to decode whether the function + is called. Make sure the description is properly describing what the function does or it might not be + called by LLM when needed. 
+ + """ + f = caller.register_for_llm(name=name, description=description)(f) + executor.register_for_execution(name=name)(f) diff --git a/notebook/agentchat_function_call.ipynb b/notebook/agentchat_function_call.ipynb index 8716ed32ee8a..7f92c56797ab 100644 --- a/notebook/agentchat_function_call.ipynb +++ b/notebook/agentchat_function_call.ipynb @@ -61,6 +61,7 @@ "from typing_extensions import Annotated\n", "\n", "import autogen\n", + "from autogen.cache import Cache\n", "\n", "config_list = autogen.config_list_from_json(\n", " \"OAI_CONFIG_LIST\",\n", @@ -118,9 +119,78 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "id": "9fb85afb", "metadata": {}, + "outputs": [], + "source": [ + "llm_config = {\n", + " \"config_list\": config_list,\n", + " \"timeout\": 120,\n", + "}\n", + "chatbot = autogen.AssistantAgent(\n", + " name=\"chatbot\",\n", + " system_message=\"For coding tasks, only use the functions you have been provided with. Reply TERMINATE when the task is done.\",\n", + " llm_config=llm_config,\n", + ")\n", + "\n", + "# create a UserProxyAgent instance named \"user_proxy\"\n", + "user_proxy = autogen.UserProxyAgent(\n", + " name=\"user_proxy\",\n", + " is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n", + " human_input_mode=\"NEVER\",\n", + " max_consecutive_auto_reply=10,\n", + " code_execution_config={\n", + " \"work_dir\": \"coding\",\n", + " \"use_docker\": False,\n", + " }, # Please set use_docker=True if docker is available to run the generated code. 
Using docker is safer than running the generated code directly.\n", + ")\n", + "\n", + "\n", + "# define functions according to the function description\n", + "\n", + "\n", + "# one way of registering functions is to use the register_for_llm and register_for_execution decorators\n", + "@user_proxy.register_for_execution()\n", + "@chatbot.register_for_llm(name=\"python\", description=\"run cell in ipython and return the execution result.\")\n", + "def exec_python(cell: Annotated[str, \"Valid Python cell to execute.\"]) -> str:\n", + " ipython = get_ipython()\n", + " result = ipython.run_cell(cell)\n", + " log = str(result.result)\n", + " if result.error_before_exec is not None:\n", + " log += f\"\\n{result.error_before_exec}\"\n", + " if result.error_in_exec is not None:\n", + " log += f\"\\n{result.error_in_exec}\"\n", + " return log\n", + "\n", + "\n", + "# another way of registering functions is to use the register_function\n", + "def exec_sh(script: Annotated[str, \"Valid Python cell to execute.\"]) -> str:\n", + " return user_proxy.execute_code_blocks([(\"sh\", script)])\n", + "\n", + "\n", + "autogen.agentchat.register_function(\n", + " exec_python,\n", + " caller=chatbot,\n", + " executor=user_proxy,\n", + " name=\"sh\",\n", + " description=\"run a shell script and return the execution result.\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "f6952220", + "metadata": {}, + "source": [ + "Finally, we initialize the chat that would use the functions defined above:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "27d3e43a", + "metadata": {}, "outputs": [ { "name": "stdout", @@ -133,12 +203,63 @@ "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", "\n", - "\u001b[32m***** Suggested function Call: python *****\u001b[0m\n", + "\u001b[32m***** Suggested tool Call (call_bsaGbd8WGdC869LhG62hI0uK): python *****\u001b[0m\n", + "Arguments: \n", + "cell = 
\"\"\"\n", + "import matplotlib.pyplot as plt\n", + "import matplotlib.patches as patches\n", + "\n", + "# Creating a simple scene for two agents chatting\n", + "fig, ax = plt.subplots()\n", + "\n", + "# Draw two circles representing the agents\n", + "ax.add_patch(patches.Circle((2, 2), 0.5, fill=True, color='blue', label='Agent A'))\n", + "ax.add_patch(patches.Circle((5, 2), 0.5, fill=True, color='green', label='Agent B'))\n", + "\n", + "# Example dialogues as text\n", + "ax.text(1, 3, \"Hello!\", style='italic', bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 5})\n", + "ax.text(4, 3, \"Hi there!\", style='italic', bbox={'facecolor': 'yellow', 'alpha': 0.5, 'pad': 5})\n", + "\n", + "# Setting the limits of the plot\n", + "ax.set_xlim(0, 7)\n", + "ax.set_ylim(0, 4)\n", + "\n", + "# Hiding the axes\n", + "ax.axis('off')\n", + "\n", + "# Use this line just before the plt.show() if necessary\n", + "plt.savefig(\"agents_chatting.png\")\n", + "\n", + "# Don't add plt.show() as per the instructions\n", + "\"\"\"\n", + "return cell\n", + "\u001b[32m***********************************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\u001b[32m***** Response from calling tool \"call_bsaGbd8WGdC869LhG62hI0uK\" *****\u001b[0m\n", + "Error: Expecting value: line 1 column 1 (char 0)\n", + " You argument should follow json format.\n", + "\u001b[32m**********************************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "\u001b[32m***** Suggested tool Call (call_ujcz2CkK0UgEEUen7X1ctXhe): python *****\u001b[0m\n", "Arguments: \n", 
"{\n", - " \"cell\": \"import matplotlib.pyplot as plt\\nimport matplotlib.patches as patches\\n\\n# Create a figure to draw\\nfig, ax = plt.subplots(figsize=(8, 5))\\n\\n# Set plot limits to avoid text spilling over\\nax.set_xlim(0, 2)\\nax.set_ylim(0, 2)\\n\\n# Hide axes\\nax.axis('off')\\n\\n# Draw two agents\\nhead_radius = 0.1\\n\\n# Agent A\\nax.add_patch(patches.Circle((0.5, 1.5), head_radius, color='blue'))\\n# Agent B\\nax.add_patch(patches.Circle((1.5, 1.5), head_radius, color='green'))\\n\\n# Example dialog\\nbbox_props = dict(boxstyle=\\\"round,pad=0.3\\\", ec=\\\"black\\\", lw=1, fc=\\\"white\\\")\\nax.text(0.5, 1.3, \\\"Hello, how are you?\\\", ha=\\\"center\\\", va=\\\"center\\\", size=8, bbox=bbox_props)\\nax.text(1.5, 1.3, \\\"I'm fine, thanks!\\\", ha=\\\"center\\\", va=\\\"center\\\", size=8, bbox=bbox_props)\\n\"\n", + " \"cell\": \"import matplotlib.pyplot as plt\\nimport matplotlib.patches as patches\\n\\n# Creating a simple scene for two agents chatting\\nfig, ax = plt.subplots()\\n\\n# Draw two circles representing the agents\\nax.add_patch(patches.Circle((2, 2), 0.5, fill=True, color='blue', label='Agent A'))\\nax.add_patch(patches.Circle((5, 2), 0.5, fill=True, color='green', label='Agent B'))\\n\\n# Example dialogues as text\\nax.text(1, 3, \\\"Hello!\\\", style='italic', bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 5})\\nax.text(4, 3, \\\"Hi there!\\\", style='italic', bbox={'facecolor': 'yellow', 'alpha': 0.5, 'pad': 5})\\n\\n# Setting the limits of the plot\\nax.set_xlim(0, 7)\\nax.set_ylim(0, 4)\\n\\n# Hiding the axes\\nax.axis('off')\\n\\n# Use this line just before the plt.show() if necessary\\nplt.savefig(\\\"agents_chatting.png\\\")\\n\\n# Don't add plt.show() as per the instructions\\n\"\n", "}\n", - "\u001b[32m*******************************************\u001b[0m\n", + "\u001b[32m***********************************************************************\u001b[0m\n", "\n", 
"--------------------------------------------------------------------------------\n", "\u001b[35m\n", @@ -147,19 +268,9 @@ }, { "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAgMAAAGFCAYAAABg2vAPAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAhyklEQVR4nO3de3hU9b3v8c/MmoQQCIGQCBG530EhCt4qiuKFugW1sKlVqy2ictm4z1Gstlu3ioKeXWt7tDyogFZPLbUqqDVURcD7FYhBLpIgoKJJIIEQkNzmss4fI4FIiAQm+c2s3/v1PDyPJGHmm5g18561fmuNz3VdVwAAwFp+0wMAAACziAEAACxHDAAAYDliAAAAyxEDAABYjhgAAMByxAAAAJYjBgAAsBwxAACA5YgBAAAsRwwAAGA5YgAAAMsRAwAAWI4YAADAcsQAAACWIwYAALAcMQAAgOWIAQAALEcMAABgOWIAAADLEQMAAFiOGAAAwHLEAAAAliMGAACwHDEAAIDliAEAACxHDAAAYDliAAAAyxEDAABYjhgAAMByxAAAAJYjBgAAsBwxAACA5YgBAAAsRwwAAGA5YgAAAMsRAwAAWI4YAADAcsQAAACWIwYAALAcMQAAgOWIAQAALEcMAABgOWIAAADLEQMAAFiOGAAAwHLEAAAAliMGAACwHDEAAIDliAEAACxHDAAAYDliAAAAyxEDAABYjhgAAMByxAAAAJYjBgAAsBwxAACA5YgBAAAsRwwAAGA5YgAAAMsRAwAAWI4YAADAcsQAAACWIwYAALAcMQAAgOWIAQAALEcMAABgOWIAAADLEQMAAFiOGAAAwHLEAAAAliMGAACwHDEAAIDliAEAACxHDAAAYDliAAAAyxEDAABYjhgAAMByxAAAAJYjBgAAsBwxAACA5YgBAAAsFzA9QKKoqKhQZWWl6TGaRWpqqtLT002PAYt4eXtqDmyjaG7EwBGoqKjQnPvuU7CszPQozSIpM1PT//u/ebBBi6ioqNCcOfcpGPTm9tQckpIyNX062yiaDzFwBCorKxUsK9O41q2VlZpqepyYKq2s1OKyMlVWVvJAgxZRWVmpYLBM48a1VlaWt7an5lBaWqnFi9lG0byIgSbISk1Vdlqa6TFir6rK9ASwUFZWqrKzPbg9NQu2UTQvFhACAGA5YgAAAMsRAwAAWI4YAADAcsRAHPr3557T/37ttbq/D583T//3o48MTgTErw8/3KZWrWZp377aJv27OXM+UU7OY800FZBYiIEYGvnUU7ru5ZcP+fjclSvV9v77FXHdI7qd/JIS5XTuLEkKRSJaX1pa9/cj1ekPf9Dv33+/Sf8GiCcjRz6l665rYHuau1Jt296vSCS6PeXkdNa3396iNm2SD3tbGRn/o3/+s6Dex/LzSzRkSKfYDt0Ec+euVFraA3XfB2ASMRAjruvq0+JiDcvOPuRzq4qKlNO5s/w+34/ezt6aGm0pL6978t9YVqbqUEhDOx35g9a2igrt2LdPw48//si/ASCOuK6rTz8t1rBhDWxPq4qUk9NZfn90e2rdOkmZmYe/XsGmTTtVXl6t4cPrbw/5+SUaOjS2MRAMho/4a1evLtLJJx/4PgCTiIEY2bRrl/bW1mpYA0/Aq4qK6iIhFInoDx98oN6PPKLWs2dr2Lx5everr+q+ds327Qr4/RqUlSUpupegW3q6OrRuXfc1C/LydOLcuWo9e7ZOevRRLSksrHd/q4uLJUmnNBAmQCLYtGmX9u6t1bBhDWxPq4rqRcK55z6le+55q8HbefLJT9Wv3xxJUpcuf5TPN1OPPbZKoVBE69eXKj09RePHP6e0tAfUrd
uf9Mor9fcerF+/Q2PGLFTbtvfruOMe1PTp/1JNTajefd9yy+uaNm2JMjL+R+PHPydJ2rmzUtOmLdFxxz2otLQHNGbMQm3bVlHvtlevbjh2ABOIgRhZXVQkx+c75BV8VTCoDaWlOiU7W67ravxzz2nJpk166rLLtH7aNF3cp48u/8c/tKemRlL0yX9gVpaSHafu7wcfIrhj+XLd/dZbmj1qlDZMm6arTzpJ4597Tl/s2lVvlt4dOqh9SkoLfOdA7K1eXSTH8R3yyr2qKqgNG0p1yikHnkQ/+2y7cnIaPox2xRWDdcstZ2jkyO4qLp6h4uIZmjgxRxs3lqm6OqS5c1dq4sQcrVkzRSNH9tB//ueBtToffLBNZ531pC68sJfy86do0aKf6/XXN+vBBz+od99PP71Gffpk6JNPbtDDD/9UZWWVOvXU+XJdV8uXX6uPPpokSZo48cAhj+rqkNavL20wdgATuAJhjOQVFyvsukq9//4GP39Kdrb+vm6dPi8t1dqpU9UqEP3Rzxo1Sg9//LHyS0p0Tvfuhzz555eU6KyuXSVJn23frv/z/vv6cNIkndaliyTptyNG6M+ffKI3Nm9Wn4wMSdG9C/s/DySivLxihcOuUlMPsz19HwNffbVb5eXVh93d36ZNsjZvLtfw4cerc+e2dR/Pzy9RIODXs8/+u/r16yhJ+vnPB+nFFz+XJIXDEU2a9E/96U+jNXHiyZKkPn0yNGXKMC1Zskl33nlO3X0/+OCFuuWWM+tu+8YbX9HZZ3fXo4+OqfvYzJnn6swzn1A4HJHj+LVhQ6lCoYhOO43tFPGBGIiRvJIS/WzAAN01cmS9jz+7bp0e+fhjDcrK0v967TVt3b1bHX//+3pfsy8YVMAf3UmTX1Kiq046qe5za7Zv17RTT5UkLVy7Vqcef/whT/TJjqOa8IFjlZt27dKMM88UkKjy8kr0s58N0F13/WB7enadHnnkYw0aFD2MtmbNdqWnt1LPnh0Oe1ufflqi8eMH1vtYfn6Jzj67W10ISNLWrbvVp080qN9/f5s2bizT9Omv6qabXq37mmAwopEju9fdd3Kyo8mTh9V9vro6pL/9ba3C4YgWLdpQ9/FIxJXP56tbH7Bp00516ZJW7/4Bk4iBGMkrLtbMc889ZNX/3JUrNaRTJzl+v/JLSvTYJZdoZI8eh/z77unph5w58M2ePSqrrKz7+/rSUp143HH1/t3emhptq6jQSQd9/PQuXTS6d+8Yf4dAy8nLK9bMmecesvt/7tyVGjKkkxzn+3j+kTMCysur9PXXFRo6tP7t5OeX6IwzTjjkY/vvb//tLlr080NuMy0tue5rhg3LVlpaq7rPFRbuVGVlUGvXTlVKSv2H10DAL9/3i4jbtEnWjBkEO+IHMRADW8rLtbu6usEFe3nFxTr9+1fySY4jV6rbnf9D63bsUHUoVPfkn19SonatWqln+/aSpLTkZFWFQvX+zSMff6zstLR6gfHU5Zcf8/cEmLJlS7l2766uty5gv7y8Yp1++oE9Y2vWHH69gCStXbtDycmOBg7MrPfxNWu213tFL0X3IPzqV0MlSUlJfu3aVaXevTvUPYH/0Jo123XyyfXvOykpGikpKYG6vQwNGTOm32E/B5jAAsIYWF1UJL/Pd8hegWA4rHU7dtRFwr/17auZb7+tJYWF+nL3bn38zTea/c47+vibbyRFn/xPaNdOGd+fOZBfUqIhnTrVPRhd3KeP/llQoNzCQm0tL9fDH32k+997T09cemndYYaHP/pIF/31ry31rQMxt3p1kfx+3yFP8sFgWOvW7agXCT92emAk4sp1XeXnl6ik5DvV1ob1zTd7VFZWqZNPPnA7wWBYGzaU1t3n+ef3UmnpPk2f/i99/nmpCgrK9PLLG3XXXW/Wu++Db0OS+vbtqD59MnT99f/UJ598qy1byvXmm1t1003/UlVVUJJUWxvW4MFzD7nuAWASewZiIK+4WH0zMtQ2uf5FTzaUlqomHK
6LgYd/+lP9dtkyTc7NVVllpTq3bauRPXpoyvDhkg49cyC/pEQ5B52d8MshQ7SlvFxTcnNVXl2t4ccfr9euvlpnd+9e9zVvfvmlUgL8b0XiyssrVt++GWrb9gfb04ZS1dSE62Jg794abd1a3uiegREjumn8+EE677yntW9fUOvWTdXWrbuVlpas3r0PrDNYv75UtbXhutvq16+jXnrpF7rrrjd16qnzlZzsaMCATN1002mN3ncg4Fdu7pW69dY3dPHFf1N1dUg9e7bX5ZcPUOvWSXXfx4YNpTrhhHbH/sMCYsTnukd4WTyLFRcX6/Hf/U6TO3ZUdpq33n+9eO9ePb5zpyY/8ICyuS4BWkBxcbEef/x3mjy5o7KzvbU9NYfi4r16/PGdmjyZbRTNh8MEAABYjhgAAMByxAAAAJYjBgAAsBwxAACA5YgBAAAsRwwAAGA5YgAAAMtxqbomKK2sND1CzHnxe0JiKC3ld+9I8HNCSyAGjkBqaqqSMjO1uKxMqqoyPU7MJWVmKjU11fQYsERqaqqSkjK1eHGZJO9tT80hKYltFM2LyxEfoYqKClV69FV0amqq0tPTTY8Bi3h5e2oObKNobsQAAACWYwEhAACWIwYAALAcMQAAgOWIAQAALEcMAABgOWIAAADLEQMAAFiOGAAAwHLEAAAAliMGAACwHDEAAIDliAEAACxHDAAAYDliAAAAyxEDAABYjhgAAMByxAAAAJYjBgAAsBwxAACA5YgBAAAsRwwAAGA5YgAAAMsRAwAAWI4YAADAcsQAAACWIwYAALAcMQAAgOWIAQAALEcMAABgOWIAAADLEQMAAFiOGAAAwHIB0wMg8dXWSvn50ubN0tat0T/l5VIwKDmOlJIide0q9eol9ewpDR0qdepkemogfm3/brvWbF+jreVbtaV8i7bt2abqULXCblhJ/iR1SOmgnh16qmf7nuqd0Vs5nXOU7CSbHhsJjBjAUSkslF5+WXrjDendd6Xq6ujHA9//RoXDkutG/9txJL9fCoUOfKx/f2n0aOmii6J/kpJa/nsA4kUwHNTSzUu1dPNSvbb5NRXuLJQk+eRTwB9QxI0o7IbrPub4HUlSKBKSJKUEUnROt3N0Qa8LdNmAy9SvYz8z3wgSls919z88A42rrpYWLZIee0x6773oE7wkRSJHd3uBQDQQMjOlG26Qrr8+uvcAsMWW8i1akLdA8/Pmq6yyTEn+JAUjwaO6Lb8vukFG3IhGdBuhqcOnatzAcUoJpMRyZHgUMYAfVVsrzZsn3X23tGtX9JV+OBzb+9h/m1dcId1/P1EAb9u8a7PuWHGH/rH+H3J8Tt2r/ljZf5sZrTM089yZunHYjRxGQKOIARyW60rPPy/ddpv01Vctc5/7DzNMnSrddVd0rwHgFWWVZbr37Xv16KpHJVcKuaEWud/u6d31+wt/rwmDJsjn87XIfSKxEANo0I4d0qRJUm5u9HDA0R4KOFqOI6WnS08/LY0Z07L3DTSHVwpe0a9f/rUqqitivifgx/jlV0QRje03VgsuXaDj2hzXoveP+EcM4BCvvCL96lfSnj2xPxzQFPsj5IYbpD/+UWrb1twswNH6rvY73fz6zVqQt6DuSdkUx+coPSVdT132lMb2H2tsDsQfYgB1XFe65x7p3nvN7A04HL8/evbB0qXSCSeYngY4ctsqtmn0M6NVsLNAETc+Nqj9QXL3yLt198i7OWwAScQAvldbG13N/9e/mp6kYYGAlJEhvf66lJNjehrgx+WX5Gv0M6O1q2pX3SmA8ebaoddq/tj5LC4EMQBp3z7pkkui1wuIl70BDXEcKTk5uo5h1CjT0wCHt2LrCo1ZOEa14doWXx/QFH6fX+d0P0e5V+aqTXIb0+PAIGLAcjU10QV6b75pdn3AkfL7o0Hw5pvSGWeYngY41IfbPtSo/zdKteHauDk00BjH5+i8Hucp96pctQq0Mj0ODOG9CSwWCklXXimtWJEYISBF91wEg9GrFq
5da3oaoL7Ptn+m0c+MTpgQkKSwG9aKL1foqsVXKRxJkAcCxBwxYCnXlaZNk156Kb4PDTQkHJYqK6Xzzmu56x8AP+ar3V9p1NOjVBmsTJgQ2C/iRvTi5y9q6pKpYmexnYgBSz3+uDR//oH3Ckg04bBUUSFdeqlUVWV6GtiuKlilsX8fq4qalr+GQKy4cjU/b77mrZ5nehQYQAxYaOVK6aabTE9x7EIhad06b3wvSGzTX52u9aXr4/asgaaY/up0rSpaZXoMtDAWEFpmzx7pxBOloqLEWSdwJBYujK5/AFrawrULdfXiq02PETOOz1GXdl20dupatWvVzvQ4aCHsGbDMb3/rvRDw+aLvZVBaanoS2GbHvh2aumSqfPLOhXvCbljf7vlWv1v+O9OjoAURAxb58MPo2w97KQSk6LqH776Tbr7Z9CSwzc2v3ax9tfvkyls7WMNuWI+ufFQfbvvQ9ChoIRwmsEQwKA0ZIm3a5L0YONjSpdKFF5qeAjZ4Y/MbuuiZi0yP0Wwcn6N+HftpzZQ1SnKSTI+DZsaeAUv8+c9SQYG3Q8Dvl268MbqwEGhOwXBQN+beKL/Puw+hYTesjWUbNeeTOaZHQQvw7m8y6lRWSrNnJ+5phEcqEpG+/DK6mBBoTgvXLtSXu79MuOsJNJUrV7Pfna3KYKXpUdDMiAELzJsnlZebnqJl+HzRd1708h4QmBWKhDTz7ZmeWjTYmF1VuzR/9XzTY6CZEQMeV1Vlx16B/VxX2rpVevZZ05PAq55d96y27t7quUWDh7N/70B1qNr0KGhGxIDHLVgg7dxpeoqW5fezdwDNIxwJ65637pHfsofOssoyLchbYHoMNCO7fqMtU10tzZplz16B/SIR6YsvpBdeMD0JvOb5Dc9rc/lmReTttQINmfXOLNWEakyPgWZCDHjYiy9KO3aYnsIMv1966CHTU8Br/vjhHz19BsHhuHK1fd92Lf58selR0Ezs+622yKJFkuOYnsKMSCT6HgzFxaYngVcU7S3SyqKVnj+D4HAC/gAx4GHEgEdVVUlLlth93Nzni75FMxALL218yZozCBoSioSUuylXVUHeJtSLiAGPeuON6JoBm/l80b0jQCy8sOEF+Xz2xoAkVYeqtWzLMtNjoBkQAx714otSIGB6CrMiEemtt+y5xgKaz66qXXr7q7etPUSwX8Af0IsbXzQ9BpoBMeBBoZC0eDGX5ZWih0lyc01PgUSXW5hrfQhI0UMFiz9frFCEBxevIQY86J13pD17TE8RHxwnGkbAsVj0+SI5PktX4/5ARU2F3v3qXdNjIMaIAQ/iEMEB4bD06qvR92cAjsa+2n167YvXFHYtXo17EA4VeBMx4EEcIqivpkZaxponHKXlW5erNlxreoy4sf9QAbyFGPCYigqpqMj0FPElEJA++8z0FEhUa0rWKOBnV9vBvt37rfbUcCzSS4gBjykoMD1B/HFdfi44egU7C+Tadk3vI1BQxkblJcSAx/Ckd6hwWFq3zvQUSFTrdqxjvUADCnbyYOMlxIDHbNwoJSWZniL+FBba94ZNOHau66pwZ6HpMeJOkj9JG8s2mh4DMUQMeExBgd2XID6cykqppMT0FEg0xd8VqyrE5Xd/KOyGOUzgMcSAx6xbF73yHg7FIRQ0FU94DYu4Ea0r5diblxADHhIOS1u2mJ4iPvl8xACarmBngdVvTtSYLeVbuCqjhxADHvL111IwaHqK+BQIEANouoKyAk4rPIzacK2+rvja9BiIEWLAQ774wvQE8SsUii4iBJpi065NCkYo7MPZtHOT6REQI8SAh3z3nekJ4pfrSnv3mp4CiYYL6zRuX3Cf6REQI8SAh9TUmJ4gvvHzQVNVh6pNjxDXakJsVF5BDHhILZdPb1Q1j+toopowT3aN4T0bvIMY8BBe+TaOnw+aile+jSOWvIMY8BCuL9A4LsaEpuLUucaFI2xUXkEMeEhysukJ4ltKiukJkGhaOa1MjxDXWgX4+XgFMe
AhrdguG0UsoamSA/zSNIZY8g5iwEN4smscewbQVCkBfmkak+zwoOMVxICHsGegccQAmooYaByHCbyDGPCQE04wPUH8CgSkbt1MT4FE0y29G5cjbsQJ7XjQ8QpiwEP69TM9QfxyXal/f9NTINH079hfruuaHiNu9c3oa3oExAgx4CFt2kidOpmeIj6Fw8QAmq5/x/4Ku5w+15DObTurTXIb02MgRogBjxk82PQE8WvAANMTINEMyOSX5nAGZ/Fg4yXEgMcMHCglJZmeIv74/VKvXqanQKLpndFbfh8Pkz+U5E/SwMyBpsdADPFb7jH9+3OlvYZ068apl2i6ZCdZXdt1NT1G3AlFQuqfyXE3LyEGPKZ/fy5L/EM+H4dPcPQGHzdYPvlMjxFXXLnq35EY8BJiwGNYJHeoQID1Ajh6AzoO4PTCBrBnwFuIAY/p2pWL6/xQMEgM4OgNyBygYCRoeoy4khJI4RoDHkMMeIzfL40aJTmO6Uniy/nnm54Aier8XvzyHMzxOTq/5/ksrPQY/m960PjxLCI82IknSj17mp4CiapXh16cRneQsBvW+IHjTY+BGCMGPGjs2OiiOUT3kEyYYHoKJLoJgybI8bG7TZJ88mls/7Gmx0CMEQMelJUlnXVW9JCB7cJhadw401Mg0Y0bOI4rEUry+/wa0W2EMlMzTY+CGOPpwqN4NRzVowenFeLYnXjcieqe3t30GMa5rqsJg3hw8SJiwKMuv5zrDQQC0SjikAmOlc/n04RBE6w/xdCVq8sHXG56DDQDYsCjunWThg61+4kwFOIQAWJn3MBxCkVCpscwKqdzjrqmc0VGLyIGPMz2V8VZWdJpp5meAl5x+gmnKys1y/QYxjg+h0MEHkYMeNhVV5mewBzHkSZOZBElYsfv8+vXOb+29qwCV66uPPFK02OgmfBQ6WE9e0q//GX02LltAgHplltMTwGvmXHmDCvXDQT8AV0z5Br17MAFO7yKGPC4O++07wJEjiP9x39InTqZngRe06ltJ007dZp1ewfCkbDuPOdO02OgGREDHte3b/RwgU17BxxHuvVW01PAq37zk99YdSnegC+gq4dcrT4ZfUyPgmZkz2+0xWzaO+A40pQpUna26UngVdlp2ZoyfIo1ewfCblh3ns1eAa8jBiwwYIB0xRV27B3w+6Xbbzc9Bbzu9rNul8+CU3UC/oB+ceIveLtiCxADlpg92/vvZOj3S7fdJh1/vOlJ4HVd2nXRbT+5zfOHCxyfo1mjZpkeAy3A27/JqNOrlzRzpnevO+D3Ry+0dMcdpieBLe485051bdfVs0Hgk0/3nnevenXoZXoUtACf67qu6SHQMoJBKSdHKijw5hqCZcuk83nrebSgZVuW6cK/Xmh6jJhzfI76Z/ZX/uR8JTlJpsdBC/Bm0qJBSUnSk09KXss/x5GuvZYQQMu7oNcFumbINZ5bTOjK1V8u+wshYBFiwDKnny7dd5/pKWLHcaSuXaU5c0xPAlvN+bc56pre1VNBMOu8WTqtC9fytgmHCSwUiUgXXywtX574hwtatZI++ih6+AMwJb8kX6cvOF214VrToxwTx+fogl4X6F9X/8uzayHQMP5vW8jvl/7+9+iq+0Q/w2DBAkIA5uV0ztETlz5heoxj4vgcdWnXRQvHLyQELMT/cUtlZEQX3KWnJ24QzJoVfe8FIB78csgvdd95iXkMzvE5Sk9J1xvXvKGM1hmmx4EBxIDF+vWLHipo3Trx3t1vxgzpv/7L9BRAfXecfYduOSOx3iHL7/OrdVJrrbh2hfp17Gd6HBjCmgHo/felCy6InnqYCGsIJk2S5s/37jUTkNhc19X1r1yvJz990vQoP8rxOUpykrTsmmU6q9tZpseBQQn2ehDN4ayzpHfekdq3j/9LFt9+uzRvHiGA+OXz+TR/7Hzd9pPbTI/SqIA/oPYp7fXuxHcJAbBnAAd8+aV00UXSli3xtYfA54v+mTtXmjzZ9DTAkXts1WOatmSapO
i5+/HC8Tnq1aGXll6zVD3a9zA9DuIAMYB6ysula66RliwxPUmU40QXOS5cKI0ebXoaoOle/+J1XbX4KlVUVyjsxkdlX9L3Ej0z7hm1T2lvehTECWIAh3Bd6YknpJtukkKh6B9Txo6NzpKVZW4G4Fjt2LdDk16epNxNucZmCPgDCvgDmnPxHF138nVWvOsijhwxgMPavFm6/nrprbeir9Bb6tCB3y+1ayc99JA0cSLrA+ANruvqL/l/0YylM7SnZo8ibqRF7tfxOQq7YZ3b41wtGLtAvTN6t8j9IrEQA2iU60pLl0ZP5Vu/vnmjwHGi75/wm99It94aDQLAa/bU7NEfPviDHvzgQQXDwWY7dLA/AgZnDdZDFz2ki3pfxN4AHBYxgCMSiUjPPx99D4D33otdFOy/ncxM6YYboocmsrOP/XaBeFe8t1iPfPyI5ufN186qnXVP3sdq/+2M6DZC00+drgmDJ3BFQfwoYgBNVlAQvQzwCy9Ez0CQoqckHsnaAr8/uts/HI5e7Oj886XrrpPGjInuFQBsEwwHlVuYqyfzn9TyLctVFaqS43Pkyj2iQwkBf0ChSHTj69G+hyYMmqDrT7meCwihSYgBHJNt26QVK6J7CwoLo+sMioujexIOlpYm9ewZverh0KHRCBg+nAAADhYMB7WqaJWWb12uNSVrVLirUFvLt2pv7d56X+f3+ZXdNlu9M3qrX0Y/jeg2QqN6jlLX9K6GJkeiIwYQc8Gg9N13Um1tdI9BSorUpo3pqYDEta92n6pD1QpFQkp2ktU2ua2SHEoasUMMAABgOVaVAABgOWIAAADLEQMAAFiOGAAAwHLEAAAAliMGAACwHDEAAIDliAEAACxHDAAAYDliAAAAyxEDAABYjhgAAMByxAAAAJYjBgAAsBwxAACA5YgBAAAsRwwAAGA5YgAAAMsRAwAAWI4YAADAcsQAAACWIwYAALAcMQAAgOWIAQAALEcMAABgOWIAAADLEQMAAFiOGAAAwHLEAAAAliMGAACwHDEAAIDliAEAACxHDAAAYDliAAAAyxEDAABYjhgAAMByxAAAAJYjBgAAsBwxAACA5YgBAAAsRwwAAGA5YgAAAMsRAwAAWI4YAADAcsQAAACWIwYAALAcMQAAgOWIAQAALEcMAABgOWIAAADLEQMAAFiOGAAAwHLEAAAAliMGAACwHDEAAIDliAEAACxHDAAAYDliAAAAyxEDAABYjhgAAMByxAAAAJYjBgAAsBwxAACA5YgBAAAsRwwAAGA5YgAAAMsRAwAAWI4YAADAcsQAAACWIwYAALAcMQAAgOWIAQAALEcMAABgOWIAAADLEQMAAFiOGAAAwHLEAAAAliMGAACwHDEAAIDliAEAACxHDAAAYDliAAAAyxEDAABYjhgAAMByxAAAAJYjBgAAsBwxAACA5YgBAAAsRwwAAGA5YgAAAMsRAwAAWI4YAADAcsQAAACWIwYAALAcMQAAgOWIAQAALEcMAABgOWIAAADLEQMAAFiOGAAAwHLEAAAAliMGAACwHDEAAIDliAEAACxHDAAAYDliAAAAyxEDAABYjhgAAMByxAAAAJYjBgAAsBwxAACA5YgBAAAsRwwAAGA5YgAAAMsRAwAAWI4YAADAcsQAAACWIwYAALAcMQAAgOWIAQAALEcMAABgOWIAAADLEQMAAFiOGAAAwHLEAAAAliMGAACwHDEAAIDliAEAACxHDAAAYDliAAAAyxEDAABYjhgAAMByxAAAAJYjBgAAsBwxAACA5YgBAAAsRwwAAGA5YgAAAMsRAwAAWI4YAADAcsQAAACWIwYAALAcMQAAgOWIAQAALEcMAABgOWIAAADL/X+ddod+TKnv1QAAAABJRU5ErkJggg==", "text/plain": [ - "Text(1.5, 1.3, \"I'm fine, thanks!\")" - ] - }, - "execution_count": 4, - "metadata": {}, - 
"output_type": "execute_result" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAoAAAAGVCAYAAABuPkCWAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAeYElEQVR4nO3dfbDWdZ3/8de5QxQRwszwHmNFgXO4U1HSkhtpbH6TZogmK+uyiU6xOVvZaqvrNLujdLOWld1gOSZQuobZjWWIwoRSoImJN+iaQjeYYgqCIjfnXL8/znASuREUuDjn83jMnBk4h+v7/VzcvHme7/W9vt+aSqVSCQAAxait9gIAANi9BCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBh6qu9AMpSqSTLlydLlmz6sXJlsm5dsn59669raEg6dUq6dUuOOGLTjwMOSGpqqrN+gDeqVCpZ/uryLFmxZJOPlWtXZl3zuqxvbh1sDXUN6VTXKd326pYjuh+xyccB+xyQGoON3UgAskutXp3ce28ye3Yyc2by+OPJ2rV//3pdXVJbm7S0tH5UKq2fr6lp/fzGrzU3//0xe+2VHHNMMnp0MmJEctJJSZcuu/d5AeVavW517v3jvZn9zOzMfHpmHl/+eNY2/32w1dXUpbamNi2VlrRUWlJJ62CrSU1qa2rbvtZc+ftg26turxxzwDEZfeTojOg1IicddlK6dDLY2HVqKpWN/+XCzvHEE8n06clddyX3398ab/X1yYYNO3c/G7dZV5ccd1xy6qnJuHFJnz47dz8AT7zwRKYvmp67/nBX7l92f5orzamvrc+Glp072DZus66mLscddFxOfc+pGdc4Ln3eabCxcwlAdooNG5Kf/jT5+teTOXNao+z1R+12h437HD48mTQp+dCHWiMR4K3Y0LIhP33ip/n6gq9nzpI5qaup2+So3e6wcZ/DjxieScdPyof6fCj1tQYbb58A5G158cXkG99IvvnN5LnnqhN+b7RxDQcemHz8460x2KNHddcEtB8vrnkx31jwjXzz/m/muVeeq0r4vdHGNRzY5cB8/LiPZ9Lxk9Jjb4ONt04A8pZs2JBMmZJ87nPJqlWt5+ntiWprk65dk6uvTi64wBFBYOs2tGzIlN9Nyefu/lxWrVuVlsqeOdhqa2rTtVPXXD3y6lww5AJHBHlLBCA7bNGi5Lzzkt//vtor2TEDBiRTpyaNjdVeCbCnWfTcopz34/Py++fa12AbcOCATP3w1DQeaLCxY1wHkO1WqSTXXJMMHpw88ki1V7PjHn20de3XXPP3dxsDZatUKrnmN9dk8JTBeeT59jfYHl3+aAZPGZxrfnNNHM9hRzgCyHapVJLPfjb58pervZKd45JLki98wfUEoWSVSiWfveuz+fJvOsZ
gu2TYJfnCqC+4niDbRQDyppqbk4kTkxtuqPZKdq4JE1rPY6yrq/ZKgN2tuaU5E38+MTcs7FiDbcKgCZny/6akrtZgY9sEINvU3JyMHZv8+Mcd72XTmprkzDOTW24RgVCS5pbmjP3R2Pz48R+3XaS5o6hJTc485szcMuYWEcg2OQeQbfrqV5Pbbut48Ze0PqcZM5Jrr632SoDd6au//Wpue/y2Dhd/SVJJJTMen5Fr5xtsbJsjgGzVE08kTU2t9+jtyDp1Sh5+2B1EoARPvPBEmr7dlHXNHXuwdarrlIcvetgdRNgqRwDZoubmZPz4Pff6fjtTS0vrc632BayBXau5pTnjfzx+j72+387UUmnJ+NvHp7nFYGPLBCBbNHVqsmDBzr9/755ow4bW5zp1arVXAuxKUx+emgXLFuz0+/fuiTa0bMiCvyzI1IcNNrZMALJFP/xhWW+MqK1Nbr652qsAdqUfLvph6mrKGWy1NbW5+RGDjS1zDiCbWbEiOeCAMo7+vV59fbJ8edK9e7VXAuxsK15bkQO+dEARR/9er762PssvWZ7unbtXeynsYRwBZDM//3l58Ze0Puc77qj2KoBd4edP/ry4+EtaXwq+40mDjc0JQDYzd27r0bDSNDS0Pneg45m7dG7qa8sbbA21DZn7R4ONzQlANrNyZRnv/n2j5ubW5w50PCvXrizi3b9v1Fxpzsq1BhubE4BsZt26jnnh5zdTqSRr11Z7FcCusK55XUo85b1SqWTtBoONzQlANtO1a+u7YktTW5vst1+1VwHsCl336pramvIGW21Nbfbby2Bjc+X9a+BNHX54tVdQHTU1yWGHVXsVwK5weLcyB1tNanJYN4ONzQlANnP66WXeFWPDhuSMM6q9CmBXOL3P6WmulDfYNlQ25Iyjz6j2MtgDCUA2M3hwcvDB1V7F7nfIIcmgQdVeBbArDO45OAd3LW+wHbLfIRn0boONzQlANlNTk4wdW9alYOrrW59zTU21VwLsCjU1NRnbb2xRl4Kpr63P2L5jU2OwsQUCkC266KKyYqi2NrnwwmqvAtiVLjr2otSknMFWW1ObC4812NgyAcgWHXVUctVV1V7F7nPVVa3PGei4jtr/qFw1spzBdtWIq3LU/gYbW+ZewGxVc3MybFjy4IMd99Zw9fXJkCHJffcldeXcIx6K1dzSnGHfG5YH//pgh701XH1tfYb0HJL7JtyXulqDjS1zBJCtqqtLbrqpNZI64nUB6+pan9v3vy/+oBR1tXW56cM3pb62vkNeF7Cupi71tfX5/hnfF39sU8f7289O1adPcvfdSZcuHSuS6uqSffZJ7rmn9TkC5ejzzj65e/zd6dLQJXU1HWew1dXUZZ+GfXLP+HvS550GG9vmJWC2y8MPJyNGtN4rt72/HFxfn3Tr1hp/TU3VXg1QLQ8/93BGfH9EVq5d2e5fDq6vqU+3zt1yzz/dk6YDDTbenABkuz31VDJ8ePLss+33QtF1dUnPnsns2Unv3tVeDVBtT734VIZ/f3ieXfVsu71QdF1NXXp27ZnZ/zQ7vXsYbGwfLwGz3Xr3ThYuTM46q/Xn7em8wI1rPeus1ucg/oAk6d2jdxZeuDBn9WsdbLXt6L/FjWs9q99ZWXjhQvHHDnEEkLfkJz9JPvGJZNmyZE//G1RTkxx0UHLdda23uQPYkp8s/kk+8YtPZNmqZalkzx5sNanJQV0PynUfvC6nH22wseMEIG/ZmjXJl7+cTJ7c+uNkz4nBjRex3nvv5NJLk898pvXHANuyZv2afHnelzP5vslZs751sO0pMbjxItZ7N+ydS997aT4z7DPZu8Fg460RgLxtq1cn06cnX/ta8thjrW+yqNYbRTbuu2/f5JOfTMaNS/bdtzprAdqv1etWZ/rD0/O1BV/LY8sfS31tfdXeKLJx330P6JtPHv/JjGsal307GWy8PQKQnaZSSebNa32p9Uc/Stav3z0xuHEfDQ3JmDHJpEnJiSeWdSs7YNe
oVCqZ96d5ue7+6/Kjx36U9S3rd0sMbtxHQ21DxvQdk0nHT8qJh5zovr7sNAKQXWLNmtYYvOeeZObM1ruJtLS0vgu3trY1Dt+KhobW7TQ3t25nyJDk1FNbL1EzbJiXeYFdZ836NZn3p3m555l7MvPpmXnw2QfTUmlJXU1damtqs77lrQ22htqGtFRa0lxpTm1NbYb0HJJTjzw1I3qNyLBDh3mZl11CALJbvPxycu+9ySOPJEuXJs8803pZmT/9KXnttW0/tnPn5NBDW9+526tXcvjhSf/+ycknJ1277p71A7zRy2tfzr1/vDePPP9Ilq5YmmdWPJOnXnwqf3r5T3ltw7YHW+f6zjl0v0PTu0fv9OreK4d3Pzz939U/Jx92crruZbCx6wlAqqpSSV54ofUC0+vW/f3IYEND0qlT6wWb3/lOL+cC7UelUskLr76QlWtXZl3zuqxvbh1sDXUN6VTXKd326pZ37vNOL+dSVQIQAKAw7eeKlwAA7BQCEACgMAIQAKAwAhAAoDACEACgMAIQAKAwAhAAoDD11V4AO2bNmjX53e9+l1deeaXaS+Et6NKlS4YMGZK93bMOdkilUsmjjz6aZcuWxeVrd4+ampocdNBB6devn4tWd0ACsJ2oVCq56qqrcvXVV4u/dq5Lly657LLL8rnPfc5Qhe3wu9/9Luecc06eeuqpai+lSL17987NN9+cIUOGVHsp7EQCsJ2YNm1aLr/88vzbv/1bxo8fn/333188tDOVSiV/+9vfctNNN+Xyyy/PYYcdlvPOO6/ay4I92sqVKzNq1Kj8wz/8Q+68884cffTRqaurq/ayitDc3JzFixfniiuuyKhRo7JkyZJ069at2stiJ3EruHbi1FNPTaVSyaxZs6q9FHaCkSNHpra2NnfddVe1lwJ7tOnTp+cf//Efs3Tp0hx22GHVXk6R/vjHP+bwww/PtGnTMm7cuGovh53Em0DaicWLF2fYsGHVXgY7yXvf+94sXry42suAPd7ixYtz6KGHir8qOuyww3LooYeaWR2MAGwnNmzYkE6dOm3yuSOOOCIPPfTQJp875ZRTcvvtt7/p9l7/684///x89atffVvr2979luKxxx7L+9///jQ1NeXoo4/OHXfcscnXO3XqlA0bNlRpddB+bGn2JZvOv/PPPz9z5sx52/u68sorc/TRR2fo0KF54IEHcvbZZ7/tbW7NihUrMnny5E0+tyvm6FuZ71tah5nV8TgHkA6rubm5aucKVSqVTJkyJX369Mlvf/vbfOQjH8lf/vKXqqwF2D5f/OIX8/TTT6dnz55JkltuuWWX7WtjAF566aW7bB+wLY4AdmCrVq3KBRdckOOPPz5NTU2ZOHFi1q1bt83HrF69OhMmTEj//v3Tv3//fP7zn9/u/d177705+eST8573vCcXXXRR2+eff/75nHnmmWlsbEz//v3zne98J0kyc+bMjB49Okny8ssvp6GhIVOmTEmS3HTTTZkwYcJm+/jrX/+a4cOHZ8iQIenXr18mTZqUlpaWJMmNN96Y4cOH5yMf+UgaGxuzYMGC3H///RkxYkSOPfbYDBo0KLfeeutm21y2bFkOPPDAvPrqq22fO/fcc/Otb30rSfKrX/0qgwcPTlNTU97//vfnscceS5LMmTMnAwcObHvMI488kiOOOCJJ0q9fv/Tp0ydJ8uqrr2avvfba7t9HYMd069at7Sjh+eefn4kTJ2bUqFHp1atXJkyYkAULFuSUU07JkUcemU996lNb3MawYcPy2muvZfTo0fnkJz+5yb/vJUuWpHv37rnyyiszZMiQ9O7dO7/4xS/aHrs9c+aNLrrooqxatSoDBw7Mscce2/b5rc3RH/zgBxk6dGgGDRqUAQMG5Gc/+1nb10455ZR85jOf2eLjXm/u3Lnp27dvHnjggSxfvjyjR49OY2Njmpqa8s///M9vumY6FkcA27mzzz57k2vKvf4yCZ/+9Kdz8skn5/rrr0+lUskFF1yQa6+
9NpdccslWt/df//VfWbt2bR5++OGsWbMmJ510Uo4++ujteinkD3/4Q2bPnp3169enb9+++c1vfpMTTzwx//qv/5o+ffrktttuy/PPP58hQ4ZkwIABOfnkk3POOedk7dq1mT17do477rjMmjUrEydOzF133ZXTTjtts3107949P/vZz7Lvvvumubk5p59+ev73f/8355xzTpJk/vz5WbhwYfr06ZMVK1Zk+PDh+cUvfpGePXvmhRdeyODBgzNs2LAcfPDBbds86KCDMmrUqEybNi0TJ07Mc889l1mzZmXKlCl5/vnnc+6552bOnDlpbGzM9OnTM2bMmDz66KPb9efz5JNP5vzzz8+3v/3t7fr1wI679tprN/n5okWLMnv27NTW1qZv37556aWXctddd2XdunU58sgj8y//8i/p16/fJo+ZN29eampqMnfu3HTv3n2zl5RXrlyZpqamfP7zn8+dd96Ziy++OB/84AezYsWKTJw48U3nzBt9+9vfzsCBAzc7jWdrc/QDH/hAPvrRj6ampiZLlizJCSeckKVLl7Z9c7m1x210yy235Oqrr84dd9yRXr165Stf+Up69eqVmTNnJklefPHFHf1tp51zBLCdu+WWW/LQQw+1fbz+O8nbb789X/rSlzJw4MAMGjQoc+fOfdPraM2aNSsXXHBBamtr06VLl4wfP36736l69tlnp76+PnvvvXcGDhyYP/zhD23bvPDCC5Mk73rXu3LmmWdm1qxZbb/uvvvuy6xZs3LppZfmwQcfTEtLS+65556MGDFis320tLTk3//93zNgwIAMGjQoDzzwwCYDdNiwYW1H3ubNm5enn346p512WgYOHJhRo0YlSZ544onNtnvxxRfnuuuuS5Jcf/31+ehHP5p999038+fPT2NjYxobG5Mk48aNy7Jly7b75dxx48blyiuvzAc/+MHt+vXA23f66aenc+fO6dSpUxobG/OBD3wgDQ0N6dKlS/r27Zv/+7//2+Ftdu7cOWeeeWaS5MQTT2ybbzsyZ7bH1uboM888k9NOOy39+/fPGWeckRdffDHPPPPMmz4uSaZOnZr/+Z//yezZs9OrV68kyQknnJBf/vKX+fSnP52f/OQn6dKly1taL+2XI4AdWKVSyYwZM3LUUUe95W3syLUGO3fu3Pbjurq6rZ4w/Pptjho1KrNmzcqvf/3rTJ48OY2NjZk2bVre8Y535N3vfvdmj73mmmvy/PPPZ/78+encuXM+9alP5bXXXmv7+r777tv240qlkn79+mXevHlvuvbjjz8+++yzT2bPnp0pU6Zs1+V26uvr09zc3Pbz169jo4ULF+bDH/7wm24L2HneOIu2dzZty1577dU2u+rq6tr+7e/InNkeW1vrOeeck8mTJ2fMmDFJkh49emwyc7b1HJuamjJ37twsWrQo73vf+5K0RuxDDz2UWbNm5bbbbssVV1yRhQsXpq6ubqe8oYY9nyOAHdgZZ5yRL3zhC22D4KWXXnrTI4CjRo3K9773vVQqlbzyyiuZOnVq23l63/jGN3LZZZft8DpGjRqV66+/PkmyfPny3HbbbTn11FPbvvaDH/wg3bt3T5cuXTJq1Kj853/+Z9t30W/00ksv5d3vfnc6d+6cv/71r9s812bYsGF55plnNom5hx56aKvnQV588cUZP358jjnmmLZoPuGEE7Jo0aI88sgjSZKbb745Bx98cA4++OAceeSRWbp0aZYvX56k9bvsN5o2bVq6du36Zr9FQDv1ZnNm5MiRWbBgwWaP22+//bJmzZo3PS97o5deeqnt6N20adPy0ksvbfcaN54zOGHChNx5551JWo8o7rvvvhk7dmy+/vWv58knn8zq1au3e5u0fwKwA/vKV77S9nJAU1NTRo4cmSVLlmzzMVdccUUaGhrS2NiYoUOH5kMf+lDGjh2bpPXSJvvvv/8Or+NrX/taHn/88TQ2Nmb48OH5j//4jwwdOjRJcuyxx2blypUZOXJkktYLXi9
durTt52908cUXZ/78+enXr1/OO++8rYZikrzjHe/IHXfckauuuioDBgxI3759c+mll7a9aeSNxowZk9WrV2fSpEltnzvggAMyffr0jB8/Pk1NTfnWt76VW2+9te0emZ/97Gdz/PHH54QTTkiPHj022+bkyZO3eGQQ6Bi2NWeam5vz+9//Pocccshmj+vRo0fbXHn9qTtbc+2112bMmDEZNGhQFi5cuMPXRTzmmGPyq1/9KhdffHFmzJiROXPmZMiQIRk4cGCGDRuWL33pS213+fjYxz6W2bNn79D2aX/cCaSd6NmzZz7xiU/k8ssvr9oaTjrppPzyl7/ssEe0HnjggZx77rlZvHhxamt37fdG//3f/53rrrsuzz777C7dD7R3l112WW699dZ2eR/g+++/P9/5znfy3e9+t9pLedt69+6ds846K1dffXW1l8JO4hzAdqTarX7vvfdWdf+70sc+9rHMnDkz3/3ud3d5/CXV/7OE9qS9/ns57rjjctxxx1V7GTtFe/0zYOsEYDvRrVs3R4t2od39HfqyZcvSvXv33bpPaI+6deuW5cuXZ/369WloaKj2coq0fv36LF++3MzqYJwD2E6MHj06M2bMyJ///OdqL4W36c9//nNmzJjR9uYaYOtGjx6dVatW5cYbb6z2Uop14403ZtWqVWZWB+McwHZi6dKled/73pcXX3wxI0eOzP77779Dl2ih+iqVSv72t7/l7rvvTo8ePfLrX/86hx9+eLWXBXu0SqWSCRMm5MYbb8zQoUNzzDHHVO0Wj6Vpbm7O448/nvnz5+f888/PDTfc4P+dDkQAtiPLli3LDTfckDlz5uSVV16p9nJ4C7p06ZJTTjklEyZMyEEHHVTt5UC70NzcnJtvvjm33357/vKXvzgfbTepqanJwQcfnDPOOCPnnHOO8O5gBCAAQGGcAwgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQg
AUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUJj/DyUINMAbF8wxAAAAAElFTkSuQmCC", - "text/plain": [ - "
" + "
" ] }, "metadata": {}, @@ -171,19 +282,11 @@ "text": [ "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", "\n", - "\u001b[32m***** Response from calling function \"python\" *****\u001b[0m\n", - "Text(1.5, 1.3, \"I'm fine, thanks!\")\n", - "\u001b[32m***************************************************\u001b[0m\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", - "\n", - "The drawing of two agents with example dialog has been executed, but as instructed, `plt.show()` has not been added, so the image will not be displayed here. However, the script created a matplotlib figure with two agents represented by circles, one blue and one green, along with example dialog text in speech bubbles.\n", - "\n", - "--------------------------------------------------------------------------------\n", "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", "\n", - "\n", + "\u001b[32m***** Response from calling tool \"call_ujcz2CkK0UgEEUen7X1ctXhe\" *****\u001b[0m\n", + "None\n", + "\u001b[32m**********************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", @@ -195,56 +298,12 @@ } ], "source": [ - "llm_config = {\n", - " \"config_list\": config_list,\n", - " \"timeout\": 120,\n", - "}\n", - "chatbot = autogen.AssistantAgent(\n", - " name=\"chatbot\",\n", - " system_message=\"For coding tasks, only use the functions you have been provided with. 
Reply TERMINATE when the task is done.\",\n", - " llm_config=llm_config,\n", - ")\n", - "\n", - "# create a UserProxyAgent instance named \"user_proxy\"\n", - "user_proxy = autogen.UserProxyAgent(\n", - " name=\"user_proxy\",\n", - " is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n", - " human_input_mode=\"NEVER\",\n", - " max_consecutive_auto_reply=10,\n", - " code_execution_config={\n", - " \"work_dir\": \"coding\",\n", - " \"use_docker\": False,\n", - " }, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.\n", - ")\n", - "\n", - "\n", - "# define functions according to the function description\n", - "\n", - "\n", - "@user_proxy.register_for_execution()\n", - "@chatbot.register_for_llm(name=\"python\", description=\"run cell in ipython and return the execution result.\")\n", - "def exec_python(cell: Annotated[str, \"Valid Python cell to execute.\"]) -> str:\n", - " ipython = get_ipython()\n", - " result = ipython.run_cell(cell)\n", - " log = str(result.result)\n", - " if result.error_before_exec is not None:\n", - " log += f\"\\n{result.error_before_exec}\"\n", - " if result.error_in_exec is not None:\n", - " log += f\"\\n{result.error_in_exec}\"\n", - " return log\n", - "\n", - "\n", - "@user_proxy.register_for_execution()\n", - "@chatbot.register_for_llm(name=\"sh\", description=\"run a shell script and return the execution result.\")\n", - "def exec_sh(script: Annotated[str, \"Valid Python cell to execute.\"]) -> str:\n", - " return user_proxy.execute_code_blocks([(\"sh\", script)])\n", - "\n", - "\n", - "# start the conversation\n", - "user_proxy.initiate_chat(\n", - " chatbot,\n", - " message=\"Draw two agents chatting with each other with an example dialog. 
Don't add plt.show().\",\n", - ")" + "with Cache.disk():\n", + " # start the conversation\n", + " user_proxy.initiate_chat(\n", + " chatbot,\n", + " message=\"Draw two agents chatting with each other with an example dialog. Don't add plt.show().\",\n", + " )" ] }, { diff --git a/notebook/agentchat_function_call_async.ipynb b/notebook/agentchat_function_call_async.ipynb index ca65291b2c04..bb6fa48d6619 100644 --- a/notebook/agentchat_function_call_async.ipynb +++ b/notebook/agentchat_function_call_async.ipynb @@ -61,7 +61,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "id": "dca301a4", "metadata": {}, "outputs": [], @@ -71,6 +71,7 @@ "from typing_extensions import Annotated\n", "\n", "import autogen\n", + "from autogen.cache import Cache\n", "\n", "config_list = autogen.config_list_from_json(\n", " \"OAI_CONFIG_LIST\",\n", @@ -119,9 +120,73 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "id": "9fb85afb", "metadata": {}, + "outputs": [], + "source": [ + "llm_config = {\n", + " \"config_list\": config_list,\n", + "}\n", + "\n", + "coder = autogen.AssistantAgent(\n", + " name=\"chatbot\",\n", + " system_message=\"For coding tasks, only use the functions you have been provided with. You have a stopwatch and a timer, these tools can and should be used in parallel. 
Reply TERMINATE when the task is done.\",\n", + " llm_config=llm_config,\n", + ")\n", + "\n", + "# create a UserProxyAgent instance named \"user_proxy\"\n", + "user_proxy = autogen.UserProxyAgent(\n", + " name=\"user_proxy\",\n", + " system_message=\"A proxy for the user for executing code.\",\n", + " is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n", + " human_input_mode=\"NEVER\",\n", + " max_consecutive_auto_reply=10,\n", + " code_execution_config={\"work_dir\": \"coding\"},\n", + ")\n", + "\n", + "# define functions according to the function description\n", + "\n", + "# An example async function registered using register_for_llm and register_for_execution decorators\n", + "\n", + "\n", + "@user_proxy.register_for_execution()\n", + "@coder.register_for_llm(description=\"create a timer for N seconds\")\n", + "async def timer(num_seconds: Annotated[str, \"Number of seconds in the timer.\"]) -> str:\n", + " for i in range(int(num_seconds)):\n", + " time.sleep(1)\n", + " # should print to stdout\n", + " return \"Timer is done!\"\n", + "\n", + "\n", + "# An example sync function registered using register_function\n", + "def stopwatch(num_seconds: Annotated[str, \"Number of seconds in the stopwatch.\"]) -> str:\n", + " for i in range(int(num_seconds)):\n", + " time.sleep(1)\n", + " return \"Stopwatch is done!\"\n", + "\n", + "\n", + "autogen.agentchat.register_function(\n", + " stopwatch,\n", + " caller=coder,\n", + " executor=user_proxy,\n", + " description=\"create a stopwatch for N seconds\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "159cd7b6", + "metadata": {}, + "source": [ + "Start the conversation. `await` is used to pause and resume code execution for async IO operations. Without `await`, an async function returns a coroutine object but doesn't execute the function. 
With `await`, the async function is executed and the current function is paused until the awaited function returns a result." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "37514ea3", + "metadata": {}, "outputs": [ { "name": "stdout", @@ -134,26 +199,32 @@ "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", "\n", - "\u001b[32m***** Suggested tool Call (call_fGgH8U261nOnx3JGNJWslhh6): timer *****\u001b[0m\n", + "\u001b[32m***** Suggested tool Call (call_h6324df0CdGPDNjPO8GrnAQJ): timer *****\u001b[0m\n", "Arguments: \n", "{\"num_seconds\":\"5\"}\n", "\u001b[32m**********************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[35m\n", - ">>>>>>>> EXECUTING ASYNC FUNCTION timer...\u001b[0m\n", + ">>>>>>>> EXECUTING ASYNC FUNCTION timer...\u001b[0m\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", "\n", "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", "\n", - "\u001b[32m***** Response from calling tool \"timer\" *****\u001b[0m\n", + "\u001b[32m***** Response from calling tool \"call_h6324df0CdGPDNjPO8GrnAQJ\" *****\u001b[0m\n", "Timer is done!\n", - "\u001b[32m**********************************************\u001b[0m\n", + "\u001b[32m**********************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", "\n", - "\u001b[32m***** Suggested tool Call (call_BZs6ynF8gtcZKhONiIRZkECB): stopwatch *****\u001b[0m\n", + "\u001b[32m***** Suggested tool Call (call_7SzbQxI8Nsl6dPQtScoSGPAu): stopwatch *****\u001b[0m\n", "Arguments: \n", "{\"num_seconds\":\"5\"}\n", 
"\u001b[32m**************************************************************************\u001b[0m\n", @@ -165,9 +236,9 @@ "\n", "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", "\n", - "\u001b[32m***** Response from calling tool \"stopwatch\" *****\u001b[0m\n", + "\u001b[32m***** Response from calling tool \"call_7SzbQxI8Nsl6dPQtScoSGPAu\" *****\u001b[0m\n", "Stopwatch is done!\n", - "\u001b[32m**************************************************\u001b[0m\n", + "\u001b[32m**********************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", @@ -179,57 +250,11 @@ } ], "source": [ - "llm_config = {\n", - " \"config_list\": config_list,\n", - "}\n", - "\n", - "coder = autogen.AssistantAgent(\n", - " name=\"chatbot\",\n", - " system_message=\"For coding tasks, only use the functions you have been provided with. You have a stopwatch and a timer, these tools can and should be used in parallel. 
Reply TERMINATE when the task is done.\",\n", - " llm_config=llm_config,\n", - ")\n", - "\n", - "# create a UserProxyAgent instance named \"user_proxy\"\n", - "user_proxy = autogen.UserProxyAgent(\n", - " name=\"user_proxy\",\n", - " system_message=\"A proxy for the user for executing code.\",\n", - " is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n", - " human_input_mode=\"NEVER\",\n", - " max_consecutive_auto_reply=10,\n", - " code_execution_config={\"work_dir\": \"coding\"},\n", - ")\n", - "\n", - "# define functions according to the function description\n", - "\n", - "# An example async function\n", - "\n", - "\n", - "@user_proxy.register_for_execution()\n", - "@coder.register_for_llm(description=\"create a timer for N seconds\")\n", - "async def timer(num_seconds: Annotated[str, \"Number of seconds in the timer.\"]) -> str:\n", - " for i in range(int(num_seconds)):\n", - " time.sleep(1)\n", - " # should print to stdout\n", - " return \"Timer is done!\"\n", - "\n", - "\n", - "# An example sync function\n", - "@user_proxy.register_for_execution()\n", - "@coder.register_for_llm(description=\"create a stopwatch for N seconds\")\n", - "def stopwatch(num_seconds: Annotated[str, \"Number of seconds in the stopwatch.\"]) -> str:\n", - " for i in range(int(num_seconds)):\n", - " time.sleep(1)\n", - " return \"Stopwatch is done!\"\n", - "\n", - "\n", - "# start the conversation\n", - "# 'await' is used to pause and resume code execution for async IO operations.\n", - "# Without 'await', an async function returns a coroutine object but doesn't execute the function.\n", - "# With 'await', the async function is executed and the current function is paused until the awaited function returns a result.\n", - "await user_proxy.a_initiate_chat( # noqa: F704\n", - " coder,\n", - " message=\"Create a timer for 5 seconds and then a stopwatch for 5 seconds.\",\n", - ")" + "with Cache.disk():\n", + " await 
user_proxy.a_initiate_chat( # noqa: F704\n", + " coder,\n", + " message=\"Create a timer for 5 seconds and then a stopwatch for 5 seconds.\",\n", + " )" ] }, { @@ -243,7 +268,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 5, "id": "2472f95c", "metadata": {}, "outputs": [], @@ -276,9 +301,17 @@ ")" ] }, + { + "cell_type": "markdown", + "id": "612bdd22", + "metadata": {}, + "source": [ + "Finally, we initialize the chat that would use the functions defined above:" + ] + }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 6, "id": "e2c9267a", "metadata": {}, "outputs": [ @@ -293,14 +326,21 @@ "2) Pretty print the result as md.\n", "3) when 1 and 2 are done, terminate the group chat\n", "\n", - "--------------------------------------------------------------------------------\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", "\n", - "\u001b[32m***** Suggested tool Call (call_zlHKR9LBzCqs1iLId5kvNvJ5): timer *****\u001b[0m\n", + "\u001b[32m***** Suggested tool Call (call_qlS3QkcY1NkfgpKtCoR6oGo7): timer *****\u001b[0m\n", "Arguments: \n", "{\"num_seconds\": \"5\"}\n", "\u001b[32m**********************************************************************\u001b[0m\n", - "\u001b[32m***** Suggested tool Call (call_rH1dgbS9itiJO1Gwnxxhcm35): stopwatch *****\u001b[0m\n", + "\u001b[32m***** Suggested tool Call (call_TEHlvMgCp0S3RzBbVsVPXWeL): stopwatch *****\u001b[0m\n", "Arguments: \n", "{\"num_seconds\": \"5\"}\n", "\u001b[32m**************************************************************************\u001b[0m\n", @@ -314,29 +354,23 @@ "\n", "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", "\n", - "\u001b[32m***** Response from calling tool \"timer\" *****\u001b[0m\n", + "\u001b[32m***** Response from calling tool \"call_qlS3QkcY1NkfgpKtCoR6oGo7\" 
*****\u001b[0m\n", "Timer is done!\n", - "\u001b[32m**********************************************\u001b[0m\n", + "\u001b[32m**********************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", "\n", - "\u001b[32m***** Response from calling tool \"stopwatch\" *****\u001b[0m\n", + "\u001b[32m***** Response from calling tool \"call_TEHlvMgCp0S3RzBbVsVPXWeL\" *****\u001b[0m\n", "Stopwatch is done!\n", - "\u001b[32m**************************************************\u001b[0m\n", + "\u001b[32m**********************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33mMarkdown_agent\u001b[0m (to chat_manager):\n", "\n", - "The results of the timer and stopwatch are as follows:\n", - "\n", - "- Timer: Timer is done!\n", - "- Stopwatch: Stopwatch is done!\n", - "\n", - "Now, I will proceed to terminate the group chat.\n", - "\u001b[32m***** Suggested tool Call (call_3Js7oU80vPatnA8IiaKXB5Xu): terminate_group_chat *****\u001b[0m\n", + "\u001b[32m***** Suggested tool Call (call_JuQwvj4FigfvGyBeTMglY2ee): terminate_group_chat *****\u001b[0m\n", "Arguments: \n", - "{\"message\":\"The session has concluded, and the group chat will now be terminated.\"}\n", + "{\"message\":\"Both timer and stopwatch have completed their countdowns. 
The group chat is now being terminated.\"}\n", "\u001b[32m*************************************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", @@ -346,23 +380,26 @@ "\n", "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", "\n", - "\u001b[32m***** Response from calling tool \"terminate_group_chat\" *****\u001b[0m\n", - "[GROUPCHAT_TERMINATE] The session has concluded, and the group chat will now be terminated.\n", - "\u001b[32m*************************************************************\u001b[0m\n", + "\u001b[32m***** Response from calling tool \"call_JuQwvj4FigfvGyBeTMglY2ee\" *****\u001b[0m\n", + "[GROUPCHAT_TERMINATE] Both timer and stopwatch have completed their countdowns. The group chat is now being terminated.\n", + "\u001b[32m**********************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n" ] } ], "source": [ - "# todo: remove comment after fixing https://github.com/microsoft/autogen/issues/1205\n", - "await user_proxy.a_initiate_chat( # noqa: F704\n", - " manager,\n", - " message=\"\"\"\n", + "message = \"\"\"\n", "1) Create a timer and a stopwatch for 5 seconds each in parallel.\n", "2) Pretty print the result as md.\n", - "3) when 1 and 2 are done, terminate the group chat\"\"\",\n", - ")" + "3) when 1 and 2 are done, terminate the group chat\n", + "\"\"\"\n", + "\n", + "with Cache.disk():\n", + " await user_proxy.a_initiate_chat( # noqa: F704\n", + " manager,\n", + " message=message,\n", + " )" ] }, { @@ -390,7 +427,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.5" + "version": "3.10.13" } }, "nbformat": 4, diff --git a/notebook/agentchat_function_call_currency_calculator.ipynb b/notebook/agentchat_function_call_currency_calculator.ipynb index e42f91c1a6a4..6637a4a1ab2d 100644 --- 
a/notebook/agentchat_function_call_currency_calculator.ipynb +++ b/notebook/agentchat_function_call_currency_calculator.ipynb @@ -63,6 +63,8 @@ "from typing_extensions import Annotated\n", "\n", "import autogen\n", + "from autogen.cache import Cache\n", + "\n", "\n", "config_list = autogen.config_list_from_json(\n", " \"OAI_CONFIG_LIST\",\n", @@ -274,9 +276,9 @@ "\n", "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", "\n", - "\u001b[32m***** Response from calling tool \"currency_calculator\" *****\u001b[0m\n", + "\u001b[32m***** Response from calling tool \"call_ubo7cKE3TKumGHkqGjQtZisy\" *****\u001b[0m\n", "112.22727272727272 EUR\n", - "\u001b[32m************************************************************\u001b[0m\n", + "\u001b[32m**********************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", @@ -298,11 +300,12 @@ } ], "source": [ - "# start the conversation\n", - "user_proxy.initiate_chat(\n", - " chatbot,\n", - " message=\"How much is 123.45 USD in EUR?\",\n", - ")" + "with Cache.disk():\n", + " # start the conversation\n", + " user_proxy.initiate_chat(\n", + " chatbot,\n", + " message=\"How much is 123.45 USD in EUR?\",\n", + " )" ] }, { @@ -353,14 +356,21 @@ " amount: Annotated[float, Field(0, description=\"Amount of currency\", ge=0)]\n", "\n", "\n", - "@user_proxy.register_for_execution()\n", - "@chatbot.register_for_llm(description=\"Currency exchange calculator.\")\n", + "# another way to register a function is to use register_function instead of register_for_execution and register_for_llm decorators\n", "def currency_calculator(\n", " base: Annotated[Currency, \"Base currency: amount and currency symbol\"],\n", " quote_currency: Annotated[CurrencySymbol, \"Quote currency symbol\"] = \"USD\",\n", ") -> Currency:\n", " quote_amount = exchange_rate(base.currency, quote_currency) * base.amount\n", - 
" return Currency(amount=quote_amount, currency=quote_currency)" + " return Currency(amount=quote_amount, currency=quote_currency)\n", + "\n", + "\n", + "autogen.agentchat.register_function(\n", + " currency_calculator,\n", + " caller=chatbot,\n", + " executor=user_proxy,\n", + " description=\"Currency exchange calculator.\",\n", + ")" ] }, { @@ -434,14 +444,14 @@ "\n", "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", "\n", - "\u001b[32m***** Response from calling tool \"currency_calculator\" *****\u001b[0m\n", + "\u001b[32m***** Response from calling tool \"call_0VuU2rATuOgYrGmcBnXzPXlh\" *****\u001b[0m\n", "{\"currency\":\"USD\",\"amount\":123.45300000000002}\n", - "\u001b[32m************************************************************\u001b[0m\n", + "\u001b[32m**********************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", "\n", - "112.23 Euros is equivalent to approximately 123.45 US Dollars.\n", + "112.23 Euros is approximately 123.45 US Dollars.\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", @@ -458,11 +468,12 @@ } ], "source": [ - "# start the conversation\n", - "user_proxy.initiate_chat(\n", - " chatbot,\n", - " message=\"How much is 112.23 Euros in US Dollars?\",\n", - ")" + "with Cache.disk():\n", + " # start the conversation\n", + " user_proxy.initiate_chat(\n", + " chatbot,\n", + " message=\"How much is 112.23 Euros in US Dollars?\",\n", + " )" ] }, { @@ -494,9 +505,9 @@ "\n", "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", "\n", - "\u001b[32m***** Response from calling tool \"currency_calculator\" *****\u001b[0m\n", + "\u001b[32m***** Response from calling tool \"call_A6lqMu7s5SyDvftTSeQTtPcj\" *****\u001b[0m\n", "{\"currency\":\"EUR\",\"amount\":112.22727272727272}\n", - 
"\u001b[32m************************************************************\u001b[0m\n", + "\u001b[32m**********************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", @@ -518,11 +529,12 @@ } ], "source": [ - "# start the conversation\n", - "user_proxy.initiate_chat(\n", - " chatbot,\n", - " message=\"How much is 123.45 US Dollars in Euros?\",\n", - ")" + "with Cache.disk():\n", + " # start the conversation\n", + " user_proxy.initiate_chat(\n", + " chatbot,\n", + " message=\"How much is 123.45 US Dollars in Euros?\",\n", + " )" ] }, { diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py index 64b8473cf694..8ff8038da7e9 100644 --- a/test/agentchat/test_conversable_agent.py +++ b/test/agentchat/test_conversable_agent.py @@ -13,6 +13,7 @@ import autogen from autogen.agentchat import ConversableAgent, UserProxyAgent +from autogen.agentchat.conversable_agent import register_function from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST from conftest import skip_openai @@ -823,6 +824,47 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]): assert get_origin(user_proxy_1.function_map) == expected_function_map +def test_register_functions(): + with pytest.MonkeyPatch.context() as mp: + mp.setenv("OPENAI_API_KEY", "mock") + agent = ConversableAgent(name="agent", llm_config={"config_list": []}) + user_proxy = UserProxyAgent(name="user_proxy") + + def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str: + pass + + register_function( + exec_python, + caller=agent, + executor=user_proxy, + description="run cell in ipython and return the execution result.", + ) + + expected_function_map = {"exec_python": exec_python} + assert get_origin(user_proxy.function_map) == expected_function_map + + expected = [ + { + "type": "function", + 
"function": { + "description": "run cell in ipython and return the execution result.", + "name": "exec_python", + "parameters": { + "type": "object", + "properties": { + "cell": { + "type": "string", + "description": "Valid Python cell to execute.", + } + }, + "required": ["cell"], + }, + }, + } + ] + assert agent.llm_config["tools"] == expected + + @pytest.mark.skipif( skip or not sys.version.startswith("3.10"), reason="do not run if openai is not installed or py!=3.10", @@ -860,7 +902,7 @@ def test_function_registration_e2e_sync() -> None: timer_mock = unittest.mock.MagicMock() stopwatch_mock = unittest.mock.MagicMock() - # An example async function + # An example async function registered using decorators @user_proxy.register_for_execution() @coder.register_for_llm(description="create a timer for N seconds") def timer(num_seconds: Annotated[str, "Number of seconds in the timer."]) -> str: @@ -873,9 +915,7 @@ def timer(num_seconds: Annotated[str, "Number of seconds in the timer."]) -> str timer_mock(num_seconds=num_seconds) return "Timer is done!" - # An example sync function - @user_proxy.register_for_execution() - @coder.register_for_llm(description="create a stopwatch for N seconds") + # An example sync function registered using register_function def stopwatch(num_seconds: Annotated[str, "Number of seconds in the stopwatch."]) -> str: print("stopwatch is running") # assert False, "stopwatch's alive!" @@ -887,6 +927,8 @@ def stopwatch(num_seconds: Annotated[str, "Number of seconds in the stopwatch."] stopwatch_mock(num_seconds=num_seconds) return "Stopwatch is done!" + register_function(stopwatch, caller=coder, executor=user_proxy, description="create a stopwatch for N seconds") + # start the conversation # 'await' is used to pause and resume code execution for async IO operations. # Without 'await', an async function returns a coroutine object but doesn't execute the function. 
@@ -938,9 +980,7 @@ async def test_function_registration_e2e_async() -> None: timer_mock = unittest.mock.MagicMock() stopwatch_mock = unittest.mock.MagicMock() - # An example async function - @user_proxy.register_for_execution() - @coder.register_for_llm(description="create a timer for N seconds") + # An example async function registered using register_function async def timer(num_seconds: Annotated[str, "Number of seconds in the timer."]) -> str: print("timer is running") for i in range(int(num_seconds)): @@ -951,7 +991,9 @@ async def timer(num_seconds: Annotated[str, "Number of seconds in the timer."]) timer_mock(num_seconds=num_seconds) return "Timer is done!" - # An example sync function + register_function(timer, caller=coder, executor=user_proxy, description="create a timer for N seconds") + + # An example sync function registered using decorators @user_proxy.register_for_execution() @coder.register_for_llm(description="create a stopwatch for N seconds") def stopwatch(num_seconds: Annotated[str, "Number of seconds in the stopwatch."]) -> str: diff --git a/test/agentchat/test_function_and_tool_calling.py b/test/agentchat/test_function_and_tool_calling.py new file mode 100644 index 000000000000..893fbe351203 --- /dev/null +++ b/test/agentchat/test_function_and_tool_calling.py @@ -0,0 +1,378 @@ +import json +from typing import Any, Callable, Dict, List + +import pytest + +from autogen.agentchat.conversable_agent import ConversableAgent + + +def _tool_func_1(arg1: str, arg2: str) -> str: + return f"_tool_func_1: {arg1} {arg2}" + + +def _tool_func_2(arg1: str, arg2: str) -> str: + return f"_tool_func_2: {arg1} {arg2}" + + +def _tool_func_error(arg1: str, arg2: str) -> str: + raise RuntimeError("Error in tool function") + + +async def _a_tool_func_1(arg1: str, arg2: str) -> str: + return f"_tool_func_1: {arg1} {arg2}" + + +async def _a_tool_func_2(arg1: str, arg2: str) -> str: + return f"_tool_func_2: {arg1} {arg2}" + + +async def _a_tool_func_error(arg1: str, 
arg2: str) -> str: + raise RuntimeError("Error in tool function") + + +_tool_use_message_1 = { + "role": "assistant", + "content": None, + "tool_calls": [ + { + "id": "1", + "type": "function", + "function": { + "name": "_tool_func_1", + "arguments": json.dumps({"arg1": "value1", "arg2": "value2"}), + }, + }, + { + "id": "2", + "type": "function", + "function": { + "name": "_tool_func_2", + "arguments": json.dumps({"arg1": "value3", "arg2": "value4"}), + }, + }, + ], +} + +_tool_use_message_1_bad_json = { + "role": "assistant", + "content": None, + "tool_calls": [ + { + "id": "1", + "type": "function", + "function": { + "name": "_tool_func_1", + # add extra comma to make json invalid + "arguments": json.dumps({"arg1": "value3", "arg2": "value4"})[:-1] + ",}", + }, + }, + { + "id": "2", + "type": "function", + "function": { + "name": "_tool_func_2", + "arguments": json.dumps({"arg1": "value3", "arg2": "value4"}), + }, + }, + ], +} + +_tool_use_message_1_expected_reply = { + "role": "tool", + "tool_responses": [ + {"tool_call_id": "1", "role": "tool", "content": "_tool_func_1: value1 value2"}, + {"tool_call_id": "2", "role": "tool", "content": "_tool_func_2: value3 value4"}, + ], + # "content": "Tool Call Id: 1\n_tool_func_1: value1 value2\n\nTool Call Id: 2\n_tool_func_2: value3 value4", + "content": "_tool_func_1: value1 value2\n\n_tool_func_2: value3 value4", +} + + +_tool_use_message_1_bad_json_expected_reply = { + "role": "tool", + "tool_responses": [ + { + "tool_call_id": "1", + "role": "tool", + "content": "Error: Expecting property name enclosed in double quotes: line 1 column 37 (char 36)\n You argument should follow json format.", + }, + {"tool_call_id": "2", "role": "tool", "content": "_tool_func_2: value3 value4"}, + ], + "content": "Error: Expecting property name enclosed in double quotes: line 1 column 37 (char 36)\n You argument should follow json format.\n\n_tool_func_2: value3 value4", +} + +_tool_use_message_1_error_expected_reply = { + "role": 
"tool", + "tool_responses": [ + {"tool_call_id": "1", "role": "tool", "content": "_tool_func_1: value1 value2"}, + { + "tool_call_id": "2", + "role": "tool", + "content": "Error: Error in tool function", + }, + ], + "content": "_tool_func_1: value1 value2\n\nError: Error in tool function", +} + +_tool_use_message_1_not_found_expected_reply = { + "role": "tool", + "tool_responses": [ + {"tool_call_id": "1", "role": "tool", "content": "_tool_func_1: value1 value2"}, + { + "tool_call_id": "2", + "role": "tool", + "content": "Error: Function _tool_func_2 not found.", + }, + ], + "content": "_tool_func_1: value1 value2\n\nError: Function _tool_func_2 not found.", +} + +_function_use_message_1 = { + "role": "assistant", + "content": None, + "function_call": { + "name": "_tool_func_1", + "arguments": json.dumps({"arg1": "value1", "arg2": "value2"}), + }, +} + +_function_use_message_1_bad_json = { + "role": "assistant", + "content": None, + "function_call": { + "name": "_tool_func_1", + "arguments": json.dumps({"arg1": "value1", "arg2": "value2"})[:-1] + ",}", + }, +} + +_function_use_message_1_expected_reply = { + "name": "_tool_func_1", + "role": "function", + "content": "_tool_func_1: value1 value2", +} + +_function_use_message_1_bad_json_expected_reply = { + "name": "_tool_func_1", + "role": "function", + "content": "Error: Expecting property name enclosed in double quotes: line 1 column 37 (char 36)\n You argument should follow json format.", +} + +_function_use_message_1_error_expected_reply = { + "name": "_tool_func_1", + "role": "function", + "content": "Error: Error in tool function", +} + +_function_use_message_1_not_found_expected_reply = { + "name": "_tool_func_1", + "role": "function", + "content": "Error: Function _tool_func_1 not found.", +} + +_text_message = {"content": "Hi!", "role": "user"} + + +def _get_function_map(is_function_async: bool, drop_tool_2: bool = False) -> Dict[str, Callable[..., Any]]: + if is_function_async: + return ( + { + 
"_tool_func_1": _a_tool_func_1, + "_tool_func_2": _a_tool_func_2, + } + if not drop_tool_2 + else { + "_tool_func_1": _a_tool_func_1, + } + ) + else: + return ( + { + "_tool_func_1": _tool_func_1, + "_tool_func_2": _tool_func_2, + } + if not drop_tool_2 + else { + "_tool_func_1": _tool_func_1, + } + ) + + +def _get_error_function_map( + is_function_async: bool, error_on_tool_func_2: bool = True +) -> Dict[str, Callable[..., Any]]: + if is_function_async: + return { + "_tool_func_1": _a_tool_func_1 if error_on_tool_func_2 else _a_tool_func_error, + "_tool_func_2": _a_tool_func_error if error_on_tool_func_2 else _a_tool_func_2, + } + else: + return { + "_tool_func_1": _tool_func_1 if error_on_tool_func_2 else _tool_func_error, + "_tool_func_2": _tool_func_error if error_on_tool_func_2 else _tool_func_2, + } + + +@pytest.mark.parametrize("is_function_async", [True, False]) +def test_generate_function_call_reply_on_function_call_message(is_function_async: bool) -> None: + agent = ConversableAgent(name="agent", llm_config=False) + + # empty function_map + agent._function_map = {} + messages = [_function_use_message_1] + finished, retval = agent.generate_function_call_reply(messages) + assert (finished, retval) == (True, _function_use_message_1_not_found_expected_reply) + + # function map set + agent._function_map = _get_function_map(is_function_async) + + # correct function call, multiple times to make sure cleanups are done properly + for _ in range(3): + messages = [_function_use_message_1] + finished, retval = agent.generate_function_call_reply(messages) + assert (finished, retval) == (True, _function_use_message_1_expected_reply) + + # bad JSON + messages = [_function_use_message_1_bad_json] + finished, retval = agent.generate_function_call_reply(messages) + assert (finished, retval) == (True, _function_use_message_1_bad_json_expected_reply) + + # tool call + messages = [_tool_use_message_1] + finished, retval = agent.generate_function_call_reply(messages) + assert 
(finished, retval) == (False, None) + + # text message + messages: List[Dict[str, str]] = [_text_message] + finished, retval = agent.generate_function_call_reply(messages) + assert (finished, retval) == (False, None) + + # error in function (raises Exception) + agent._function_map = _get_error_function_map(is_function_async, error_on_tool_func_2=False) + messages = [_function_use_message_1] + finished, retval = agent.generate_function_call_reply(messages) + assert (finished, retval) == (True, _function_use_message_1_error_expected_reply) + + +@pytest.mark.asyncio() +@pytest.mark.parametrize("is_function_async", [True, False]) +async def test_a_generate_function_call_reply_on_function_call_message(is_function_async: bool) -> None: + agent = ConversableAgent(name="agent", llm_config=False) + + # empty function_map + agent._function_map = {} + messages = [_function_use_message_1] + finished, retval = await agent.a_generate_function_call_reply(messages) + assert (finished, retval) == (True, _function_use_message_1_not_found_expected_reply) + + # function map set + agent._function_map = _get_function_map(is_function_async) + + # correct function call, multiple times to make sure cleanups are done properly + for _ in range(3): + messages = [_function_use_message_1] + finished, retval = await agent.a_generate_function_call_reply(messages) + assert (finished, retval) == (True, _function_use_message_1_expected_reply) + + # bad JSON + messages = [_function_use_message_1_bad_json] + finished, retval = await agent.a_generate_function_call_reply(messages) + assert (finished, retval) == (True, _function_use_message_1_bad_json_expected_reply) + + # tool call + messages = [_tool_use_message_1] + finished, retval = await agent.a_generate_function_call_reply(messages) + assert (finished, retval) == (False, None) + + # text message + messages: List[Dict[str, str]] = [_text_message] + finished, retval = await agent.a_generate_function_call_reply(messages) + assert (finished, retval) 
== (False, None) + + # error in function (raises Exception) + agent._function_map = _get_error_function_map(is_function_async, error_on_tool_func_2=False) + messages = [_function_use_message_1] + finished, retval = await agent.a_generate_function_call_reply(messages) + assert (finished, retval) == (True, _function_use_message_1_error_expected_reply) + + +@pytest.mark.parametrize("is_function_async", [True, False]) +def test_generate_tool_calls_reply_on_function_call_message(is_function_async: bool) -> None: + agent = ConversableAgent(name="agent", llm_config=False) + + # empty function_map + agent._function_map = _get_function_map(is_function_async, drop_tool_2=True) + messages = [_tool_use_message_1] + finished, retval = agent.generate_tool_calls_reply(messages) + assert (finished, retval) == (True, _tool_use_message_1_not_found_expected_reply) + + # function map set + agent._function_map = _get_function_map(is_function_async) + + # correct function call, multiple times to make sure cleanups are done properly + for _ in range(3): + messages = [_tool_use_message_1] + finished, retval = agent.generate_tool_calls_reply(messages) + assert (finished, retval) == (True, _tool_use_message_1_expected_reply) + + # bad JSON + messages = [_tool_use_message_1_bad_json] + finished, retval = agent.generate_tool_calls_reply(messages) + assert (finished, retval) == (True, _tool_use_message_1_bad_json_expected_reply) + + # function call + messages = [_function_use_message_1] + finished, retval = agent.generate_tool_calls_reply(messages) + assert (finished, retval) == (False, None) + + # text message + messages: List[Dict[str, str]] = [_text_message] + finished, retval = agent.generate_tool_calls_reply(messages) + assert (finished, retval) == (False, None) + + # error in function (raises Exception) + agent._function_map = _get_error_function_map(is_function_async) + messages = [_tool_use_message_1] + finished, retval = agent.generate_tool_calls_reply(messages) + assert (finished, 
retval) == (True, _tool_use_message_1_error_expected_reply) + + +@pytest.mark.asyncio() +@pytest.mark.parametrize("is_function_async", [True, False]) +async def test_a_generate_tool_calls_reply_on_function_call_message(is_function_async: bool) -> None: + agent = ConversableAgent(name="agent", llm_config=False) + + # empty function_map + agent._function_map = _get_function_map(is_function_async, drop_tool_2=True) + messages = [_tool_use_message_1] + finished, retval = await agent.a_generate_tool_calls_reply(messages) + assert (finished, retval) == (True, _tool_use_message_1_not_found_expected_reply) + + # function map set + agent._function_map = _get_function_map(is_function_async) + + # correct function call, multiple times to make sure cleanups are done properly + for _ in range(3): + messages = [_tool_use_message_1] + finished, retval = await agent.a_generate_tool_calls_reply(messages) + assert (finished, retval) == (True, _tool_use_message_1_expected_reply) + + # bad JSON + messages = [_tool_use_message_1_bad_json] + finished, retval = await agent.a_generate_tool_calls_reply(messages) + assert (finished, retval) == (True, _tool_use_message_1_bad_json_expected_reply) + + # function call + messages = [_function_use_message_1] + finished, retval = await agent.a_generate_tool_calls_reply(messages) + assert (finished, retval) == (False, None) + + # text message + messages: List[Dict[str, str]] = [_text_message] + finished, retval = await agent.a_generate_tool_calls_reply(messages) + assert (finished, retval) == (False, None) + + # error in function (raises Exception) + agent._function_map = _get_error_function_map(is_function_async) + messages = [_tool_use_message_1] + finished, retval = await agent.a_generate_tool_calls_reply(messages) + assert (finished, retval) == (True, _tool_use_message_1_error_expected_reply) diff --git a/website/docs/Use-Cases/agent_chat.md b/website/docs/Use-Cases/agent_chat.md index 17be8b91b48d..dbdc6a952073 100644 --- 
a/website/docs/Use-Cases/agent_chat.md +++ b/website/docs/Use-Cases/agent_chat.md @@ -102,7 +102,6 @@ user_proxy = autogen.UserProxyAgent( ``` python CurrencySymbol = Literal["USD", "EUR"] - def exchange_rate(base_currency: CurrencySymbol, quote_currency: CurrencySymbol) -> float: if base_currency == quote_currency: return 1.0 @@ -156,12 +155,30 @@ you can call the decorators as functions: ```python # Register the function with the chatbot's llm_config. -chatbot.register_for_llm(description="Currency exchange calculator.")(currency_calculator) +currency_calculator = chatbot.register_for_llm(description="Currency exchange calculator.")(currency_calculator) # Register the function with the user_proxy's function_map. user_proxy.register_for_execution()(currency_calculator) ``` +Alternatively, you can also use `autogen.agentchat.register_function()` instead as follows: +```python +def currency_calculator( + base_amount: Annotated[float, "Amount of currency in base_currency"], + base_currency: Annotated[CurrencySymbol, "Base currency"] = "USD", + quote_currency: Annotated[CurrencySymbol, "Quote currency"] = "EUR", +) -> str: + quote_amount = exchange_rate(base_currency, quote_currency) * base_amount + return f"{quote_amount} {quote_currency}" + +autogen.agentchat.register_function( + currency_calculator, + caller=chatbot, + executor=user_proxy, + description="Currency exchange calculator.", +) +``` + 4. 
Agents can now use the function as follows: ```python user_proxy.initiate_chat( @@ -216,14 +233,19 @@ class Currency(BaseModel): # parameter of type float, must be greater or equal to 0 with default value 0 amount: Annotated[float, Field(0, description="Amount of currency", ge=0)] -@user_proxy.register_for_execution() -@chatbot.register_for_llm(description="Currency exchange calculator.") def currency_calculator( base: Annotated[Currency, "Base currency: amount and currency symbol"], quote_currency: Annotated[CurrencySymbol, "Quote currency symbol"] = "USD", ) -> Currency: quote_amount = exchange_rate(base.currency, quote_currency) * base.amount return Currency(amount=quote_amount, currency=quote_currency) + +autogen.agentchat.register_function( + currency_calculator, + caller=chatbot, + executor=user_proxy, + description="Currency exchange calculator.", +) ``` The generated JSON schema has additional properties such as minimum value encoded: