From 8d5a3c86313c63f7232cd256e50ae41b88b6a693 Mon Sep 17 00:00:00 2001
From: Eric Zhu
Date: Fri, 27 Dec 2024 20:34:19 -0800
Subject: [PATCH 01/11] Doc update to include model context usage

---
 .../agents/_assistant_agent.py                |  29 +++--
 .../tutorial/agents.ipynb                     | 106 +++++++++++++-----
 2 files changed, 94 insertions(+), 41 deletions(-)

diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
index 9be3adcdc99..03dbedf141e 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
@@ -72,7 +72,6 @@ class AssistantAgent(BaseChatAgent):
     the inner messages as they are created, and the :class:`~autogen_agentchat.base.Response`
     object as the last item before closing the generator.
 
-
     .. note::
 
         The caller must only pass the new messages to the agent on each call
@@ -80,6 +79,11 @@ class AssistantAgent(BaseChatAgent):
         The agent maintains its state between calls to these methods.
         Do not pass the entire conversation history to the agent on each call.
 
+    .. note::
+        The assistant agent is not thread-safe or coroutine-safe.
+        It should not be shared between multiple tasks or coroutines, and it should
+        not call its methods concurrently.
+
     Tool call behavior:
 
     * If the model returns no tool call, then the response is immediately returned as a :class:`~autogen_agentchat.messages.TextMessage` in :attr:`~autogen_agentchat.base.Response.chat_message`.
     * If the model returns tool calls, they will be executed right away:
@@ -87,6 +91,12 @@
         - When `reflect_on_tool_use` is False (default), the tool call results are returned as a :class:`~autogen_agentchat.messages.ToolCallSummaryMessage` in :attr:`~autogen_agentchat.base.Response.chat_message`. `tool_call_summary_format` can be used to customize the tool call summary.
         - When `reflect_on_tool_use` is True, another model inference is made using the tool calls and results, and the text response is returned as a :class:`~autogen_agentchat.messages.TextMessage` in :attr:`~autogen_agentchat.base.Response.chat_message`.
 
+    .. note::
+        By default, the tool call results are returned as the response when tool calls are made.
+        So it is recommended to pay attention to the formatting of the tools' return values,
+        especially if another agent is expecting them in a specific format.
+        Use `tool_call_summary_format` to customize the tool call summary, if needed.
+
     Handoff behavior:
 
     * If a handoff is triggered, a :class:`~autogen_agentchat.messages.HandoffMessage` will be returned in :attr:`~autogen_agentchat.base.Response.chat_message`.
@@ -94,18 +104,15 @@
 
     .. note::
-        The assistant agent is not thread-safe or coroutine-safe.
-        It should not be shared between multiple tasks or coroutines, and it should
-        not call its methods concurrently.
+        If multiple handoffs are detected, only the first handoff is executed.
 
-    .. note::
-        By default, the tool call results are returned as response when tool calls are made.
-        So it is recommended to pay attention to the formatting of the tools return values,
-        especially if another agent is expecting them in a specific format.
-        Use `tool_call_summary_format` to customize the tool call summary, if needed.
-
-    .. note::
-        If multiple handoffs are detected, only the first handoff is executed.
+ Limit context size sent to the model: + + You can limit the number of messages sent to the model by setting + the `model_context` parameter to a :class:`~autogen_core.model_context.BufferedChatCompletionContext`. + This will limit the number of recent messages sent to the model and can be useful + when the model has a limit on the number of tokens it can process. Args: diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb index 70811b062bf..5b21cfffbfd 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb @@ -113,42 +113,13 @@ "```{note}\n", "Unlike in v0.2 AgentChat, the tools are executed by the same agent directly within\n", "the same call to {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`.\n", - "```\n", - "\n", - "## User Proxy Agent\n", - "\n", - "{py:class}`~autogen_agentchat.agents.UserProxyAgent` is a built-in agent that\n", - "provides one way for a user to intervene in the process. This agent will put the team in a temporary blocking state, and thus any exceptions or runtime failures while in the blocked state will result in a deadlock. It is strongly advised that this agent be coupled with a timeout mechanic and that all errors and exceptions emanating from it are handled." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_agentchat.agents import UserProxyAgent\n", - "\n", - "\n", - "async def user_proxy_run() -> None:\n", - " user_proxy_agent = UserProxyAgent(\"user_proxy\")\n", - " response = await user_proxy_agent.on_messages(\n", - " [TextMessage(content=\"What is your name? \", source=\"user\")], cancellation_token=CancellationToken()\n", - " )\n", - " print(f\"Your name is {response.chat_message.content}\")\n", - "\n", - "\n", - "# Use asyncio.run(user_proxy_run()) when running in a script.\n", - "await user_proxy_run()" + "```" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The User Proxy agent is ideally used for on-demand human-in-the-loop interactions for scenarios such as Just In Time approvals, human feedback, alerts, etc. For slower user interactions, consider terminating a team using a termination condition and start another one from\n", - "{py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream` with another message.\n", - "\n", "## Streaming Messages\n", "\n", "We can also stream each message as it is generated by the agent by using the\n", @@ -227,6 +198,81 @@ "For more information on tool calling, refer to the documentation from [OpenAI](https://platform.openai.com/docs/guides/function-calling) and [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/tool-use)." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Use Model Context\n", + "\n", + "{py:class}`~autogen_agentchat.agents.AssistantAgent` has a `model_context`\n", + "parameter that can be used to pass in a {py:class}`~autogen_core.model_context.ChatCompletionContext`\n", + "object. 
This allows the agent to use different model contexts, such as\n", + "{py:class}`~autogen_core.model_context.BufferedChatCompletionContext` to\n", + "limit the context sent to the model.\n", + "\n", + "By default, {py:class}`~autogen_agentchat.agents.AssistantAgent` uses\n", + "the {py:class}`~autogen_core.model_context.UnboundedChatCompletionContext`\n", + "which sends the full conversation history to the model. To limit the context\n", + "to the last `n` messages, you can use the {py:class}`~autogen_core.model_context.BufferedChatCompletionContext`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_core.model_context import BufferedChatCompletionContext\n", + "\n", + "# Create an agent that uses only the last 5 messages in the context to generate responses.\n", + "agent = AssistantAgent(\n", + " name=\"assistant\",\n", + " model_client=model_client,\n", + " tools=[web_search],\n", + " system_message=\"Use tools to solve tasks.\",\n", + " model_context=BufferedChatCompletionContext(buffer_size=5), # Only use the last 5 messages in the context.\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## User Proxy Agent\n", + "\n", + "{py:class}`~autogen_agentchat.agents.UserProxyAgent` is a built-in agent that\n", + "provides one way for a user to intervene in the process. This agent will put the team in a temporary blocking state, and thus any exceptions or runtime failures while in the blocked state will result in a deadlock. It is strongly advised that this agent be coupled with a timeout mechanic and that all errors and exceptions emanating from it are handled." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import UserProxyAgent\n", + "\n", + "\n", + "async def user_proxy_run() -> None:\n", + " user_proxy_agent = UserProxyAgent(\"user_proxy\")\n", + " response = await user_proxy_agent.on_messages(\n", + " [TextMessage(content=\"What is your name? \", source=\"user\")], cancellation_token=CancellationToken()\n", + " )\n", + " print(f\"Your name is {response.chat_message.content}\")\n", + "\n", + "\n", + "# Use asyncio.run(user_proxy_run()) when running in a script.\n", + "await user_proxy_run()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The User Proxy agent is ideally used for on-demand human-in-the-loop interactions for scenarios such as Just In Time approvals, human feedback, alerts, etc. For slower user interactions, consider terminating a team using a termination condition and start another one from\n", + "{py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream` with another message." 
+ ] + }, { "cell_type": "markdown", "metadata": {}, From 81fceaea8091f3687d47e4c5a542869a79caf8bb Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sat, 28 Dec 2024 21:40:53 -0800 Subject: [PATCH 02/11] add langchain tools --- .../tutorial/agents.ipynb | 88 ++++++++++++++++--- 1 file changed, 78 insertions(+), 10 deletions(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb index 5b21cfffbfd..7f0fed2c0d5 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb @@ -26,12 +26,13 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "from autogen_agentchat.agents import AssistantAgent\n", "from autogen_agentchat.messages import TextMessage\n", + "from autogen_agentchat.ui import Console\n", "from autogen_core import CancellationToken\n", "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", @@ -154,9 +155,6 @@ } ], "source": [ - "from autogen_agentchat.ui import Console\n", - "\n", - "\n", "async def assistant_run_stream() -> None:\n", " # Option 1: read each message from the stream (as shown in the previous example).\n", " # async for message in agent.on_messages_stream(\n", @@ -187,22 +185,92 @@ "with the final item being the response message in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute.\n", "\n", "From the messages, you can observe that the assistant agent utilized the `web_search` tool to\n", - "gather information and responded based on the search results.\n", + "gather information and responded based on the search results." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using Tools\n", + "\n", + "Large Language Models (LLMs) are typically limited to generating text or code responses. \n", + "However, many complex tasks benefit from the ability to use external tools that perform specific actions,\n", + "such as fetching data from APIs or databases.\n", + "\n", + "To address this limitation, modern LLMs can now accept a list of available tool schemas \n", + "(descriptions of tools and their arguments) and generate a tool call message. \n", + "This capability is known as **Tool Calling** or **Function Calling** and \n", + "is becoming a popular pattern in building intelligent agent-based applications.\n", + "Refer to the documentation from [OpenAI](https://platform.openai.com/docs/guides/function-calling) \n", + "and [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) for more information about tool calling in LLMs.\n", "\n", - "## Understanding Tool Calling\n", + "In AgentChat, the assistant agent can use tools to perform specific actions.\n", + "The `web_search` tool is one such tool that allows the assistant agent to search the web for information.\n", + "A custom tool can be a Python function or a subclass of the {py:class}`~autogen_core.tools.BaseTool`.\n", "\n", - "Large Language Models (LLMs) are typically limited to generating text or code responses. 
However, many complex tasks benefit from the ability to use external tools that perform specific actions, such as fetching data from APIs or databases.\n", + "### Langchain Tools\n", "\n", - "To address this limitation, modern LLMs can now accept a list of available tool schemas (descriptions of tools and their arguments) and generate a tool call message. This capability is known as **Tool Calling** or **Function Calling** and is becoming a popular pattern in building intelligent agent-based applications.\n", + "In addition to custom tools, you can also use tools from the Langchain library\n", + "by wrapping them in {py:class}`~autogen_ext.tools.langchain.LangChainToolAdaptor`." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "What's the average age of the passengers?\n", + "---------- assistant ----------\n", + "[FunctionCall(id='call_L7mDlKyZF8kyJ518IXRJXIR3', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')]\n", + "[Prompt tokens: 111, Completion tokens: 21]\n", + "---------- assistant ----------\n", + "[FunctionExecutionResult(content='29.69911764705882', call_id='call_L7mDlKyZF8kyJ518IXRJXIR3')]\n", + "---------- assistant ----------\n", + "29.69911764705882\n", + "---------- Summary ----------\n", + "Number of messages: 4\n", + "Finish reason: None\n", + "Total prompt tokens: 111\n", + "Total completion tokens: 21\n", + "Duration: 0.64 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content=\"What's the average age of the passengers?\", type='TextMessage'), ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=111, completion_tokens=21), content=[FunctionCall(id='call_L7mDlKyZF8kyJ518IXRJXIR3', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='29.69911764705882', call_id='call_L7mDlKyZF8kyJ518IXRJXIR3')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='assistant', models_usage=None, content='29.69911764705882', type='ToolCallSummaryMessage')], stop_reason=None)" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import pandas as pd\n", + "from autogen_ext.tools.langchain import LangChainToolAdapter\n", + "from langchain_experimental.tools.python.tool import PythonAstREPLTool\n", "\n", - "For more information on tool calling, refer to the documentation from [OpenAI](https://platform.openai.com/docs/guides/function-calling) and [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/tool-use)." 
+ "df = pd.read_csv(\"https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv\")\n", + "tool = LangChainToolAdapter(PythonAstREPLTool(locals={\"df\": df}))\n", + "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + "agent = AssistantAgent(\n", + " \"assistant\", tools=[tool], model_client=model_client, system_message=\"Use the `df` variable to access the dataset.\"\n", + ")\n", + "await Console(agent.run_stream(task=\"What's the average age of the passengers?\"))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Use Model Context\n", + "## Using Model Context\n", "\n", "{py:class}`~autogen_agentchat.agents.AssistantAgent` has a `model_context`\n", "parameter that can be used to pass in a {py:class}`~autogen_core.model_context.ChatCompletionContext`\n", From ba3e47e45ba4c6750f464104fea1b45427f6e2e5 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sat, 28 Dec 2024 21:46:03 -0800 Subject: [PATCH 03/11] update langchain tool wrapper api doc --- .../tools/langchain/_langchain_adapter.py | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py index ea657691d94..8211e7140c0 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py @@ -22,6 +22,38 @@ class LangChainToolAdapter(BaseTool[BaseModel, Any]): Args: langchain_tool (LangChainTool): A LangChain tool to wrap + + Examples: + + Use the `PythonAstREPLTool` from the `langchain_experimental` package to + create a tool that allows you to interact with a Pandas DataFrame. + + .. 
code-block:: python + + import asyncio + import pandas as pd + from langchain_experimental.tools.python.tool import PythonAstREPLTool + from autogen_ext.tools.langchain import LangChainToolAdapter + from autogen_ext.models.openai import OpenAIChatCompletionClient + from autogen_agentchat.agents import AssistantAgent + from autogen_agentchat.ui import Console + + + async def main() -> None: + df = pd.read_csv("https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv") + tool = LangChainToolAdapter(PythonAstREPLTool(locals={"df": df})) + model_client = OpenAIChatCompletionClient(model="gpt-4o") + agent = AssistantAgent( + "assistant", + tools=[tool], + model_client=model_client, + system_message="Use the `df` variable to access the dataset.", + ) + await Console(agent.run_stream(task="What's the average age of the passengers?")) + + + asyncio.run(main()) + """ def __init__(self, langchain_tool: LangChainTool): From 40ee265e966ec3fcb2e1b5dcfde3cc95b52c4814 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sat, 28 Dec 2024 21:49:23 -0800 Subject: [PATCH 04/11] updat --- .../src/user-guide/agentchat-user-guide/tutorial/agents.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb index 7f0fed2c0d5..c45632158e1 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb @@ -212,7 +212,7 @@ "### Langchain Tools\n", "\n", "In addition to custom tools, you can also use tools from the Langchain library\n", - "by wrapping them in {py:class}`~autogen_ext.tools.langchain.LangChainToolAdaptor`." + "by wrapping them in {py:class}`~autogen_ext.tools.langchain.LangChainToolAdapter`." 
] }, { From 6f3b074e37fbca178bbd78a8c954c5d0b6eee144 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sat, 28 Dec 2024 21:52:22 -0800 Subject: [PATCH 05/11] update --- .../tutorial/agents.ipynb | 23 ++++++++----------- .../tools/langchain/_langchain_adapter.py | 8 ++++++- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb index c45632158e1..fbefdc85ace 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb @@ -217,37 +217,34 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "---------- user ----------\n", - "What's the average age of the passengers?\n", "---------- assistant ----------\n", - "[FunctionCall(id='call_L7mDlKyZF8kyJ518IXRJXIR3', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')]\n", - "[Prompt tokens: 111, Completion tokens: 21]\n", + "[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')]\n", + "[Prompt tokens: 111, Completion tokens: 22]\n", "---------- assistant ----------\n", - "[FunctionExecutionResult(content='29.69911764705882', call_id='call_L7mDlKyZF8kyJ518IXRJXIR3')]\n", + "[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')]\n", "---------- assistant ----------\n", "29.69911764705882\n", "---------- Summary ----------\n", - "Number of messages: 4\n", - "Finish reason: None\n", + "Number of inner messages: 2\n", "Total prompt tokens: 111\n", - "Total completion tokens: 21\n", - "Duration: 0.64 seconds\n" + "Total completion tokens: 22\n", + "Duration: 0.62 seconds\n" ] }, { "data": { "text/plain": [ - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content=\"What's the average age of the passengers?\", type='TextMessage'), ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=111, completion_tokens=21), content=[FunctionCall(id='call_L7mDlKyZF8kyJ518IXRJXIR3', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='29.69911764705882', call_id='call_L7mDlKyZF8kyJ518IXRJXIR3')], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='assistant', models_usage=None, content='29.69911764705882', type='ToolCallSummaryMessage')], stop_reason=None)" + "Response(chat_message=ToolCallSummaryMessage(source='assistant', models_usage=None, content='29.69911764705882', type='ToolCallSummaryMessage'), inner_messages=[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=111, completion_tokens=22), content=[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')], type='ToolCallExecutionEvent')])" ] }, - "execution_count": 4, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -263,7 
+260,7 @@ "agent = AssistantAgent(\n", " \"assistant\", tools=[tool], model_client=model_client, system_message=\"Use the `df` variable to access the dataset.\"\n", ")\n", - "await Console(agent.run_stream(task=\"What's the average age of the passengers?\"))" + "await Console(agent.on_messages_stream([TextMessage(content=\"What's the average age of the passengers?\", source=\"user\")], CancellationToken()))" ] }, { diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py index 8211e7140c0..2de0371515b 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py @@ -35,8 +35,10 @@ class LangChainToolAdapter(BaseTool[BaseModel, Any]): from langchain_experimental.tools.python.tool import PythonAstREPLTool from autogen_ext.tools.langchain import LangChainToolAdapter from autogen_ext.models.openai import OpenAIChatCompletionClient + from autogen_agentchat.messages import TextMessage from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.ui import Console + from autogen_core import CancellationToken async def main() -> None: @@ -49,7 +51,11 @@ async def main() -> None: model_client=model_client, system_message="Use the `df` variable to access the dataset.", ) - await Console(agent.run_stream(task="What's the average age of the passengers?")) + await Console( + agent.on_messages_stream( + [TextMessage(content="What's the average age of the passengers?", source="user")], CancellationToken() + ) + ) asyncio.run(main()) From 5cd1229d87176b35afbc042272e39ed6d029462f Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sat, 28 Dec 2024 22:07:40 -0800 Subject: [PATCH 06/11] format --- .../user-guide/agentchat-user-guide/tutorial/agents.ipynb | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb index fbefdc85ace..85d9dc360d7 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb @@ -260,7 +260,11 @@ "agent = AssistantAgent(\n", " \"assistant\", tools=[tool], model_client=model_client, system_message=\"Use the `df` variable to access the dataset.\"\n", ")\n", - "await Console(agent.on_messages_stream([TextMessage(content=\"What's the average age of the passengers?\", source=\"user\")], CancellationToken()))" + "await Console(\n", + " agent.on_messages_stream(\n", + " [TextMessage(content=\"What's the average age of the passengers?\", source=\"user\")], CancellationToken()\n", + " )\n", + ")" ] }, { From fba254b31b205d31dea9a46822ae2d719a86146c Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sat, 28 Dec 2024 22:21:27 -0800 Subject: [PATCH 07/11] add langchain experimental dev dep --- python/packages/autogen-ext/pyproject.toml | 3 +- python/uv.lock | 82 ++++++++++++++++++++-- 2 files changed, 80 insertions(+), 5 deletions(-) diff --git a/python/packages/autogen-ext/pyproject.toml b/python/packages/autogen-ext/pyproject.toml index 34a71f917ca..10e890ff80e 100644 --- a/python/packages/autogen-ext/pyproject.toml +++ b/python/packages/autogen-ext/pyproject.toml @@ -58,7 +58,8 @@ packages = ["src/autogen_ext"] 
[dependency-groups] dev = [ - "autogen_test_utils" + "autogen_test_utils", + "langchain-experimental", ] [tool.ruff] diff --git a/python/uv.lock b/python/uv.lock index a5fe187da54..96fc2651b11 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -517,6 +517,7 @@ web-surfer = [ [package.dev-dependencies] dev = [ { name = "autogen-test-utils" }, + { name = "langchain-experimental" }, ] [package.metadata] @@ -546,7 +547,10 @@ requires-dist = [ ] [package.metadata.requires-dev] -dev = [{ name = "autogen-test-utils", editable = "packages/autogen-test-utils" }] +dev = [ + { name = "autogen-test-utils", editable = "packages/autogen-test-utils" }, + { name = "langchain-experimental" }, +] [[package]] name = "autogen-magentic-one" @@ -1739,9 +1743,54 @@ wheels = [ ] [[package]] -name = "langchain-core" +name = "langchain" +version = "0.3.13" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "async-timeout", marker = "python_full_version < '3.11'" }, + { name = "langchain-core" }, + { name = "langchain-text-splitters" }, + { name = "langsmith" }, + { name = "numpy" }, + { name = "pydantic" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "sqlalchemy" }, + { name = "tenacity" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bf/77/6850ca959114aa1e63a4aa3298d9d5886eccedfa6b1d8a50f59d9e6ee328/langchain-0.3.13.tar.gz", hash = "sha256:3d618a8e7e496704dc4407d224218ff28baf9416c1241e7bdcdd117e6c70daa8", size = 420308 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/d4/f70ddcd14a1c9401367c601380e1204c6cfc948d7a46a4a274d3c03f1526/langchain-0.3.13-py3-none-any.whl", hash = "sha256:dd3549a28dc28b5d4769122b60fad0890aee86928d1bdbba3f9349d9f023315d", size = 1009093 }, +] + +[[package]] +name = "langchain-community" version = "0.3.13" source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "dataclasses-json" }, + { name = "httpx-sse" }, + { name = "langchain" }, + { name = "langchain-core" }, + { name = "langsmith" }, + { name = "numpy" }, + { name = "pydantic-settings" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "sqlalchemy" }, + { name = "tenacity" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/47/d0/37ed3d950f99fd2f24ed9e71a4a30adede0574cd45e8245051404d468689/langchain_community-0.3.13.tar.gz", hash = "sha256:8abe05b4ab160018dbd368f7b4dc79ef6a66178a461b47c4ce401d430337e0c2", size = 1717819 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/a7/b9f3cd12510fe9a5fe2dcd7f12d095b0d5bd95fb2cd9c5362de45ebc18f9/langchain_community-0.3.13-py3-none-any.whl", hash = "sha256:d6f623d59d44ea85b9cafac8ca6e9970c343bf892a9a3252b3f25e30df339be2", size = 2500072 }, +] + +[[package]] +name = "langchain-core" +version = "0.3.28" +source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jsonpatch" }, { name = "langsmith" }, @@ -1751,9 +1800,22 @@ dependencies = [ { name = "tenacity" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/02/d6/5a16b853a19ba0200dbf77010d9a3b3effc84c62bc952fff7bf81e90f9d8/langchain_core-0.3.13.tar.gz", hash = "sha256:d3a6c838284ff73705dd0f24a36cd8b2fa34a348e6b357e6b3d58199ab063cde", size = 327206 } +sdist = { url = "https://files.pythonhosted.org/packages/de/f2/1787f9e7fcf6bee70f7a8f488ee95b814408706ab35d61dbba279fbe7361/langchain_core-0.3.28.tar.gz", hash = "sha256:407f7607e6b3c0ebfd6094da95d39b701e22e59966698ef126799782953e7f2c", size = 330743 } 
wheels = [ - { url = "https://files.pythonhosted.org/packages/38/53/b5436750c392370cff44f8e3669a4886fa18579ad0ce33a505f8f261c1a0/langchain_core-0.3.13-py3-none-any.whl", hash = "sha256:e79cfac046cab293c02047f081741f4a433ca5aa54a3973e179eaef147cdfba4", size = 408049 }, + { url = "https://files.pythonhosted.org/packages/9a/bf/07e63d4b4c41aa49daf4a4499e8010928ce8545469f0265544f925c95fff/langchain_core-0.3.28-py3-none-any.whl", hash = "sha256:a02f81ca53a8eed757133797e5a602ca80c1324bbecb0c5d86ef7bd3d6625372", size = 411553 }, +] + +[[package]] +name = "langchain-experimental" +version = "0.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-community" }, + { name = "langchain-core" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/27/56/a8acbb08a03383c28875b3b151e4cefea5612266917fbd6fc3c14c21e172/langchain_experimental-0.3.4.tar.gz", hash = "sha256:937c4259ee4a639c618d19acf0e2c5c2898ef127050346edc5655259aa281a21", size = 140532 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/27/fe8caa4884611286b1f7d6c5cfd76e1fef188faaa946db4fde6daa1cd2cd/langchain_experimental-0.3.4-py3-none-any.whl", hash = "sha256:2e587306aea36b60fa5e5fc05dc7281bee9f60a806f0bf9d30916e0ee096af80", size = 209154 }, ] [[package]] @@ -1770,6 +1832,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/66/ea/dcc59d9b818a4d7f25d4d6b3018355a0e0243a351b1d4ef8b26ec107ee00/langchain_openai-0.2.3-py3-none-any.whl", hash = "sha256:f498c94817c980cb302439b95d3f3275cdf2743e022ee674692c75898523cf57", size = 49907 }, ] +[[package]] +name = "langchain-text-splitters" +version = "0.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/10/a5/215816cc376c4f3853c961ae98022a6d57e74f6859c206f29716173e9a73/langchain_text_splitters-0.3.4.tar.gz", hash = "sha256:f3cedea469684483b4492d9f11dc2fa66388dab01c5d5c5307925515ab884c24", size = 22205 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/37/b2c971dfea62675f640e0dd5a078ca502885059276d21b7143cf1fe13e82/langchain_text_splitters-0.3.4-py3-none-any.whl", hash = "sha256:432fdb39c161d4d0db16d61d38af068dc5dd4dd08082febd2fced81304b2725c", size = 27783 }, +] + [[package]] name = "langgraph" version = "0.2.39" From 03d293744536496a32985effc3b10e00128375b4 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sat, 28 Dec 2024 22:46:38 -0800 Subject: [PATCH 08/11] type --- .../src/autogen_ext/tools/langchain/_langchain_adapter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py index 2de0371515b..85b21053794 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py @@ -42,7 +42,7 @@ class LangChainToolAdapter(BaseTool[BaseModel, Any]): async def main() -> None: - df = pd.read_csv("https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv") + df = pd.read_csv("https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv") # type: ignore tool = LangChainToolAdapter(PythonAstREPLTool(locals={"df": df})) model_client = OpenAIChatCompletionClient(model="gpt-4o") agent = AssistantAgent( From 197e3620a282cb2fe77518047294ec0e8b119b35 Mon Sep 17 00:00:00 2001 From: Eric Zhu 
Date: Sun, 29 Dec 2024 14:51:51 -0800 Subject: [PATCH 09/11] Fix type --- .../src/autogen_ext/tools/langchain/_langchain_adapter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py index 85b21053794..f3e6c43dfc4 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py @@ -9,7 +9,7 @@ from pydantic import BaseModel, Field, create_model if TYPE_CHECKING: - from langchain_core.tools import Tool as LangChainTool + from langchain_core.tools import BaseTool as LangChainTool class LangChainToolAdapter(BaseTool[BaseModel, Any]): From a8dcbc12e9f5d62e7f1c03aa39898db15f316c36 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sun, 29 Dec 2024 15:08:19 -0800 Subject: [PATCH 10/11] Fix some types in langchain adapter --- .../tools/langchain/_langchain_adapter.py | 27 +++++++++++-------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py index f3e6c43dfc4..ee653148054 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py @@ -10,6 +10,7 @@ if TYPE_CHECKING: from langchain_core.tools import BaseTool as LangChainTool + from langchain_core.tools import Tool as LangChainFunctionTool class LangChainToolAdapter(BaseTool[BaseModel, Any]): @@ -70,22 +71,26 @@ def __init__(self, langchain_tool: LangChainTool): description = self._langchain_tool.description or "" # Determine the callable method - if hasattr(self._langchain_tool, "func") and callable(self._langchain_tool.func): - assert self._langchain_tool.func is not None - self._callable: Callable[..., Any] = self._langchain_tool.func - elif hasattr(self._langchain_tool, "_run") and callable(self._langchain_tool._run): # pyright: ignore - self._callable: Callable[..., Any] = self._langchain_tool._run # type: ignore + self._callable: Callable[..., Any] + if isinstance(self._langchain_tool, LangChainFunctionTool): + if not ( + hasattr(self._langchain_tool, "func") + and self._langchain_tool.func is not None + and callable(self._langchain_tool.func) + ): + raise ValueError("The LangChain function tool must have a callable method named `func`") + self._callable = self._langchain_tool.func else: - raise AttributeError( - f"The provided LangChain tool '{name}' does not have a callable 'func' or '_run' method." 
- ) + if not (hasattr(self._langchain_tool, "_run") and callable(self._langchain_tool._run)): # type: ignore + raise ValueError("The LangChain tool must have a callable method named `_run`") + self._callable = self._langchain_tool._run # type: ignore # Determine args_type - if self._langchain_tool.args_schema: # pyright: ignore - args_type = self._langchain_tool.args_schema # pyright: ignore + if self._langchain_tool.args_schema: # type: ignore + args_type = self._langchain_tool.args_schema # type: ignore else: # Infer args_type from the callable's signature - sig = inspect.signature(cast(Callable[..., Any], self._callable)) # type: ignore + sig = inspect.signature(cast(Callable[..., Any], self._callable)) fields = { k: (v.annotation, Field(...)) for k, v in sig.parameters.items() From 7c738f5163ba950bd6d87b83d3d6f98161c82eb2 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sun, 29 Dec 2024 19:51:01 -0800 Subject: [PATCH 11/11] type ignores --- .../tools/langchain/_langchain_adapter.py | 27 ++++++++----------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py index ee653148054..86d6f156f13 100644 --- a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py +++ b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py @@ -10,7 +10,6 @@ if TYPE_CHECKING: from langchain_core.tools import BaseTool as LangChainTool - from langchain_core.tools import Tool as LangChainFunctionTool class LangChainToolAdapter(BaseTool[BaseModel, Any]): @@ -71,26 +70,22 @@ def __init__(self, langchain_tool: LangChainTool): description = self._langchain_tool.description or "" # Determine the callable method - self._callable: Callable[..., Any] - if isinstance(self._langchain_tool, LangChainFunctionTool): - if not ( - hasattr(self._langchain_tool, "func") - and self._langchain_tool.func is not None - and callable(self._langchain_tool.func) - ): - raise ValueError("The LangChain function tool must have a callable method named `func`") - self._callable = self._langchain_tool.func + if hasattr(self._langchain_tool, "func") and callable(self._langchain_tool.func): # type: ignore + assert self._langchain_tool.func is not None # type: ignore + self._callable: Callable[..., Any] = self._langchain_tool.func # type: ignore + elif hasattr(self._langchain_tool, "_run") and callable(self._langchain_tool._run): # type: ignore + self._callable: Callable[..., Any] = self._langchain_tool._run # type: ignore else: - if not (hasattr(self._langchain_tool, "_run") and callable(self._langchain_tool._run)): # type: ignore - raise ValueError("The LangChain tool must have a callable method named `_run`") - self._callable = self._langchain_tool._run # type: ignore + raise AttributeError( + f"The provided LangChain tool '{name}' does not have a callable 'func' or '_run' method." + ) # Determine args_type - if self._langchain_tool.args_schema: # type: ignore - args_type = self._langchain_tool.args_schema # type: ignore + if self._langchain_tool.args_schema: # pyright: ignore + args_type = self._langchain_tool.args_schema # pyright: ignore else: # Infer args_type from the callable's signature - sig = inspect.signature(cast(Callable[..., Any], self._callable)) + sig = inspect.signature(cast(Callable[..., Any], self._callable)) # type: ignore fields = { k: (v.annotation, Field(...)) for k, v in sig.parameters.items()