From a20ec102d38f8852435736def11ce3aaa881c82c Mon Sep 17 00:00:00 2001
From: Eric Zhu
Date: Mon, 30 Dec 2024 09:09:33 -0800
Subject: [PATCH] AgentChat tutorial update to include model context usage and
 langchain tool (#4843)

* Doc update to include model context usage

* add langchain tools

* update langchain tool wrapper api doc

* updat

* update

* format

* add langchain experimental dev dep

* type

* Fix type

* Fix some types in langchain adapter

* type ignores
---
 .../agents/_assistant_agent.py                |  29 ++-
 .../tutorial/agents.ipynb                     | 193 ++++++++++++++----
 python/packages/autogen-ext/pyproject.toml    |   3 +-
 .../tools/langchain/_langchain_adapter.py     |  48 ++++-
 python/uv.lock                                |  82 +++++++-
 5 files changed, 295 insertions(+), 60 deletions(-)

diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
index 3b92a4a51b03..5fc5b8c54cd0 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
@@ -72,7 +72,6 @@ class AssistantAgent(BaseChatAgent):
         the inner messages as they are created, and the :class:`~autogen_agentchat.base.Response`
         object as the last item before closing the generator.
 
-
     .. note::
 
         The caller must only pass the new messages to the agent on each call
@@ -80,6 +79,11 @@ class AssistantAgent(BaseChatAgent):
         The agent maintains its state between calls to these methods.
         Do not pass the entire conversation history to the agent on each call.
 
+    .. note::
+        The assistant agent is not thread-safe or coroutine-safe.
+        It should not be shared between multiple tasks or coroutines, and its
+        methods should not be called concurrently.
+
     Tool call behavior:
 
     * If the model returns no tool call, then the response is immediately returned as a :class:`~autogen_agentchat.messages.TextMessage` in :attr:`~autogen_agentchat.base.Response.chat_message`.
@@ -87,6 +91,12 @@ class AssistantAgent(BaseChatAgent):
         - When `reflect_on_tool_use` is False (default), the tool call results are returned as a :class:`~autogen_agentchat.messages.ToolCallSummaryMessage` in :attr:`~autogen_agentchat.base.Response.chat_message`. `tool_call_summary_format` can be used to customize the tool call summary.
         - When `reflect_on_tool_use` is True, another model inference is made using the tool calls and results, and the text response is returned as a :class:`~autogen_agentchat.messages.TextMessage` in :attr:`~autogen_agentchat.base.Response.chat_message`.
 
+    .. note::
+        By default, the tool call results are returned as the response when tool calls are made.
+        So it is recommended to pay attention to the formatting of the tools' return values,
+        especially if another agent is expecting them in a specific format.
+        Use `tool_call_summary_format` to customize the tool call summary, if needed.
+
     Hand off behavior:
 
     * If a handoff is triggered, a :class:`~autogen_agentchat.messages.HandoffMessage` will be returned in :attr:`~autogen_agentchat.base.Response.chat_message`.
@@ -94,18 +104,15 @@ class AssistantAgent(BaseChatAgent):
 
     .. note::
-        The assistant agent is not thread-safe or coroutine-safe.
-        It should not be shared between multiple tasks or coroutines, and it should
-        not call its methods concurrently.
+        If multiple handoffs are detected, only the first handoff is executed.
 
-    .. note::
-        By default, the tool call results are returned as response when tool calls are made.
-        So it is recommended to pay attention to the formatting of the tools return values,
-        especially if another agent is expecting them in a specific format.
-        Use `tool_call_summary_format` to customize the tool call summary, if needed.
-
-    .. note::
-        If multiple handoffs are detected, only the first handoff is executed.
+    Limit the context size sent to the model:
+
+    You can limit the number of messages sent to the model by setting
+    the `model_context` parameter to a :class:`~autogen_core.model_context.BufferedChatCompletionContext`.
+    This will limit the context sent to the model to the most recent messages and can be useful
+    when the model has a limit on the number of tokens it can process.
 
     Args:
diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb
index 70811b062bf6..85d9dc360d72 100644
--- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb
+++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb
@@ -26,12 +26,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 3,
    "metadata": {},
    "outputs": [],
    "source": [
     "from autogen_agentchat.agents import AssistantAgent\n",
     "from autogen_agentchat.messages import TextMessage\n",
+    "from autogen_agentchat.ui import Console\n",
     "from autogen_core import CancellationToken\n",
     "from autogen_ext.models.openai import OpenAIChatCompletionClient\n",
@@ -113,42 +114,13 @@
     "```{note}\n",
     "Unlike in v0.2 AgentChat, the tools are executed by the same agent directly within\n",
     "the same call to {py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages`.\n",
-    "```\n",
-    "\n",
-    "## User Proxy Agent\n",
-    "\n",
-    "{py:class}`~autogen_agentchat.agents.UserProxyAgent` is a built-in agent that\n",
-    "provides one way for a user to intervene in the process. This agent will put the team in a temporary blocking state, and thus any exceptions or runtime failures while in the blocked state will result in a deadlock. It is strongly advised that this agent be coupled with a timeout mechanic and that all errors and exceptions emanating from it are handled."
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from autogen_agentchat.agents import UserProxyAgent\n",
-    "\n",
-    "\n",
-    "async def user_proxy_run() -> None:\n",
-    "    user_proxy_agent = UserProxyAgent(\"user_proxy\")\n",
-    "    response = await user_proxy_agent.on_messages(\n",
-    "        [TextMessage(content=\"What is your name? \", source=\"user\")], cancellation_token=CancellationToken()\n",
-    "    )\n",
-    "    print(f\"Your name is {response.chat_message.content}\")\n",
-    "\n",
-    "\n",
-    "# Use asyncio.run(user_proxy_run()) when running in a script.\n",
-    "await user_proxy_run()"
+    "```"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The User Proxy agent is ideally used for on-demand human-in-the-loop interactions for scenarios such as Just In Time approvals, human feedback, alerts, etc. For slower user interactions, consider terminating a team using a termination condition and start another one from\n",
-    "{py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream` with another message.\n",
-    "\n",
     "## Streaming Messages\n",
     "\n",
     "We can also stream each message as it is generated by the agent by using the\n",
@@ -183,9 +155,6 @@
     }
    ],
    "source": [
-    "from autogen_agentchat.ui import Console\n",
-    "\n",
-    "\n",
     "async def assistant_run_stream() -> None:\n",
     "    # Option 1: read each message from the stream (as shown in the previous example).\n",
     "    # async for message in agent.on_messages_stream(\n",
@@ -216,15 +185,161 @@
     "with the final item being the response message in the {py:attr}`~autogen_agentchat.base.Response.chat_message` attribute.\n",
     "\n",
     "From the messages, you can observe that the assistant agent utilized the `web_search` tool to\n",
-    "gather information and responded based on the search results.\n",
+    "gather information and responded based on the search results."
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Using Tools\n",
     "\n",
+    "Large Language Models (LLMs) are typically limited to generating text or code responses. \n",
+    "However, many complex tasks benefit from the ability to use external tools that perform specific actions,\n",
+    "such as fetching data from APIs or databases.\n",
     "\n",
-    "## Understanding Tool Calling\n",
+    "To address this limitation, modern LLMs can now accept a list of available tool schemas \n",
+    "(descriptions of tools and their arguments) and generate a tool call message. \n",
+    "This capability is known as **Tool Calling** or **Function Calling** and \n",
+    "is becoming a popular pattern in building intelligent agent-based applications.\n",
+    "Refer to the documentation from [OpenAI](https://platform.openai.com/docs/guides/function-calling) \n",
+    "and [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/tool-use) for more information about tool calling in LLMs.\n",
     "\n",
-    "Large Language Models (LLMs) are typically limited to generating text or code responses. However, many complex tasks benefit from the ability to use external tools that perform specific actions, such as fetching data from APIs or databases.\n",
+    "In AgentChat, the assistant agent can use tools to perform specific actions.\n",
+    "The `web_search` tool is one such tool that allows the assistant agent to search the web for information.\n",
+    "A custom tool can be a Python function or a subclass of {py:class}`~autogen_core.tools.BaseTool`.\n",
     "\n",
-    "To address this limitation, modern LLMs can now accept a list of available tool schemas (descriptions of tools and their arguments) and generate a tool call message. This capability is known as **Tool Calling** or **Function Calling** and is becoming a popular pattern in building intelligent agent-based applications.\n",
+    "### LangChain Tools\n",
+    "\n",
+    "In addition to custom tools, you can also use tools from the LangChain library\n",
+    "by wrapping them in {py:class}`~autogen_ext.tools.langchain.LangChainToolAdapter`.\n",
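+    "\n",
+    "The example below wraps LangChain's `PythonAstREPLTool` so the agent can answer\n",
+    "questions about a Pandas DataFrame. Note that it assumes the `langchain-experimental`\n",
+    "and `pandas` packages are installed in your environment\n",
+    "(e.g., `pip install langchain-experimental pandas`)."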
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "---------- assistant ----------\n",
+      "[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')]\n",
+      "[Prompt tokens: 111, Completion tokens: 22]\n",
+      "---------- assistant ----------\n",
+      "[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')]\n",
+      "---------- assistant ----------\n",
+      "29.69911764705882\n",
+      "---------- Summary ----------\n",
+      "Number of inner messages: 2\n",
+      "Total prompt tokens: 111\n",
+      "Total completion tokens: 22\n",
+      "Duration: 0.62 seconds\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "Response(chat_message=ToolCallSummaryMessage(source='assistant', models_usage=None, content='29.69911764705882', type='ToolCallSummaryMessage'), inner_messages=[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=111, completion_tokens=22), content=[FunctionCall(id='call_BEYRkf53nBS1G2uG60wHP0zf', arguments='{\"query\":\"df[\\'Age\\'].mean()\"}', name='python_repl_ast')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='29.69911764705882', call_id='call_BEYRkf53nBS1G2uG60wHP0zf')], type='ToolCallExecutionEvent')])"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import pandas as pd\n",
+    "from autogen_ext.tools.langchain import LangChainToolAdapter\n",
+    "from langchain_experimental.tools.python.tool import PythonAstREPLTool\n",
+    "\n",
+    "df = pd.read_csv(\"https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv\")\n",
+    "tool = LangChainToolAdapter(PythonAstREPLTool(locals={\"df\": df}))\n",
+    "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n",
+    "agent = AssistantAgent(\n",
+    "    \"assistant\", tools=[tool], model_client=model_client, system_message=\"Use the `df` variable to access the dataset.\"\n",
+    ")\n",
+    "await Console(\n",
+    "    agent.on_messages_stream(\n",
+    "        [TextMessage(content=\"What's the average age of the passengers?\", source=\"user\")], CancellationToken()\n",
+    "    )\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Using Model Context\n",
+    "\n",
+    "{py:class}`~autogen_agentchat.agents.AssistantAgent` has a `model_context`\n",
+    "parameter that can be used to pass in a {py:class}`~autogen_core.model_context.ChatCompletionContext`\n",
+    "object. This allows the agent to use different model contexts, such as\n",
+    "{py:class}`~autogen_core.model_context.BufferedChatCompletionContext` to\n",
+    "limit the context sent to the model.\n",
+    "\n",
+    "By default, {py:class}`~autogen_agentchat.agents.AssistantAgent` uses\n",
+    "the {py:class}`~autogen_core.model_context.UnboundedChatCompletionContext`\n",
+    "which sends the full conversation history to the model. To limit the context\n",
+    "to the last `n` messages, you can use the {py:class}`~autogen_core.model_context.BufferedChatCompletionContext`.\n",
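+    "\n",
+    "As a rough sketch of the behavior (this snippet assumes the `add_message` and\n",
+    "`get_messages` methods of {py:class}`~autogen_core.model_context.ChatCompletionContext`),\n",
+    "a buffered context only hands the most recent `buffer_size` messages to the model client:\n",
+    "\n",
+    "```python\n",
+    "from autogen_core.model_context import BufferedChatCompletionContext\n",
+    "from autogen_core.models import AssistantMessage, UserMessage\n",
+    "\n",
+    "context = BufferedChatCompletionContext(buffer_size=2)\n",
+    "await context.add_message(UserMessage(content=\"Hi, I am Sam.\", source=\"user\"))\n",
+    "await context.add_message(AssistantMessage(content=\"Hello Sam!\", source=\"assistant\"))\n",
+    "await context.add_message(UserMessage(content=\"What is my name?\", source=\"user\"))\n",
+    "\n",
+    "# Only the last two messages are visible to the model.\n",
+    "print(len(await context.get_messages()))  # 2\n",
+    "```"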
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from autogen_core.model_context import BufferedChatCompletionContext\n",
+    "\n",
+    "# Create an agent that uses only the last 5 messages in the context to generate responses.\n",
+    "agent = AssistantAgent(\n",
+    "    name=\"assistant\",\n",
+    "    model_client=model_client,\n",
+    "    tools=[web_search],\n",
+    "    system_message=\"Use tools to solve tasks.\",\n",
+    "    model_context=BufferedChatCompletionContext(buffer_size=5),  # Only use the last 5 messages in the context.\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## User Proxy Agent\n",
+    "\n",
+    "{py:class}`~autogen_agentchat.agents.UserProxyAgent` is a built-in agent that\n",
+    "provides one way for a user to intervene in the process. This agent will put the team in a temporary blocking state, and thus any exceptions or runtime failures while in the blocked state will result in a deadlock. It is strongly advised that this agent be coupled with a timeout mechanism and that all errors and exceptions emanating from it are handled."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from autogen_agentchat.agents import UserProxyAgent\n",
+    "\n",
+    "\n",
+    "async def user_proxy_run() -> None:\n",
+    "    user_proxy_agent = UserProxyAgent(\"user_proxy\")\n",
+    "    response = await user_proxy_agent.on_messages(\n",
+    "        [TextMessage(content=\"What is your name? \", source=\"user\")], cancellation_token=CancellationToken()\n",
+    "    )\n",
+    "    print(f\"Your name is {response.chat_message.content}\")\n",
+    "\n",
+    "\n",
+    "# Use asyncio.run(user_proxy_run()) when running in a script.\n",
+    "await user_proxy_run()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The User Proxy agent is ideally used for on-demand human-in-the-loop interactions for scenarios such as Just In Time approvals, human feedback, alerts, etc. For slower user interactions, consider terminating a team using a termination condition and starting another one from\n",
+    "{py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream` with another message.\n",
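+    "\n",
+    "Because `on_messages` blocks until the user responds, one way to apply the\n",
+    "recommended timeout is to wrap the call in `asyncio.wait_for` (a minimal sketch,\n",
+    "not a built-in feature of the agent):\n",
+    "\n",
+    "```python\n",
+    "import asyncio\n",
+    "\n",
+    "\n",
+    "async def user_proxy_run_with_timeout() -> None:\n",
+    "    user_proxy_agent = UserProxyAgent(\"user_proxy\")\n",
+    "    try:\n",
+    "        # Give the user 60 seconds to respond before giving up.\n",
+    "        response = await asyncio.wait_for(\n",
+    "            user_proxy_agent.on_messages(\n",
+    "                [TextMessage(content=\"What is your name? \", source=\"user\")],\n",
+    "                cancellation_token=CancellationToken(),\n",
+    "            ),\n",
+    "            timeout=60,\n",
+    "        )\n",
+    "        print(f\"Your name is {response.chat_message.content}\")\n",
+    "    except asyncio.TimeoutError:\n",
+    "        print(\"No response from the user within 60 seconds.\")\n",
+    "```"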
   ]
  },
  {
diff --git a/python/packages/autogen-ext/pyproject.toml b/python/packages/autogen-ext/pyproject.toml
index 34a71f917ca7..10e890ff80ec 100644
--- a/python/packages/autogen-ext/pyproject.toml
+++ b/python/packages/autogen-ext/pyproject.toml
@@ -58,7 +58,8 @@ packages = ["src/autogen_ext"]
 
 [dependency-groups]
 dev = [
-    "autogen_test_utils"
+    "autogen_test_utils",
+    "langchain-experimental",
 ]
 
 [tool.ruff]
diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py
index ea657691d942..86d6f156f13a 100644
--- a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py
+++ b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/_langchain_adapter.py
@@ -9,7 +9,7 @@
 from pydantic import BaseModel, Field, create_model
 
 if TYPE_CHECKING:
-    from langchain_core.tools import Tool as LangChainTool
+    from langchain_core.tools import BaseTool as LangChainTool
 
 
 class LangChainToolAdapter(BaseTool[BaseModel, Any]):
@@ -22,6 +22,44 @@ class LangChainToolAdapter(BaseTool[BaseModel, Any]):
 
     Args:
        langchain_tool (LangChainTool): A LangChain tool to wrap
+
+    Examples:
+
+        Use the `PythonAstREPLTool` from the `langchain_experimental` package to
+        create a tool that allows you to interact with a Pandas DataFrame.
+
+        .. code-block:: python
+
+            import asyncio
+            import pandas as pd
+            from langchain_experimental.tools.python.tool import PythonAstREPLTool
+            from autogen_ext.tools.langchain import LangChainToolAdapter
+            from autogen_ext.models.openai import OpenAIChatCompletionClient
+            from autogen_agentchat.messages import TextMessage
+            from autogen_agentchat.agents import AssistantAgent
+            from autogen_agentchat.ui import Console
+            from autogen_core import CancellationToken
+
+
+            async def main() -> None:
+                df = pd.read_csv("https://raw.githubusercontent.com/pandas-dev/pandas/main/doc/data/titanic.csv")  # type: ignore
+                tool = LangChainToolAdapter(PythonAstREPLTool(locals={"df": df}))
+                model_client = OpenAIChatCompletionClient(model="gpt-4o")
+                agent = AssistantAgent(
+                    "assistant",
+                    tools=[tool],
+                    model_client=model_client,
+                    system_message="Use the `df` variable to access the dataset.",
+                )
+                await Console(
+                    agent.on_messages_stream(
+                        [TextMessage(content="What's the average age of the passengers?", source="user")], CancellationToken()
+                    )
+                )
+
+
+            asyncio.run(main())
+
    """
 
    def __init__(self, langchain_tool: LangChainTool):
@@ -32,10 +70,10 @@ def __init__(self, langchain_tool: LangChainTool):
        description = self._langchain_tool.description or ""
 
        # Determine the callable method
-        if hasattr(self._langchain_tool, "func") and callable(self._langchain_tool.func):
-            assert self._langchain_tool.func is not None
-            self._callable: Callable[..., Any] = self._langchain_tool.func
-        elif hasattr(self._langchain_tool, "_run") and callable(self._langchain_tool._run):  # pyright: ignore
+        if hasattr(self._langchain_tool, "func") and callable(self._langchain_tool.func):  # type: ignore
+            assert self._langchain_tool.func is not None  # type: ignore
+            self._callable: Callable[..., Any] = self._langchain_tool.func  # type: ignore
+        elif hasattr(self._langchain_tool, "_run") and callable(self._langchain_tool._run):  # type: ignore
            self._callable: Callable[..., Any] = self._langchain_tool._run  # type: ignore
        else:
            raise AttributeError(
diff --git a/python/uv.lock b/python/uv.lock
index a5fe187da540..96fc2651b119 100644
--- a/python/uv.lock
+++ b/python/uv.lock
@@ -517,6 +517,7 @@ web-surfer = [
 
 [package.dev-dependencies]
 dev = [
     { name = "autogen-test-utils" },
+    { name = "langchain-experimental" },
 ]
 
 [package.metadata]
@@ -546,7 +547,10 @@ requires-dist = [
 ]
 
 [package.metadata.requires-dev]
-dev = [{ name = "autogen-test-utils", editable = "packages/autogen-test-utils" }]
+dev = [
+    { name = "autogen-test-utils", editable = "packages/autogen-test-utils" },
+    { name = "langchain-experimental" },
+]
 
 [[package]]
 name = "autogen-magentic-one"
@@ -1739,9 +1743,54 @@ wheels = [
 ]
 
 [[package]]
-name = "langchain-core"
+name = "langchain"
+version = "0.3.13"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "aiohttp" },
+    { name = "async-timeout", marker = "python_full_version < '3.11'" },
+    { name = "langchain-core" },
+    { name = "langchain-text-splitters" },
+    { name = "langsmith" },
+    { name = "numpy" },
+    { name = "pydantic" },
+    { name = "pyyaml" },
+    { name = "requests" },
+    { name = "sqlalchemy" },
+    { name = "tenacity" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/bf/77/6850ca959114aa1e63a4aa3298d9d5886eccedfa6b1d8a50f59d9e6ee328/langchain-0.3.13.tar.gz", hash = "sha256:3d618a8e7e496704dc4407d224218ff28baf9416c1241e7bdcdd117e6c70daa8", size = 420308 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/83/d4/f70ddcd14a1c9401367c601380e1204c6cfc948d7a46a4a274d3c03f1526/langchain-0.3.13-py3-none-any.whl", hash = "sha256:dd3549a28dc28b5d4769122b60fad0890aee86928d1bdbba3f9349d9f023315d", size = 1009093 },
+]
+
+[[package]]
+name = "langchain-community"
 version = "0.3.13"
 source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "aiohttp" },
+    { name = "dataclasses-json" },
+    { name = "httpx-sse" },
+    { name = "langchain" },
+    { name = "langchain-core" },
+    { name = "langsmith" },
+    { name = "numpy" },
+    { name = "pydantic-settings" },
+    { name = "pyyaml" },
+    { name = "requests" },
+    { name = "sqlalchemy" },
+    { name = "tenacity" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/47/d0/37ed3d950f99fd2f24ed9e71a4a30adede0574cd45e8245051404d468689/langchain_community-0.3.13.tar.gz", hash = "sha256:8abe05b4ab160018dbd368f7b4dc79ef6a66178a461b47c4ce401d430337e0c2", size = 1717819 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/45/a7/b9f3cd12510fe9a5fe2dcd7f12d095b0d5bd95fb2cd9c5362de45ebc18f9/langchain_community-0.3.13-py3-none-any.whl", hash = "sha256:d6f623d59d44ea85b9cafac8ca6e9970c343bf892a9a3252b3f25e30df339be2", size = 2500072 },
+]
+
+[[package]]
+name = "langchain-core"
+version = "0.3.28"
+source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "jsonpatch" },
     { name = "langsmith" },
@@ -1751,9 +1800,22 @@ dependencies = [
     { name = "tenacity" },
     { name = "typing-extensions" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/02/d6/5a16b853a19ba0200dbf77010d9a3b3effc84c62bc952fff7bf81e90f9d8/langchain_core-0.3.13.tar.gz", hash = "sha256:d3a6c838284ff73705dd0f24a36cd8b2fa34a348e6b357e6b3d58199ab063cde", size = 327206 }
+sdist = { url = "https://files.pythonhosted.org/packages/de/f2/1787f9e7fcf6bee70f7a8f488ee95b814408706ab35d61dbba279fbe7361/langchain_core-0.3.28.tar.gz", hash = "sha256:407f7607e6b3c0ebfd6094da95d39b701e22e59966698ef126799782953e7f2c", size = 330743 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/38/53/b5436750c392370cff44f8e3669a4886fa18579ad0ce33a505f8f261c1a0/langchain_core-0.3.13-py3-none-any.whl", hash = "sha256:e79cfac046cab293c02047f081741f4a433ca5aa54a3973e179eaef147cdfba4", size = 408049 },
+    { url = "https://files.pythonhosted.org/packages/9a/bf/07e63d4b4c41aa49daf4a4499e8010928ce8545469f0265544f925c95fff/langchain_core-0.3.28-py3-none-any.whl", hash = "sha256:a02f81ca53a8eed757133797e5a602ca80c1324bbecb0c5d86ef7bd3d6625372", size = 411553 },
 ]
 
+[[package]]
+name = "langchain-experimental"
+version = "0.3.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "langchain-community" },
+    { name = "langchain-core" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/27/56/a8acbb08a03383c28875b3b151e4cefea5612266917fbd6fc3c14c21e172/langchain_experimental-0.3.4.tar.gz", hash = "sha256:937c4259ee4a639c618d19acf0e2c5c2898ef127050346edc5655259aa281a21", size = 140532 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/b2/27/fe8caa4884611286b1f7d6c5cfd76e1fef188faaa946db4fde6daa1cd2cd/langchain_experimental-0.3.4-py3-none-any.whl", hash = "sha256:2e587306aea36b60fa5e5fc05dc7281bee9f60a806f0bf9d30916e0ee096af80", size = 209154 },
+]
+
 [[package]]
@@ -1770,6 +1832,18 @@
 wheels = [
     { url = "https://files.pythonhosted.org/packages/66/ea/dcc59d9b818a4d7f25d4d6b3018355a0e0243a351b1d4ef8b26ec107ee00/langchain_openai-0.2.3-py3-none-any.whl", hash = "sha256:f498c94817c980cb302439b95d3f3275cdf2743e022ee674692c75898523cf57", size = 49907 },
 ]
 
+[[package]]
+name = "langchain-text-splitters"
+version = "0.3.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "langchain-core" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/10/a5/215816cc376c4f3853c961ae98022a6d57e74f6859c206f29716173e9a73/langchain_text_splitters-0.3.4.tar.gz", hash = "sha256:f3cedea469684483b4492d9f11dc2fa66388dab01c5d5c5307925515ab884c24", size = 22205 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/a2/37/b2c971dfea62675f640e0dd5a078ca502885059276d21b7143cf1fe13e82/langchain_text_splitters-0.3.4-py3-none-any.whl", hash = "sha256:432fdb39c161d4d0db16d61d38af068dc5dd4dd08082febd2fced81304b2725c", size = 27783 },
+]
+
 [[package]]
 name = "langgraph"
 version = "0.2.39"