From ac822312ffc3c590ba7d89827c3d2b0acf9d7ea1 Mon Sep 17 00:00:00 2001 From: Niel Teng Hu Date: Tue, 2 Jan 2024 18:48:22 -0500 Subject: [PATCH] fix bugs and update notebooks (#117) --- actionweaver/actions/action.py | 2 +- actionweaver/llms/azure/chat.py | 1 - actionweaver/llms/openai/tools/chat.py | 2 - .../notebooks/cookbooks/azure_tutorial.ipynb | 706 ------------------ .../source/notebooks/cookbooks/cookbook.ipynb | 5 +- .../notebooks/cookbooks/stateful_agent.ipynb | 39 +- pyproject.toml | 2 +- 7 files changed, 26 insertions(+), 731 deletions(-) delete mode 100644 docs/source/notebooks/cookbooks/azure_tutorial.ipynb diff --git a/actionweaver/actions/action.py b/actionweaver/actions/action.py index 57d82ca..9dcf5a1 100644 --- a/actionweaver/actions/action.py +++ b/actionweaver/actions/action.py @@ -199,7 +199,7 @@ def __init__( stop=False, instance=None, ): - super().__init__(name, decorated_obj, logger, stop) + super().__init__(name, decorated_obj, stop=stop, logger=logger) self.instance = instance self.pydantic_cls = pydantic_cls diff --git a/actionweaver/llms/azure/chat.py b/actionweaver/llms/azure/chat.py index 4c85140..b350fc7 100644 --- a/actionweaver/llms/azure/chat.py +++ b/actionweaver/llms/azure/chat.py @@ -131,7 +131,6 @@ def _invoke_function( functions, orch, action_handler, - logger=logging.getLogger(__name__), ): """Invoke the function, update the messages, returns functions argument for the next OpenAI API call or halt the function loop and return the response.""" diff --git a/actionweaver/llms/openai/tools/chat.py b/actionweaver/llms/openai/tools/chat.py index 9bc81d8..8683473 100644 --- a/actionweaver/llms/openai/tools/chat.py +++ b/actionweaver/llms/openai/tools/chat.py @@ -50,7 +50,6 @@ def _invoke_tool( tools, orch, action_handler: ActionHandlers, - logger=logging.getLogger(__name__), ): messages += [response_msg] @@ -298,7 +297,6 @@ def new_create( tools, orch, action_handler, - logger, ) if stop: return resp diff --git a/docs/source/notebooks/cookbooks/azure_tutorial.ipynb b/docs/source/notebooks/cookbooks/azure_tutorial.ipynb deleted file mode 100644 index 5f0eb58..0000000 --- a/docs/source/notebooks/cookbooks/azure_tutorial.ipynb +++ /dev/null @@ -1,706 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "c179ed82-6be2-42cf-8519-0a590344fcc0", - "metadata": {}, - "source": [ - "## ActionWeaver + Azure OpenAI Tutorial\n", - "\n", - "**ActionWeaver: AI Application Framework**\n", - "\n", - "ActionWeaver is an AI application framework that puts function-calling as a first-class feature, supporting both OpenAI API and Azure OpenAI service!\n", - "\n", - "\n", - " " - ] - }, - { - "cell_type": "markdown", - "id": "2a6418bf-d868-48cf-b469-4941971ceab6", - "metadata": {}, - "source": [ - "First, let's set up an Azure OpenAI Client and then use ActionWeaver patch to enhance its API\n" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "bc6a2fd0-889b-4006-901e-c8a57834d4ae", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "from openai import AzureOpenAI\n", - "import actionweaver.llms as llms\n", - "\n", - "# Azure OpenAI\n", - "model=\"gpt-35-turbo-0613-16k\"\n", - "client = llms.patch(AzureOpenAI(\n", - " azure_endpoint = os.getenv(\"AZURE_OPENAI_ENDPOINT\"), \n", - " api_key=os.getenv(\"AZURE_OPENAI_KEY\"), \n", - " api_version=\"2023-10-01-preview\"\n", - "))\n", - "\n", - "# If you want to use OpenAI endpoint\n", - "# from openai import OpenAI\n", - "# client = llms.patch(OpenAI())" - ] - }, - { - "cell_type": 
"markdown", - "id": "78d200d8-ec3b-499b-94af-148d337fc055", - "metadata": {}, - "source": [ - "Users can add any function as a tool with a simple action decorator. In this example, we want to enable the LLM to request information about time and weather." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "1f450232-feff-440c-8f84-04bcc131a308", - "metadata": {}, - "outputs": [], - "source": [ - "from actionweaver import action\n", - "\n", - "@action(name=\"GetCurrentTime\")\n", - "def get_current_time() -> str:\n", - " \"\"\"\n", - " Use this for getting the current time in the specified time zone.\n", - " \n", - " :return: A string representing the current time in the specified time zone.\n", - " \"\"\"\n", - " print (\"Getting current time...\")\n", - " import datetime\n", - " current_time = datetime.datetime.now()\n", - " \n", - " return f\"The current time is {current_time}\"\n", - "\n", - "\n", - "@action(name=\"GetWeather\", stop=False)\n", - "def get_current_weather(location:str, unit:str=\"fahrenheit\"):\n", - " \"\"\"Get the current weather in a given location\"\"\"\n", - " print (\"Getting current weather\")\n", - " \n", - " import json\n", - " if \"tokyo\" in location.lower():\n", - " return json.dumps({\"location\": \"Tokyo\", \"temperature\": \"10\", \"unit\": \"celsius\"})\n", - " elif \"san francisco\" in location.lower():\n", - " return json.dumps({\"location\": \"San Francisco\", \"temperature\": \"72\", \"unit\": \"fahrenheit\"})\n", - " elif \"paris\" in location.lower():\n", - " return json.dumps({\"location\": \"Paris\", \"temperature\": \"22\", \"unit\": \"celsius\"})\n", - " else:\n", - " return json.dumps({\"location\": location, \"temperature\": \"unknown\"})\n" - ] - }, - { - "cell_type": "markdown", - "id": "39420854-95f4-4df7-ac91-a529bb915c60", - "metadata": {}, - "source": [ - "Invoke the chat completion API, this time including an additional **actions** parameter." 
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "87b432dc-bab4-438b-b70d-6e5e60370dd7", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Getting current time...\n", - "Getting current weather\n", - "Getting current weather\n" - ] - }, - { - "data": { - "text/plain": [ - "ChatCompletion(id='chatcmpl-8asVAnF9jyZWRMXaqBIKdniDqDaS2', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The current time is 2023-12-28 16:46:44.\\n\\nThe weather in San Francisco is 72°F.\\n\\nThe weather in Paris is 22°C.', role='assistant', function_call=None, tool_calls=None), content_filter_results={'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}})], created=1703800004, model='gpt-35-turbo-16k', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=38, prompt_tokens=239, total_tokens=277), prompt_filter_results=[{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}])" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "messages = [\n", - " {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n", - " {\"role\": \"user\", \"content\": \"what time is it, and what's the weather in San Francisco and Paris ?\"}\n", - " ]\n", - "\n", - "\n", - "response = client.chat.completions.create(\n", - " model=model,\n", - " messages=messages,\n", - " actions = [get_current_weather, get_current_time],\n", - " stream=False, \n", - ")\n", - "\n", - "response" - ] - }, - { - "cell_type": "markdown", - "id": "556b7e94-2346-447b-a4e7-b0b059ae2da2", - "metadata": {}, - "source": [ - "We can also convert text into structured data, let's define some Pydantic models" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "c3a71181-d905-4f9b-82a9-7855457840fe", - "metadata": {}, - "outputs": [], - "source": [ - "# Example inspired by https://jxnl.github.io/instructor/examples/autodataframe/\n", - "from typing import List, Any\n", - "from pydantic import BaseModel, Field\n", - "\n", - "\n", - "class Dataframe(BaseModel):\n", - " \"\"\"\n", - " Class representing a dataframe.\n", - " \"\"\"\n", - " name: str = Field(..., description=\"The name of the dataframe\")\n", - " data: List[List[Any]] = Field(\n", - " ...,\n", - " description=\"\"\"Correct rows of data aligned to column names, Nones are allowed.\"\"\",\n", - " )\n", - " columns: List[str] = Field(\n", - " ...,\n", - " description=\"Column names relevant from source data, should be in snake_case\",\n", - " )\n", - "\n", - " def to_pandas(self):\n", - " import pandas as pd\n", - " return pd.DataFrame(data=self.data, columns=self.columns)\n", - "\n", - "\n", - " def chat(self, query):\n", - " messages = [\n", - " {\n", - " \"role\": \"system\",\n", - " \"content\": f\"You're a helpful agent trying to answer questions based on {str(self)}\",\n", - " },\n", - " {\n", - " \"role\": \"user\",\n", - " \"content\": f\"{query}\",\n", - " },\n", - " ]\n", - " return client.chat.completions.create(\n", - " model=model,\n", - " messages=messages,\n", - " temperature=1,\n", - " stream=False,\n", - " 
)\n", - "\n", - "class Database(BaseModel):\n", - " \"\"\"\n", - " A set of dataframes\n", - " \"\"\"\n", - " tables: List[Dataframe] = Field(..., description=\"List of dataframes\")\n" - ] - }, - { - "cell_type": "markdown", - "id": "172258eb-7c36-49b9-8ca9-0634903f76dc", - "metadata": {}, - "source": [ - "Use the **action_from_model** to transform a PyDantic model into an action, followed by using the `invoke` method of the action. Applying `force=True` forces the LLM to execute the action." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "d75aabdf-ae48-4c51-ad62-b653385bbce9", - "metadata": {}, - "outputs": [], - "source": [ - "from actionweaver.actions.factories.pydantic_model_to_action import action_from_model\n", - "\n", - "\n", - "data =\"\"\"Mary, 35, from Chicago, is a soccer aficionado. She heads the 'Falcons,' a soccer team with 14 members.\n", - "\n", - "David, a 28-year-old swimming enthusiast from Miami, leads the 'Sharks,' a team of 12 swimmers.\n", - "\n", - "Emily, 22, in Boston, is devoted to volleyball. She captains the 'Eagles,' which includes 16 volleyball players.\n", - "\n", - "Chris, 32, from Seattle, is an avid cyclist and heads the 'Wolves,' a 12-member cycling team.\n", - "\n", - "Anna, 27, in Denver, excels in golf and captains the 'Bears,' a team of 9 golfers.\n", - "\n", - "In Dallas, there's Leo, 24, who loves hockey. He leads the 'Panthers,' a hockey team of 13.\n", - "\n", - "Grace, 29, in Atlanta, is passionate about rowing. She captains the 'Dolphins,' a rowing team of 11 athletes.\n", - "\n", - "In Phoenix, Alex, 26, is a skilled archer and heads the 'Hawks,' an archery team with 8 members.\n", - "\n", - "Finally, in Portland, we have Zoe, 31, who is a master at badminton. She captains the 'Rabbits,' a badminton team of 10 players.\n", - "\"\"\"\n", - "\n", - "db = action_from_model(Database).invoke(client, model=model, messages=[{\"role\": \"user\", \"content\": data}], force=True)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "ac8eb758-8653-4502-8661-353f9b082843", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
Team NameSportCaptainAgeCityNumber of Players
0FalconsSoccerMary35Chicago14
1SharksSwimmingDavid28Miami12
2EaglesVolleyballEmily22Boston16
3WolvesCyclingChris32Seattle12
4BearsGolfAnna27Denver9
5PanthersHockeyLeo24Dallas13
6DolphinsRowingGrace29Atlanta11
7HawksArcheryAlex26Phoenix8
8RabbitsBadmintonZoe31Portland10
\n", - "
" - ], - "text/plain": [ - " Team Name Sport Captain Age City Number of Players\n", - "0 Falcons Soccer Mary 35 Chicago 14\n", - "1 Sharks Swimming David 28 Miami 12\n", - "2 Eagles Volleyball Emily 22 Boston 16\n", - "3 Wolves Cycling Chris 32 Seattle 12\n", - "4 Bears Golf Anna 27 Denver 9\n", - "5 Panthers Hockey Leo 24 Dallas 13\n", - "6 Dolphins Rowing Grace 29 Atlanta 11\n", - "7 Hawks Archery Alex 26 Phoenix 8\n", - "8 Rabbits Badminton Zoe 31 Portland 10" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "db.tables[0].to_pandas()" - ] - }, - { - "cell_type": "markdown", - "id": "cf0f86ac-e2f4-466b-8d62-1532215bbc35", - "metadata": {}, - "source": [ - "Let's put everything together by building a database agent. This time, we include an additional actions parameter `orch` when calling the chat completion API. This feature will allow us for more precise control over the specific set of tools available to the LLM during each interaction.\n", - "\n", - "Example:\n", - "```python\n", - "client.chat.completions.create(\n", - " messages = ...\n", - " actions=[a1, a2, a3], # First, LLM respond with either a1, a2 or a3, or text without action\n", - " # Define the orchestration logic for actions:\n", - " orch={\n", - " a1.name: [a2, a3], # If a1 is invoked, the next response will be either a2, a3 or a text response.\n", - " a2.name: a3, # If a2 is invoked, the next action will be a3\n", - " a3.name: [a4] # If a3 is invoked, the next response will be a4 or a text response.\n", - " a4.name: None # If a4 is invoked, the next response will guarantee to be a text message\n", - " }\n", - ")\n", - "```\n", - "\n", - "For details please take a look at [here](https://github.com/TengHu/ActionWeaver?tab=readme-ov-file#orchestration-of-actions-experimental )\n" - ] - }, - { - "cell_type": "code", - "execution_count": 58, - "id": "ee100daf-da26-45dd-a288-e753a9e518cc", - "metadata": {}, - "outputs": [], - "source": [ - "class DBAgent:\n", - " def __init__(self):\n", - " self.db = Database(tables=[])\n", - " self.messages = [{\"role\": \"system\", \"content\": f\"You're a helpful agent. You answer questions in plain English. Tables available: {[table.name for table in self.db.tables]}\"}]\n", - "\n", - " @action(\"ReadDataAndCreateTable\")\n", - " def read(self, data: str):\n", - " \"\"\" \n", - " This method is used to read unstructured data and create tables.\n", - " \n", - " Args:\n", - " data (str): The unstructured data to be processed\n", - " \"\"\"\n", - " print (f\"[Reading data]: \\n {data} \\n\")\n", - " messages=[\n", - " {\n", - " \"role\": \"system\",\n", - " \"content\": \"\"\"Map this data into a dataframe and correctly define the correct columns and rows\"\"\",\n", - " },\n", - " {\n", - " \"role\": \"user\",\n", - " \"content\": f\"{data}\",\n", - " },\n", - " ]\n", - " db = action_from_model(Database, stop=True).invoke(client, messages=messages, model=model, temperature=0.1, stream=False, force=True)\n", - " self.db.tables.extend(db.tables)\n", - " return f\"{[table.name for table in db.tables]} have been created\"\n", - "\n", - "\n", - " @action(\"AnswerQuestion\")\n", - " def answer(self, query: str, tables:List[str]):\n", - " \"\"\" \n", - " Responds to plain English queries by extracting relevant information from predefined data tables.\n", - " \n", - " This method is specifically designed to process and answer questions that can be addressed using the data available in the provided tables. 
The tables are not SQL tables.\n", - " \n", - " Parameters:\n", - " query (str): A plain English question or query that the user wants to be answered. The method processes this query to understand and extract the necessary information from the available tables.\n", - " tables (List[str]): A list of table names or identifiers. These tables contain the data that will be used to answer the query.\n", - " \n", - " Usage:\n", - " This method should be invoked when there is a need to answer questions based on the information contained in the specified tables. It is essential that the tables provided have the relevant data to address the query.\n", - " \"\"\"\n", - " candidates = [table for table in self.db.tables if table.name in tables]\n", - " context = [table.to_pandas().to_json() for table in candidates]\n", - " \n", - " print (f\"[Answering]: {query} using context {context} from tables {tables}\")\n", - "\n", - " messages=[\n", - " {\n", - " \"role\": \"system\",\n", - " \"content\": f\"You're a helpful agent trying to answer questions based on context: \\n {context}\"\n", - " },\n", - " {\n", - " \"role\": \"user\",\n", - " \"content\": f\"{query}\",\n", - " },\n", - " ]\n", - "\n", - " response = client.chat.completions.create(\n", - " model=model,\n", - " temperature=.5,\n", - " messages=messages,\n", - " stream=False,\n", - " )\n", - " return response\n", - "\n", - "\n", - "\n", - " def chat(self, query):\n", - " self.messages.append({\"role\": \"user\", \"content\": f\"{query}\"})\n", - " response = client.chat.completions.create(\n", - " model=model,\n", - " messages=self.messages,\n", - " temperature=.1,\n", - " actions = [self.read, self.answer, get_current_time],\n", - " orch = {self.read.name: None, self.answer.name: None, get_current_time.name: None}, # function is called at most once\n", - " stream=False\n", - " )\n", - " self.messages.append({\"role\": \"assistant\", \"content\": response.choices[0].message.content})\n", - " return response\n", - "\n", - "agent = DBAgent()" - ] - }, - { - "cell_type": "code", - "execution_count": 59, - "id": "9527bf1a-9f3d-4932-8172-1e2d0a1b9ca7", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[Reading data]: \n", - " Name, Age, Location, Sport, Team\n", - "Mary, 35, Chicago, Soccer, Falcons\n", - "David, 28, Miami, Swimming, Sharks\n", - "Emily, 22, Boston, Volleyball, Eagles\n", - "Chris, 32, Seattle, Cycling, Wolves\n", - "Anna, 27, Denver, Golf, Bears\n", - "Leo, 24, Dallas, Hockey, Panthers\n", - "Grace, 29, Atlanta, Rowing, Dolphins\n", - "Alex, 26, Phoenix, Archery, Hawks\n", - "Zoe, 31, Portland, Badminton, Rabbits \n", - "\n" - ] - }, - { - "data": { - "text/plain": [ - "ChatCompletion(id='chatcmpl-8asltiAH6eFjL5m40hxAbjxKuknpt', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"I have created a table called 'Players' with the following information:\\n\\n| Name | Age | Location | Sport | Team |\\n|-------|-----|----------|------------|----------|\\n| Mary | 35 | Chicago | Soccer | Falcons |\\n| David | 28 | Miami | Swimming | Sharks |\\n| Emily | 22 | Boston | Volleyball | Eagles |\\n| Chris | 32 | Seattle | Cycling | Wolves |\\n| Anna | 27 | Denver | Golf | Bears |\\n| Leo | 24 | Dallas | Hockey | Panthers |\\n| Grace | 29 | Atlanta | Rowing | Dolphins |\\n| Alex | 26 | Phoenix | Archery | Hawks |\\n| Zoe | 31 | Portland | Badminton | Rabbits |\\n\\nLet me know if there's anything specific you would like to know about these teams or 
players.\", role='assistant', function_call=None, tool_calls=None), content_filter_results={'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}})], created=1703801041, model='gpt-35-turbo-16k', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=209, prompt_tokens=440, total_tokens=649), prompt_filter_results=[{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}])" - ] - }, - "execution_count": 59, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "data =\"\"\"Mary, 35, from Chicago, is a soccer aficionado. She heads the 'Falcons,' a soccer team with 14 members.\n", - "\n", - "David, a 28-year-old swimming enthusiast from Miami, leads the 'Sharks,' a team of 12 swimmers.\n", - "\n", - "Emily, 22, in Boston, is devoted to volleyball. She captains the 'Eagles,' which includes 16 volleyball players.\n", - "\n", - "Chris, 32, from Seattle, is an avid cyclist and heads the 'Wolves,' a 12-member cycling team.\n", - "\n", - "Anna, 27, in Denver, excels in golf and captains the 'Bears,' a team of 9 golfers.\n", - "\n", - "In Dallas, there's Leo, 24, who loves hockey. He leads the 'Panthers,' a hockey team of 13.\n", - "\n", - "Grace, 29, in Atlanta, is passionate about rowing. She captains the 'Dolphins,' a rowing team of 11 athletes.\n", - "\n", - "In Phoenix, Alex, 26, is a skilled archer and heads the 'Hawks,' an archery team with 8 members.\n", - "\n", - "Finally, in Portland, we have Zoe, 31, who is a master at badminton. She captains the 'Rabbits,' a badminton team of 10 players.\n", - "\"\"\"\n", - "\n", - "agent.chat(data)" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "id": "a83c3548-2e11-4983-9d61-222a9af4d202", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[Dataframe(name='Players', data=[['Mary', '35', 'Chicago', 'Soccer', 'Falcons'], ['David', '28', 'Miami', 'Swimming', 'Sharks'], ['Emily', '22', 'Boston', 'Volleyball', 'Eagles'], ['Chris', '32', 'Seattle', 'Cycling', 'Wolves'], ['Anna', '27', 'Denver', 'Golf', 'Bears'], ['Leo', '24', 'Dallas', 'Hockey', 'Panthers'], ['Grace', '29', 'Atlanta', 'Rowing', 'Dolphins'], ['Alex', '26', 'Phoenix', 'Archery', 'Hawks'], ['Zoe', '31', 'Portland', 'Badminton', 'Rabbits']], columns=['Name', 'Age', 'Location', 'Sport', 'Team'])]" - ] - }, - "execution_count": 33, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "agent.db.tables" - ] - }, - { - "cell_type": "code", - "execution_count": 60, - "id": "8f00eb88-8ab4-4c80-a2fc-37d6e408f26a", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[Answering]: Who are the people below the age of 30? 
using context ['{\"Name\":{\"0\":\"Mary\",\"1\":\"David\",\"2\":\"Emily\",\"3\":\"Chris\",\"4\":\"Anna\",\"5\":\"Leo\",\"6\":\"Grace\",\"7\":\"Alex\",\"8\":\"Zoe\"},\"Age\":{\"0\":\"35\",\"1\":\"28\",\"2\":\"22\",\"3\":\"32\",\"4\":\"27\",\"5\":\"24\",\"6\":\"29\",\"7\":\"26\",\"8\":\"31\"},\"Location\":{\"0\":\"Chicago\",\"1\":\"Miami\",\"2\":\"Boston\",\"3\":\"Seattle\",\"4\":\"Denver\",\"5\":\"Dallas\",\"6\":\"Atlanta\",\"7\":\"Phoenix\",\"8\":\"Portland\"},\"Sport\":{\"0\":\"Soccer\",\"1\":\"Swimming\",\"2\":\"Volleyball\",\"3\":\"Cycling\",\"4\":\"Golf\",\"5\":\"Hockey\",\"6\":\"Rowing\",\"7\":\"Archery\",\"8\":\"Badminton\"},\"Team\":{\"0\":\"Falcons\",\"1\":\"Sharks\",\"2\":\"Eagles\",\"3\":\"Wolves\",\"4\":\"Bears\",\"5\":\"Panthers\",\"6\":\"Dolphins\",\"7\":\"Hawks\",\"8\":\"Rabbits\"}}'] from tables ['Players']\n" - ] - }, - { - "data": { - "text/plain": [ - "ChatCompletion(id='chatcmpl-8aslzyUIcofJbF6GSEzQ9RmNyFNwd', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The people below the age of 30 are David, Emily, Anna, Leo, Alex, and Zoe.', role='assistant', function_call=None, tool_calls=None), content_filter_results={'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}})], created=1703801047, model='gpt-35-turbo-16k', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=22, prompt_tokens=984, total_tokens=1006), prompt_filter_results=[{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}])" - ] - }, - "execution_count": 60, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "agent.chat(\"find out people below age 30\")" - ] - }, - { - "cell_type": "code", - "execution_count": 61, - "id": "2a306356-b0af-4d92-a79a-d46bd19a8b23", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[Reading data]: \n", - " Name, Age, Location, Sport, Team\n", - "David, 28, Miami, Swimming, Sharks\n", - "Emily, 22, Boston, Volleyball, Eagles\n", - "Anna, 27, Denver, Golf, Bears\n", - "Leo, 24, Dallas, Hockey, Panthers\n", - "Alex, 26, Phoenix, Archery, Hawks\n", - "Zoe, 31, Portland, Badminton, Rabbits \n", - "\n" - ] - }, - { - "data": { - "text/plain": [ - "ChatCompletion(id='chatcmpl-8asm4sMzPkLtt8y2PH3yZMVZaoOPp', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"I have created a new table called 'Players' with the information of the people below the age of 30:\\n\\n| Name | Age | Location | Sport | Team |\\n|-------|-----|----------|------------|----------|\\n| David | 28 | Miami | Swimming | Sharks |\\n| Emily | 22 | Boston | Volleyball | Eagles |\\n| Anna | 27 | Denver | Golf | Bears |\\n| Leo | 24 | Dallas | Hockey | Panthers |\\n| Alex | 26 | Phoenix | Archery | Hawks |\\n| Zoe | 31 | Portland | Badminton | Rabbits |\\n\\nLet me know if there's anything else I can assist you with.\", role='assistant', function_call=None, tool_calls=None), content_filter_results={'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': 
{'filtered': False, 'severity': 'safe'}})], created=1703801052, model='gpt-35-turbo-16k', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=164, prompt_tokens=1134, total_tokens=1298), prompt_filter_results=[{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}])" - ] - }, - "execution_count": 61, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "agent.chat(\"\"\"create new table from it\"\"\")" - ] - }, - { - "cell_type": "code", - "execution_count": 62, - "id": "59356d48-413a-46db-bcda-7d3e158df8ee", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "2" - ] - }, - "execution_count": 62, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "len(agent.db.tables)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ff477ae7-2daf-4e32-9670-faac33ad5712", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/docs/source/notebooks/cookbooks/cookbook.ipynb b/docs/source/notebooks/cookbooks/cookbook.ipynb index 702ffbc..9ed4b8a 100644 --- a/docs/source/notebooks/cookbooks/cookbook.ipynb +++ b/docs/source/notebooks/cookbooks/cookbook.ipynb @@ -9,7 +9,7 @@ "\n", "- [Quickstart](quickstart.ipynb)\n", "- [Logging & Tracing](logging.ipynb)\n", - "- [ActionWeaver + Azure](azure_tutorial.ipynb)\n", + "\n", "- [Structured Extraction](structured_extraction.ipynb)\n", "- [Action Orchestration](orchestration.ipynb)\n", "- [Stateful Agent](stateful_agent.ipynb)\n", @@ -18,7 +18,8 @@ "\n", "# Examples\n", "\n", - "- [Function Calling with Mistralai/Mistral-7B-Instruct-v0.1 through Anyscale Endpoints](anyscale.ipynb)\n" + "- [Function Calling with Mistralai/Mistral-7B-Instruct-v0.1 through Anyscale Endpoints](anyscale.ipynb)\n", + "- [Tabular Data Extraction](extract_tabular_data.ipynb)\n" ] }, { diff --git a/docs/source/notebooks/cookbooks/stateful_agent.ipynb b/docs/source/notebooks/cookbooks/stateful_agent.ipynb index 5620684..f78e7d0 100644 --- a/docs/source/notebooks/cookbooks/stateful_agent.ipynb +++ b/docs/source/notebooks/cookbooks/stateful_agent.ipynb @@ -11,7 +11,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "70c94281-1d38-4e34-bbd0-92ff70227482", "metadata": {}, "outputs": [], @@ -29,7 +29,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "079d94a0-19ba-4874-8db3-0b1f28230da8", "metadata": {}, "outputs": [], @@ -63,10 +63,21 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "c9138451-e0b5-44bd-b768-2c9f25bbcedb", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "ChatCompletion(id='chatcmpl-8cigjADM3wmMGnKhATp69Iojk41ZY', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The current time is 18:42:16.', role='assistant', 
function_call=None, tool_calls=None))], created=1704238937, model='gpt-3.5-turbo-0613', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=12, prompt_tokens=100, total_tokens=112))" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# You can invoke actions just like regular instance methods\n", "agent.get_current_time() # Output: 'The current time is 20:34.'\n", @@ -143,7 +154,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 5, "id": "1fffbbb7-c8e3-4a98-9564-01ed054c5443", "metadata": {}, "outputs": [], @@ -204,7 +215,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 9, "id": "9ed4906a-b57e-4f78-b9a1-bea1c2a195ce", "metadata": {}, "outputs": [], @@ -214,7 +225,7 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 10, "id": "4e8eca2b-a052-4f38-9f57-3b42cfc362d1", "metadata": {}, "outputs": [ @@ -229,25 +240,17 @@ { "data": { "text/plain": [ - "ChatCompletion(id='chatcmpl-8atToDzgZh5C9gh4xNWYsJtsiaNPS', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Here are the files in the current repository:\\n\\n- langsmith.ipynb\\n- azure_tutorial-Copy1.ipynb\\n- parallel_tools.log\\n- untitled.md\\n- parallel_tools.ipynb\\n- test.log\\n- stateful_agent.ipynb\\n- huggingface.ipynb\\n- anyscale.ipynb\\n- ReAct.ipynb\\n- structured_extraction.log\\n- quickstart.ipynb\\n- structured_extraction.ipynb\\n- azure_tutorial.ipynb\\n- litellm.ipynb\\n- cookbook.ipynb\\n- agent.log\\n- orchestration.ipynb', role='assistant', function_call=None, tool_calls=None))], created=1703803764, model='gpt-3.5-turbo-0613', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=130, prompt_tokens=256, total_tokens=386))" + "ChatCompletion(id='chatcmpl-8cihLJHsfQZz3hPPHAyamQ7HVPm85', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Here are all the files in the current repository:\\n\\n1. langsmith.ipynb\\n2. azure_tutorial-Copy1.ipynb\\n3. parallel_tools.log\\n4. untitled.md\\n5. parallel_tools.ipynb\\n6. network.html\\n7. stateful_agent.ipynb\\n8. huggingface.ipynb\\n9. anyscale.ipynb\\n10. ReAct.ipynb\\n11. tracing.log\\n12. structured_extraction.log\\n13. quickstart.ipynb\\n14. structured_extraction.ipynb\\n15. azure_tutorial.ipynb\\n16. litellm.ipynb\\n17. cookbook.ipynb\\n18. logging.ipynb\\n19. nx.html\\n20. agent.log\\n21. logging-Copy1.ipynb\\n22. 
orchestration.ipynb\\n\\nLet me know if there's anything else I can help with!\", role='assistant', function_call=None, tool_calls=None))], created=1704238975, model='gpt-3.5-turbo-0613', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=190, prompt_tokens=306, total_tokens=496))" ] }, - "execution_count": 36, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "agent(\"Take file action [list all files in current repository]\")" + "agent(\"Take file action of [list all files in current repository]\")" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9d0d2274-c011-4cb8-9710-da1b34bbbb07", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/pyproject.toml b/pyproject.toml index 586c354..d4f9b46 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "actionweaver" -version = "0.0.21" +version = "0.0.22" description = "An Application Framework for Building LLM Agents" authors = ['Teng "Niel" Hu '] readme = "README.md"