diff --git a/README.md b/README.md
index 107b638..e0e11ca 100644
--- a/README.md
+++ b/README.md
@@ -212,10 +212,10 @@ client.chat.completions.create(
-    actions=[a1, a2, a3], # First, LLM respond with either a1, a2 or a3, or text without action
+    actions=[a1, a2, a3], # First, the LLM responds with a1, a2, or a3, or with plain text and no action
     # Define the orchestration logic for actions:
     orch={
-        a1: [a2, a3],  # If a1 is invoked, the next response will be either a2, a3 or a text response.
-        a2: a3,      # If a2 is invoked, the next action will be a3
-        a3: [a4]     # If a3 is invoked, the next response will be a4 or a text response.
-        a4: None     # If a4 is invoked, the next response will guarantee to be a text message
+        a1.name: [a2, a3],  # If a1 is invoked, the next response will be either a2, a3, or a text response.
+        a2.name: a3,        # If a2 is invoked, the next action will be a3
+        a3.name: [a4],      # If a3 is invoked, the next response will be a4 or a text response.
+        a4.name: None,      # If a4 is invoked, the next response is guaranteed to be a text message
     }
 )
 ```
@@ -280,7 +280,7 @@ class FileAgent(AgentV0):
 
     def __call__(self, text):
         self.messages += [{"role": "user", "content":text}]
-        return self.llm.chat.completions.create(model="gpt-3.5-turbo", messages=self.messages, actions = [self.list_all_files_in_repo], orch = {self.handle_file: [self.list_all_files_in_repo, self.read_from_file]})
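+        # After FileHandler is invoked, orch narrows the next response to ListFiles, ReadFile, or plain text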
+        return self.llm.chat.completions.create(model="gpt-3.5-turbo", messages=self.messages, actions = [self.handle_file], orch = {self.handle_file.name: [self.list_all_files_in_repo, self.read_from_file]})
 ```
 
 ## Contributing
diff --git a/docs/source/notebooks/cookbooks/azure_tutorial.ipynb b/docs/source/notebooks/cookbooks/azure_tutorial.ipynb
index a3e258e..5f0eb58 100644
--- a/docs/source/notebooks/cookbooks/azure_tutorial.ipynb
+++ b/docs/source/notebooks/cookbooks/azure_tutorial.ipynb
@@ -25,7 +25,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 28,
+   "execution_count": 1,
    "id": "bc6a2fd0-889b-4006-901e-c8a57834d4ae",
    "metadata": {},
    "outputs": [],
@@ -57,7 +57,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 29,
+   "execution_count": 2,
    "id": "1f450232-feff-440c-8f84-04bcc131a308",
    "metadata": {},
    "outputs": [],
@@ -79,7 +79,7 @@
     "\n",
     "\n",
     "@action(name=\"GetWeather\", stop=False)\n",
-    "def get_current_weather(location, unit=\"fahrenheit\"):\n",
+    "def get_current_weather(location:str, unit:str=\"fahrenheit\"):\n",
     "    \"\"\"Get the current weather in a given location\"\"\"\n",
     "    print (\"Getting current weather\")\n",
     "    \n",
@@ -104,7 +104,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 30,
+   "execution_count": 3,
    "id": "87b432dc-bab4-438b-b70d-6e5e60370dd7",
    "metadata": {},
    "outputs": [
@@ -120,10 +120,10 @@
     {
      "data": {
       "text/plain": [
-       "'The current time is 2023-12-19 09:50:43.573680.\\n\\nIn San Francisco, the weather is 72°F.\\n\\nIn Paris, the weather is 22°C.'"
+       "ChatCompletion(id='chatcmpl-8asVAnF9jyZWRMXaqBIKdniDqDaS2', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The current time is 2023-12-28 16:46:44.\\n\\nThe weather in San Francisco is 72°F.\\n\\nThe weather in Paris is 22°C.', role='assistant', function_call=None, tool_calls=None), content_filter_results={'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}})], created=1703800004, model='gpt-35-turbo-16k', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=38, prompt_tokens=239, total_tokens=277), prompt_filter_results=[{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}])"
       ]
      },
-     "execution_count": 30,
+     "execution_count": 3,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -155,7 +155,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 31,
+   "execution_count": 4,
    "id": "c3a71181-d905-4f9b-82a9-7855457840fe",
    "metadata": {},
    "outputs": [],
@@ -219,7 +219,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 32,
+   "execution_count": 5,
    "id": "d75aabdf-ae48-4c51-ad62-b653385bbce9",
    "metadata": {},
    "outputs": [],
@@ -251,7 +251,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 33,
+   "execution_count": 6,
    "id": "ac8eb758-8653-4502-8661-353f9b082843",
    "metadata": {},
    "outputs": [
@@ -276,94 +276,94 @@
        "  <thead>\n",
        "    <tr style=\"text-align: right;\">\n",
        "      <th></th>\n",
-       "      <th>Team</th>\n",
+       "      <th>Team Name</th>\n",
+       "      <th>Sport</th>\n",
        "      <th>Captain</th>\n",
        "      <th>Age</th>\n",
        "      <th>City</th>\n",
-       "      <th>Sport</th>\n",
-       "      <th>Number of Members</th>\n",
+       "      <th>Number of Players</th>\n",
        "    </tr>\n",
        "  </thead>\n",
        "  <tbody>\n",
        "    <tr>\n",
        "      <th>0</th>\n",
        "      <td>Falcons</td>\n",
+       "      <td>Soccer</td>\n",
        "      <td>Mary</td>\n",
        "      <td>35</td>\n",
        "      <td>Chicago</td>\n",
-       "      <td>Soccer</td>\n",
        "      <td>14</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>1</th>\n",
        "      <td>Sharks</td>\n",
+       "      <td>Swimming</td>\n",
        "      <td>David</td>\n",
        "      <td>28</td>\n",
        "      <td>Miami</td>\n",
-       "      <td>Swimming</td>\n",
        "      <td>12</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>2</th>\n",
        "      <td>Eagles</td>\n",
+       "      <td>Volleyball</td>\n",
        "      <td>Emily</td>\n",
        "      <td>22</td>\n",
        "      <td>Boston</td>\n",
-       "      <td>Volleyball</td>\n",
        "      <td>16</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>3</th>\n",
        "      <td>Wolves</td>\n",
+       "      <td>Cycling</td>\n",
        "      <td>Chris</td>\n",
        "      <td>32</td>\n",
        "      <td>Seattle</td>\n",
-       "      <td>Cycling</td>\n",
        "      <td>12</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>4</th>\n",
        "      <td>Bears</td>\n",
+       "      <td>Golf</td>\n",
        "      <td>Anna</td>\n",
        "      <td>27</td>\n",
        "      <td>Denver</td>\n",
-       "      <td>Golf</td>\n",
        "      <td>9</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>5</th>\n",
        "      <td>Panthers</td>\n",
+       "      <td>Hockey</td>\n",
        "      <td>Leo</td>\n",
        "      <td>24</td>\n",
        "      <td>Dallas</td>\n",
-       "      <td>Hockey</td>\n",
        "      <td>13</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>6</th>\n",
        "      <td>Dolphins</td>\n",
+       "      <td>Rowing</td>\n",
        "      <td>Grace</td>\n",
        "      <td>29</td>\n",
        "      <td>Atlanta</td>\n",
-       "      <td>Rowing</td>\n",
        "      <td>11</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>7</th>\n",
        "      <td>Hawks</td>\n",
+       "      <td>Archery</td>\n",
        "      <td>Alex</td>\n",
        "      <td>26</td>\n",
        "      <td>Phoenix</td>\n",
-       "      <td>Archery</td>\n",
        "      <td>8</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>8</th>\n",
        "      <td>Rabbits</td>\n",
+       "      <td>Badminton</td>\n",
        "      <td>Zoe</td>\n",
        "      <td>31</td>\n",
        "      <td>Portland</td>\n",
-       "      <td>Badminton</td>\n",
        "      <td>10</td>\n",
        "    </tr>\n",
        "  </tbody>\n",
@@ -371,19 +371,19 @@
        "</div>"
       ],
       "text/plain": [
-       "       Team Captain Age      City       Sport Number of Members\n",
-       "0   Falcons    Mary  35   Chicago      Soccer                14\n",
-       "1    Sharks   David  28     Miami    Swimming                12\n",
-       "2    Eagles   Emily  22    Boston  Volleyball                16\n",
-       "3    Wolves   Chris  32   Seattle     Cycling                12\n",
-       "4     Bears    Anna  27    Denver        Golf                 9\n",
-       "5  Panthers     Leo  24    Dallas      Hockey                13\n",
-       "6  Dolphins   Grace  29   Atlanta      Rowing                11\n",
-       "7     Hawks    Alex  26   Phoenix     Archery                 8\n",
-       "8   Rabbits     Zoe  31  Portland   Badminton                10"
+       "  Team Name       Sport Captain  Age      City  Number of Players\n",
+       "0   Falcons      Soccer    Mary   35   Chicago                 14\n",
+       "1    Sharks    Swimming   David   28     Miami                 12\n",
+       "2    Eagles  Volleyball   Emily   22    Boston                 16\n",
+       "3    Wolves     Cycling   Chris   32   Seattle                 12\n",
+       "4     Bears        Golf    Anna   27    Denver                  9\n",
+       "5  Panthers      Hockey     Leo   24    Dallas                 13\n",
+       "6  Dolphins      Rowing   Grace   29   Atlanta                 11\n",
+       "7     Hawks     Archery    Alex   26   Phoenix                  8\n",
+       "8   Rabbits   Badminton     Zoe   31  Portland                 10"
       ]
      },
-     "execution_count": 33,
+     "execution_count": 6,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -406,10 +406,10 @@
     "    actions=[a1, a2, a3], # First, LLM respond with either a1, a2 or a3, or text without action\n",
     "    # Define the orchestration logic for actions:\n",
     "    orch={\n",
-    "        a1: [a2, a3],  # If a1 is invoked, the next response will be either a2, a3 or a text response.\n",
-    "        a2: a3,      # If a2 is invoked, the next action will be a3\n",
-    "        a3: [a4]     # If a3 is invoked, the next response will be a4 or a text response.\n",
-    "        a4: None     # If a4 is invoked, the next response will guarantee to be a text message\n",
+    "        a1.name: [a2, a3],  # If a1 is invoked, the next response will be either a2, a3 or a text response.\n",
+    "        a2.name: a3,      # If a2 is invoked, the next action will be a3\n",
+    "        a3.name: [a4]     # If a3 is invoked, the next response will be a4 or a text response.\n",
+    "        a4.name: None     # If a4 is invoked, the next response will guarantee to be a text message\n",
     "    }\n",
     ")\n",
     "```\n",
@@ -419,7 +419,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 34,
+   "execution_count": 58,
    "id": "ee100daf-da26-45dd-a288-e753a9e518cc",
    "metadata": {},
    "outputs": [],
@@ -500,10 +500,10 @@
     "                              messages=self.messages,\n",
     "                              temperature=.1,\n",
     "                              actions = [self.read, self.answer, get_current_time],\n",
-    "                              orch = {self.read: None, self.answer: None, get_current_time: None}, # function is called at most once\n",
+    "                              orch = {self.read.name: None, self.answer.name: None, get_current_time.name: None}, # function is called at most once\n",
     "                              stream=False\n",
     "                    )\n",
-    "        self.messages.append({\"role\": \"assistant\", \"content\": response})\n",
+    "        self.messages.append({\"role\": \"assistant\", \"content\": response.choices[0].message.content})\n",
     "        return response\n",
     "\n",
     "agent = DBAgent()"
@@ -511,7 +511,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 35,
+   "execution_count": 59,
    "id": "9527bf1a-9f3d-4932-8172-1e2d0a1b9ca7",
    "metadata": {},
    "outputs": [
@@ -536,10 +536,10 @@
     {
      "data": {
       "text/plain": [
-       "\"I have created a table called 'Players' with the following information:\\n\\n| Name  | Age | Location | Sport      | Team     |\\n|-------|-----|----------|------------|----------|\\n| Mary  | 35  | Chicago  | Soccer     | Falcons  |\\n| David | 28  | Miami    | Swimming   | Sharks   |\\n| Emily | 22  | Boston   | Volleyball | Eagles   |\\n| Chris | 32  | Seattle  | Cycling    | Wolves   |\\n| Anna  | 27  | Denver   | Golf       | Bears    |\\n| Leo   | 24  | Dallas   | Hockey     | Panthers |\\n| Grace | 29  | Atlanta  | Rowing     | Dolphins |\\n| Alex  | 26  | Phoenix  | Archery    | Hawks    |\\n| Zoe   | 31  | Portland | Badminton  | Rabbits  |\\n\\nLet me know if there's anything specific you would like to know about these players or teams.\""
+       "ChatCompletion(id='chatcmpl-8asltiAH6eFjL5m40hxAbjxKuknpt', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"I have created a table called 'Players' with the following information:\\n\\n| Name  | Age | Location | Sport      | Team     |\\n|-------|-----|----------|------------|----------|\\n| Mary  | 35  | Chicago  | Soccer     | Falcons  |\\n| David | 28  | Miami    | Swimming   | Sharks   |\\n| Emily | 22  | Boston   | Volleyball | Eagles   |\\n| Chris | 32  | Seattle  | Cycling    | Wolves   |\\n| Anna  | 27  | Denver   | Golf       | Bears    |\\n| Leo   | 24  | Dallas   | Hockey     | Panthers |\\n| Grace | 29  | Atlanta  | Rowing     | Dolphins |\\n| Alex  | 26  | Phoenix  | Archery    | Hawks    |\\n| Zoe   | 31  | Portland | Badminton  | Rabbits  |\\n\\nLet me know if there's anything specific you would like to know about these teams or players.\", role='assistant', function_call=None, tool_calls=None), content_filter_results={'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}})], created=1703801041, model='gpt-35-turbo-16k', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=209, prompt_tokens=440, total_tokens=649), prompt_filter_results=[{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}])"
       ]
      },
-     "execution_count": 35,
+     "execution_count": 59,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -569,7 +569,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 36,
+   "execution_count": 33,
    "id": "a83c3548-2e11-4983-9d61-222a9af4d202",
    "metadata": {},
    "outputs": [
@@ -579,7 +579,7 @@
        "[Dataframe(name='Players', data=[['Mary', '35', 'Chicago', 'Soccer', 'Falcons'], ['David', '28', 'Miami', 'Swimming', 'Sharks'], ['Emily', '22', 'Boston', 'Volleyball', 'Eagles'], ['Chris', '32', 'Seattle', 'Cycling', 'Wolves'], ['Anna', '27', 'Denver', 'Golf', 'Bears'], ['Leo', '24', 'Dallas', 'Hockey', 'Panthers'], ['Grace', '29', 'Atlanta', 'Rowing', 'Dolphins'], ['Alex', '26', 'Phoenix', 'Archery', 'Hawks'], ['Zoe', '31', 'Portland', 'Badminton', 'Rabbits']], columns=['Name', 'Age', 'Location', 'Sport', 'Team'])]"
       ]
      },
-     "execution_count": 36,
+     "execution_count": 33,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -590,7 +590,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 37,
+   "execution_count": 60,
    "id": "8f00eb88-8ab4-4c80-a2fc-37d6e408f26a",
    "metadata": {},
    "outputs": [
@@ -604,10 +604,10 @@
     {
      "data": {
       "text/plain": [
-       "'The people below the age of 30 are David, Emily, Anna, Leo, Alex, and Zoe.'"
+       "ChatCompletion(id='chatcmpl-8aslzyUIcofJbF6GSEzQ9RmNyFNwd', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The people below the age of 30 are David, Emily, Anna, Leo, Alex, and Zoe.', role='assistant', function_call=None, tool_calls=None), content_filter_results={'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}})], created=1703801047, model='gpt-35-turbo-16k', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=22, prompt_tokens=984, total_tokens=1006), prompt_filter_results=[{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}])"
       ]
      },
-     "execution_count": 37,
+     "execution_count": 60,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -618,7 +618,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 38,
+   "execution_count": 61,
    "id": "2a306356-b0af-4d92-a79a-d46bd19a8b23",
    "metadata": {},
    "outputs": [
@@ -640,10 +640,10 @@
     {
      "data": {
       "text/plain": [
-       "\"I have created a new table called 'Players' with the information of the people below the age of 30:\\n\\n| Name  | Age | Location | Sport      | Team     |\\n|-------|-----|----------|------------|----------|\\n| David | 28  | Miami    | Swimming   | Sharks   |\\n| Emily | 22  | Boston   | Volleyball | Eagles   |\\n| Anna  | 27  | Denver   | Golf       | Bears    |\\n| Leo   | 24  | Dallas   | Hockey     | Panthers |\\n| Alex  | 26  | Phoenix  | Archery    | Hawks    |\\n| Zoe   | 31  | Portland | Badminton  | Rabbits  |\\n\\nLet me know if there's anything else I can assist you with.\""
+       "ChatCompletion(id='chatcmpl-8asm4sMzPkLtt8y2PH3yZMVZaoOPp', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"I have created a new table called 'Players' with the information of the people below the age of 30:\\n\\n| Name  | Age | Location | Sport      | Team     |\\n|-------|-----|----------|------------|----------|\\n| David | 28  | Miami    | Swimming   | Sharks   |\\n| Emily | 22  | Boston   | Volleyball | Eagles   |\\n| Anna  | 27  | Denver   | Golf       | Bears    |\\n| Leo   | 24  | Dallas   | Hockey     | Panthers |\\n| Alex  | 26  | Phoenix  | Archery    | Hawks    |\\n| Zoe   | 31  | Portland | Badminton  | Rabbits  |\\n\\nLet me know if there's anything else I can assist you with.\", role='assistant', function_call=None, tool_calls=None), content_filter_results={'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}})], created=1703801052, model='gpt-35-turbo-16k', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=164, prompt_tokens=1134, total_tokens=1298), prompt_filter_results=[{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}])"
       ]
      },
-     "execution_count": 38,
+     "execution_count": 61,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -654,7 +654,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 39,
+   "execution_count": 62,
    "id": "59356d48-413a-46db-bcda-7d3e158df8ee",
    "metadata": {},
    "outputs": [
@@ -664,7 +664,7 @@
        "2"
       ]
      },
-     "execution_count": 39,
+     "execution_count": 62,
      "metadata": {},
      "output_type": "execute_result"
     }
diff --git a/docs/source/notebooks/cookbooks/cookbook.ipynb b/docs/source/notebooks/cookbooks/cookbook.ipynb
index 35e4230..feb7aa1 100644
--- a/docs/source/notebooks/cookbooks/cookbook.ipynb
+++ b/docs/source/notebooks/cookbooks/cookbook.ipynb
@@ -16,7 +16,6 @@
     "\n",
     "#  Examples\n",
     "\n",
-    "- [ReAct/Planning Agent](planning_agent.ipynb)\n",
     "- [Function Calling with Mistralai/Mistral-7B-Instruct-v0.1 through Anyscale Endpoints](anyscale.ipynb)\n"
    ]
   },
diff --git a/docs/source/notebooks/cookbooks/orchestration.ipynb b/docs/source/notebooks/cookbooks/orchestration.ipynb
index 0eb3ba5..9cf7171 100644
--- a/docs/source/notebooks/cookbooks/orchestration.ipynb
+++ b/docs/source/notebooks/cookbooks/orchestration.ipynb
@@ -14,32 +14,56 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 45,
+   "execution_count": 2,
    "id": "5e7451c8-ddb3-498a-96b4-f166fafd783a",
    "metadata": {},
    "outputs": [],
    "source": [
+    "import os\n",
     "import openai\n",
-    "import logging\n",
-    "from pydantic import BaseModel\n",
-    "from actionweaver.llms.openai.tools.chat import OpenAIChatCompletion\n",
-    "from actionweaver import action\n",
+    "from openai import OpenAI\n",
+    "\n",
+    "openai.api_key = os.getenv(\"OPENAI_API_KEY\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3a46ccb2-033d-404c-8213-1481e0da9196",
+   "metadata": {},
+   "source": [
+    "**Patch OpenAI client**"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "d3f2465e-527f-47f8-a0ae-6246343a09a4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from actionweaver.llms import patch\n",
     "\n",
-    "from actionweaver.actions.factories.pydantic_model_to_action import action_from_model\n",
-    "openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n"
+    "openai_client = patch(OpenAI())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "e2fa587b-0637-4443-9353-33226e611df1",
+   "metadata": {},
+   "source": [
+    "**Define function you want model to invoke**"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 46,
+   "execution_count": 4,
    "id": "da75c6a8-14ad-43a9-9378-fc166604f5d2",
    "metadata": {},
    "outputs": [],
    "source": [
-    "import os\n",
+    "from actionweaver import action\n",
     "from typing import List\n",
     "\n",
-    "\n",
     "@action(name=\"FileHandler\")\n",
     "def handle_file(instruction: str) -> str:\n",
     "    \"\"\"\n",
@@ -83,10 +107,7 @@
     "    print (f\"Read file from {file_path}\")\n",
     "    with open(file_path, 'r') as file:\n",
     "        content = file.read()\n",
-    "    return f\"The file content: \\n{content}\"\n",
-    "\n",
-    "\n",
-    "chat = OpenAIChatCompletion(\"gpt-3.5-turbo\")\n"
+    "    return f\"The file content: \\n{content}\"\n"
    ]
   },
   {
@@ -108,16 +129,29 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 12,
    "id": "725aa18a-375f-4f1e-84fb-9155ec43f837",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Handling file...\n",
+      "List all files in ....\n"
+     ]
+    }
+   ],
    "source": [
-    "chat.create([{\"role\": \"user\", \"content\": \"list all files, then read a file in current dir\"}], \n",
-    "            actions = [handle_file], \n",
-    "            orch = {\n",
-    "                handle_file: [read_from_file, list_all_files_in_repo]\n",
-    "            })"
+    "response = openai_client.chat.completions.create(\n",
+    "  model=\"gpt-3.5-turbo\",\n",
+    "  messages=[{\"role\": \"user\", \"content\": \"list all files\"}],\n",
+    "  actions = [handle_file],\n",
+    "  orch = {\n",
+    "        handle_file.name: [read_from_file, list_all_files_in_repo]\n",
+    "  },\n",
+    "  stream=False, \n",
+    ")"
    ]
   },
   {
@@ -127,6 +161,14 @@
    "metadata": {},
    "outputs": [],
    "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d763422d-435f-4776-ba81-f3030fc50476",
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
diff --git a/docs/source/notebooks/cookbooks/parallel_tools.ipynb b/docs/source/notebooks/cookbooks/parallel_tools.ipynb
index a9e6fcc..78a2ccd 100644
--- a/docs/source/notebooks/cookbooks/parallel_tools.ipynb
+++ b/docs/source/notebooks/cookbooks/parallel_tools.ipynb
@@ -1,8 +1,18 @@
 {
  "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "5d447fc7-a904-4a9e-873d-7264644759bd",
+   "metadata": {},
+   "source": [
+    "Parallel function calls are available in the following models: gpt-4-1106-preview and gpt-3.5-turbo-1106.\n",
+    "\n",
+    "For the most accurate and detailed information, please visit the [OpenAI documentation directly at OpenAI Platform Guides - Function Calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)."
+   ]
+  },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 8,
    "id": "f6297385-a0c7-4dff-9595-72721d4a9f9a",
    "metadata": {},
    "outputs": [],
@@ -11,54 +21,23 @@
     "import os\n",
     "import openai\n",
     "import itertools\n",
+    "from openai import OpenAI\n",
+    "from actionweaver.llms import patch\n",
     "\n",
     "\n",
-    "from actionweaver.llms.openai.tools.chat import OpenAIChatCompletion\n",
-    "from actionweaver.llms.openai.tools.tokens import TokenUsageTracker\n",
-    "from actionweaver import action\n",
-    "\n",
     "openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n",
     "\n",
-    "\n",
-    " \n",
-    "\n",
-    "def print_output(output):\n",
-    "    from collections.abc import Iterable\n",
-    "    if isinstance(output, str):\n",
-    "        print (output)\n",
-    "    elif isinstance(output, Iterable):\n",
-    "        for chunk in output:\n",
-    "            content = chunk.choices[0].delta.content\n",
-    "            if content is not None:\n",
-    "                print(content, end='')\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "id": "0a8a394a-4f10-49fc-b453-19c84c81fd36",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "logging.basicConfig(\n",
-    "    filename='parallel_tools.log',\n",
-    "    filemode='a',\n",
-    "    format='%(asctime)s.%(msecs)04d %(levelname)s {%(module)s} [%(funcName)s] %(message)s',\n",
-    "    level=logging.INFO,\n",
-    "    datefmt='%Y-%m-%d %H:%M:%S'\n",
-    ")\n",
-    "\n",
-    "logger = logging.getLogger(__name__)\n",
-    "logger.setLevel(logging.DEBUG)"
+    "openai_client = patch(OpenAI())"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 4,
    "id": "082399ea-2ba9-4f42-a17d-82de79a65ced",
    "metadata": {},
    "outputs": [],
    "source": [
+    "from actionweaver import action\n",
     "\n",
     "@action(name=\"GetWeather\")\n",
     "def get_current_weather(location, unit=\"fahrenheit\"):\n",
@@ -92,39 +71,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 11,
    "id": "e2c60439-71bc-4830-beb5-85446d162329",
    "metadata": {},
-   "outputs": [],
-   "source": [
-    "chat = OpenAIChatCompletion(\"gpt-3.5-turbo-1106\", logger=logger)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "id": "1c5bbfed-b683-46ab-9bdf-280b307584e9",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "action_handler, orch = chat.build_orch([get_current_weather, get_current_time], None)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "acf2fa75-0616-4cbc-9d6c-463ab80d2cfb",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "action_handler.name_to_action"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "id": "b93a7364-b820-4a3a-9970-a7a380ea3786",
-   "metadata": {},
    "outputs": [
     {
      "name": "stdout",
@@ -133,118 +82,46 @@
       "Getting current time\n",
       "Getting current weather\n",
       "Getting current weather\n",
-      "Getting current weather\n",
-      "> \u001b[0;32m/Users/tenghu/Code/actiontools/actionweaver/llms/openai/tools/chat.py\u001b[0m(121)\u001b[0;36m_invoke_tool\u001b[0;34m()\u001b[0m\n",
-      "\u001b[0;32m    119 \u001b[0;31m        \u001b[0mpdb\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_trace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
-      "\u001b[0m\u001b[0;32m    120 \u001b[0;31m\u001b[0;34m\u001b[0m\u001b[0m\n",
-      "\u001b[0m\u001b[0;32m--> 121 \u001b[0;31m        \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcalled_tools\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
-      "\u001b[0m\u001b[0;32m    122 \u001b[0;31m            \u001b[0;31m# Update new functions for next OpenAI api call\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
-      "\u001b[0m\u001b[0;32m    123 \u001b[0;31m            \u001b[0mname\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcalled_tools\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkeys\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
-      "\u001b[0m\n"
-     ]
-    },
-    {
-     "name": "stdin",
-     "output_type": "stream",
-     "text": [
-      "ipdb>  c\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "The current time is 15:36:19.309300. \n",
-      "The weather in San Francisco is 72°F. \n",
-      "The weather in Tokyo is 10°C. \n",
-      "The weather in Paris is 22°C.\n"
+      "Getting current weather\n"
      ]
     }
    ],
    "source": [
-    "print_output(chat.create([{\"role\": \"user\", \"content\": \"what time is it now and What's the weather like in San Francisco, Tokyo, and Paris?\"}], actions = [get_current_weather, get_current_time], stream=False))\n",
-    "\n",
-    "\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "id": "f5577e14-d159-438f-b4eb-043ec5c5c38a",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Getting current time\n",
-      "The current time is 20:18:11."
-     ]
-    }
-   ],
-   "source": [
-    "print_output(get_current_time.invoke(chat, [{\"role\": \"user\", \"content\": \"what time is it ?\"}], stream=True))\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 12,
-   "id": "02b7a373-48b6-4f32-898d-a2229c436b3b",
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "[User(name='Tom', age=31)]"
-      ]
-     },
-     "execution_count": 12,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "from pydantic import BaseModel\n",
-    "from actionweaver.actions.factories.pydantic_model_to_action import action_from_model\n",
-    "\n",
-    "class User(BaseModel):\n",
-    "    name: str\n",
-    "    age: int\n",
-    "\n",
-    "action_from_model(User, stop=True).invoke(chat, [{\"role\": \"user\", \"content\": \"Tom is 31 years old\"}])"
+    "response = openai_client.chat.completions.create(\n",
+    "  model=\"gpt-3.5-turbo-1106\",\n",
+    "  messages=[{\"role\": \"user\", \"content\": \"what time is it now and What's the weather like in San Francisco, Tokyo, and Paris?\"}],\n",
+    "  actions = [get_current_weather, get_current_time],\n",
+    "  stream=False, \n",
+    ")\n"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 13,
-   "id": "9bb34477-e2ee-4a24-9905-d8114b8a6e80",
+   "id": "b93a7364-b820-4a3a-9970-a7a380ea3786",
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Getting current time\n",
-      "Getting current weather\n",
-      "The current time is 20:18:18 in your specified time zone.\n",
+      "The current time is 2023-12-28 17:37:10.030683.\n",
       "\n",
-      "As for the weather in New York, the temperature is currently unknown.\n"
+      "The weather in:\n",
+      "- San Francisco is 72°F\n",
+      "- Tokyo is 10°C\n",
+      "- Paris is 22°C\n"
      ]
     }
    ],
    "source": [
-    "print_output(chat.create([{\"role\": \"user\", \"content\": \"what time is it now\"}], actions = [get_current_time], orch = {get_current_time: get_current_weather}, stream=False))\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n"
+    "print (response.choices[0].message.content)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "7347e0bb-f7c2-434a-ad9f-1b78863449dc",
+   "id": "f5577e14-d159-438f-b4eb-043ec5c5c38a",
    "metadata": {},
    "outputs": [],
    "source": []
diff --git a/docs/source/notebooks/cookbooks/quickstart.ipynb b/docs/source/notebooks/cookbooks/quickstart.ipynb
index 6ae96c7..a4b152d 100644
--- a/docs/source/notebooks/cookbooks/quickstart.ipynb
+++ b/docs/source/notebooks/cookbooks/quickstart.ipynb
@@ -20,26 +20,15 @@
    "id": "16250eaa-15b7-4848-81cf-ac62cd8cefa9",
    "metadata": {},
    "source": [
-    "## Use ActionWeaver and OpenAI API"
+    "**Use ActionWeaver and OpenAI API**"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 72,
+   "execution_count": 1,
    "id": "d682d986-a3ac-4f4d-a967-a6eaccef0413",
    "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "<function actionweaver.llms.patch.patch(client: Union[openai.OpenAI, openai.AsyncOpenAI, openai.lib.azure.AsyncAzureOpenAI, openai.lib.azure.AzureOpenAI])>"
-      ]
-     },
-     "execution_count": 72,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
    "source": [
     "from actionweaver.llms.openai.tools.tokens import TokenUsageTracker\n",
     "from actionweaver.llms import patch\n",
@@ -51,7 +40,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 38,
+   "execution_count": 2,
    "id": "22710e02-f81f-4050-8c2c-01cd64e48f32",
    "metadata": {},
    "outputs": [],
@@ -90,7 +79,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 44,
+   "execution_count": 4,
    "id": "69c84261-b03b-4333-8cea-8315241c69a5",
    "metadata": {},
    "outputs": [
@@ -105,10 +94,10 @@
     {
      "data": {
       "text/plain": [
-       "\"The current weather in San Francisco is 72°F. However, I couldn't retrieve the current weather for Beijing.\""
+       "ChatCompletion(id='chatcmpl-8asF0WseqNkjLTsoTjUTRi4wPR7gO', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The current temperature in San Francisco is 72°F. However, I am unable to retrieve the current temperature for Beijing at the moment.', role='assistant', function_call=None, tool_calls=None))], created=1703799002, model='gpt-3.5-turbo-0613', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=28, prompt_tokens=155, total_tokens=183))"
       ]
      },
-     "execution_count": 44,
+     "execution_count": 4,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -125,7 +114,6 @@
     "  messages=messages,\n",
     "    actions = [get_current_weather],\n",
     "    stream=False, \n",
-    "    logger=logger,\n",
     "    token_usage_tracker = TokenUsageTracker(500),\n",
     ")\n",
     "\n",
@@ -145,12 +133,12 @@
    "id": "92e1741f-3e0a-4673-9e2d-683d8e51ff47",
    "metadata": {},
    "source": [
-    "## Use ActionWeaver and Azure OpenAI Service"
+    "**Use ActionWeaver and Azure OpenAI Service**"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 52,
+   "execution_count": 5,
    "id": "f12f7870-aa76-4755-ab89-96737ca9a790",
    "metadata": {},
    "outputs": [],
@@ -167,7 +155,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 48,
+   "execution_count": 7,
    "id": "f342800e-fd6e-4195-9da9-44a4cc713f65",
    "metadata": {},
    "outputs": [
@@ -182,10 +170,10 @@
     {
      "data": {
       "text/plain": [
-       "\"The current weather in San Francisco is 72 degrees Fahrenheit. However, I couldn't find the current weather temperature for Beijing.\""
+       "ChatCompletion(id='chatcmpl-8asF8SNGGczPhuQnTY6WP7IjY6KJt', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The current weather in San Francisco is 72°F. However, the current weather in Beijing is currently unknown.', role='assistant', function_call=None, tool_calls=None), content_filter_results={'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}})], created=1703799010, model='gpt-35-turbo-16k', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=23, prompt_tokens=155, total_tokens=178), prompt_filter_results=[{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}])"
       ]
      },
-     "execution_count": 48,
+     "execution_count": 7,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -203,7 +191,6 @@
     "  stream=False,\n",
     "  actions = [get_current_weather],\n",
     "  token_usage_tracker = TokenUsageTracker(500),\n",
-    "  logger=logger\n",
     ")\n",
     "\n",
     "response"
@@ -214,12 +201,12 @@
    "id": "b04d6414-c04c-4fbe-bfeb-8cb9effc2b39",
    "metadata": {},
    "source": [
-    "### Force execute an action"
+    "**Force execute an action**"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 54,
+   "execution_count": 8,
    "id": "ff6aa568-cdc5-45bc-8ee5-4c111bc347fc",
    "metadata": {},
    "outputs": [
@@ -233,10 +220,10 @@
     {
      "data": {
       "text/plain": [
-       "'The current time is 23:21 (11:21 PM).'"
+       "ChatCompletion(id='chatcmpl-8asFCTvOY4IngXDVtt35BSj8vE7Me', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The current time is 16:30.', role='assistant', function_call=None, tool_calls=None))], created=1703799014, model='gpt-3.5-turbo-0613', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=10, prompt_tokens=98, total_tokens=108))"
       ]
      },
-     "execution_count": 54,
+     "execution_count": 8,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -247,7 +234,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 56,
+   "execution_count": 9,
    "id": "f6635698-7886-414d-9cb9-84f6ad28ccb2",
    "metadata": {},
    "outputs": [
@@ -261,10 +248,10 @@
     {
      "data": {
       "text/plain": [
-       "'The current time is 23:21.'"
+       "ChatCompletion(id='chatcmpl-8asFHNakresQO5RhS56IyTSZf76RL', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The current time is 2023-12-28 16:30:19.', role='assistant', function_call=None, tool_calls=None), content_filter_results={'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}})], created=1703799019, model='gpt-35-turbo-16k', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=19, prompt_tokens=98, total_tokens=117), prompt_filter_results=[{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}])"
       ]
      },
-     "execution_count": 56,
+     "execution_count": 9,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -278,14 +265,14 @@
    "id": "24e9543b-1947-4545-9d8e-7aed7833c565",
    "metadata": {},
    "source": [
-    "### Stop the Action in the loop\n",
+    "**Stop the Action in the loop**\n",
     "\n",
     "Every action comes with a stop argument, which is set to False by default, if True this means that the LLM will immediately return the function's output if chosen, but this also restricts the LLM from making multiple function calls. For instance, if asked about the weather in NYC and San Francisco, the model would invoke two separate functions sequentially for each city. However, with `stop=True`, this process is interrupted once the first function returns weather information for either NYC or San Francisco, depending on which city it queries first."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 67,
+   "execution_count": 10,
    "id": "c4178e1d-ef6c-4954-b342-729f73b4736d",
    "metadata": {},
    "outputs": [],
@@ -295,7 +282,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 68,
+   "execution_count": 11,
    "id": "aad3616a-18ab-4809-893c-171a0c625245",
    "metadata": {},
    "outputs": [
@@ -312,7 +299,7 @@
        "'{\"location\": \"San Francisco\", \"temperature\": \"72\", \"unit\": \"fahrenheit\"}'"
       ]
      },
-     "execution_count": 68,
+     "execution_count": 11,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -320,14 +307,6 @@
    "source": [
     "get_current_weather.invoke(openai_client, messages=[{\"role\": \"user\", \"content\": \"what weather is San Francisco\"}], model=\"gpt-3.5-turbo\", stream=False, force=True)\n"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "bfadce88-7bc9-41b0-9db2-926985931c69",
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
diff --git a/docs/source/notebooks/cookbooks/stateful_agent.ipynb b/docs/source/notebooks/cookbooks/stateful_agent.ipynb
index d1459e5..5620684 100644
--- a/docs/source/notebooks/cookbooks/stateful_agent.ipynb
+++ b/docs/source/notebooks/cookbooks/stateful_agent.ipynb
@@ -5,82 +5,44 @@
    "id": "02433815-7bb9-4e61-be67-db4854f0c403",
    "metadata": {},
    "source": [
-    "# Stateful Agent"
+    "# Actions of Stateful Agent\n",
+    "Developers also could create a class and enhance its functionality using ActionWeaver's action decorators."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "id": "70c94281-1d38-4e34-bbd0-92ff70227482",
    "metadata": {},
    "outputs": [],
    "source": [
-    "import logging\n",
     "import os\n",
     "import openai\n",
-    "import itertools\n",
-    "\n",
+    "from openai import OpenAI\n",
+    "from actionweaver.llms import patch\n",
+    "from actionweaver import action\n",
     "from typing import List\n",
     "\n",
-    "from actionweaver.llms.openai.functions.chat import OpenAIChatCompletion\n",
-    "from actionweaver.llms.openai.functions.tokens import TokenUsageTracker\n",
-    "from actionweaver import action, SelectOne, RequireNext\n",
-    "\n",
-    "from actionweaver.mixins.examples import LangChainTools, Folium, OpenAIAPI\n",
-    "openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n",
-    "\n",
-    "\n",
-    "def print_output(output):\n",
-    "    from collections.abc import Iterable\n",
-    "    if isinstance(output, str):\n",
-    "        print (output)\n",
-    "    elif isinstance(output, Iterable):\n",
-    "        for chunk in output:\n",
-    "            content = chunk.choices[0].delta.content\n",
-    "            if content is not None:\n",
-    "                print(content, end='')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "id": "08caafb5-37ca-4c69-a893-58e3313ef9f9",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "logging.basicConfig(\n",
-    "    filename='agent.log',\n",
-    "    filemode='a',\n",
-    "    format='%(asctime)s.%(msecs)04d %(levelname)s {%(module)s} [%(funcName)s] %(message)s',\n",
-    "    level=logging.INFO,\n",
-    "    datefmt='%Y-%m-%d %H:%M:%S'\n",
-    ")\n",
     "\n",
-    "logger = logging.getLogger(__name__)\n",
-    "logger.setLevel(logging.DEBUG)"
+    "openai.api_key = os.getenv(\"OPENAI_API_KEY\")"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "id": "079d94a0-19ba-4874-8db3-0b1f28230da8",
    "metadata": {},
    "outputs": [],
    "source": [
     "class AgentV0:\n",
-    "    def __init__(self, logger):\n",
-    "        self.logger = logger\n",
-    "        self.token_tracker = TokenUsageTracker(budget=None, logger=logger)\n",
-    "        self.llm = OpenAIChatCompletion(\"gpt-3.5-turbo\", token_usage_tracker = self.token_tracker, logger=logger)\n",
-    "        \n",
-    "        self.messages = [{\"role\": \"system\", \"content\": \"You are a resourceful assistant.\"}]\n",
+    "    def __init__(self):\n",
+    "        self.llm = patch(OpenAI())\n",
+    "        self.messages = []\n",
     "        self.times = []\n",
-    "\n",
+    "    \n",
     "    def __call__(self, text):\n",
     "        self.messages += [{\"role\": \"user\", \"content\":text}]\n",
-    "        response = self.llm.create(messages=self.messages, actions = [self.get_current_time, self.sleep], stream=True)\n",
-    "\n",
-    "        return response\n",
+    "        return self.llm.chat.completions.create(model=\"gpt-3.5-turbo\", messages=self.messages, actions = [self.get_current_time])\n",
     "        \n",
     "    @action(name=\"GetCurrentTime\")\n",
     "    def get_current_time(self) -> str:\n",
@@ -96,216 +58,96 @@
     "        \n",
     "        return f\"The current time is {current_time}\"\n",
     "\n",
-    "\n",
-    "    @action(name=\"Sleep\")\n",
-    "    def sleep(self, seconds: int) -> str:\n",
-    "        \"\"\"\n",
-    "        Introduces a sleep delay of the specified seconds and returns a message.\n",
-    "    \n",
-    "        Args:\n",
-    "            seconds (int): The duration to sleep in seconds.\n",
-    "    \n",
-    "        Returns:\n",
-    "            str: A message indicating the completion of the sleep.\n",
-    "        \"\"\"\n",
-    "        import time\n",
-    "        time.sleep(seconds)\n",
-    "        return f\"Now I wake up after sleep {seconds} seconds.\"\n",
-    "\n",
-    "\n",
-    "    @action(name=\"Ask\")\n",
-    "    def ask(self, question: str) -> str:\n",
-    "        \"\"\"\n",
-    "        Invoke this if you want to ask question, or there is anything need clarification.\n",
-    "    \n",
-    "        Args:\n",
-    "            question (str): The question to ask the user.\n",
-    "        \"\"\"\n",
-    "        ans = input(f\"Question: {question}\\n\")\n",
-    "        return ans\n",
-    "\n",
-    "\n",
-    "\n",
-    "agent = AgentV0(logger)"
+    "agent = AgentV0()\n"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "id": "c9138451-e0b5-44bd-b768-2c9f25bbcedb",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "The current time is 19:36."
-     ]
-    }
-   ],
-   "source": [
-    "print_output(agent(\"what time is it\"))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "id": "842e1792-3210-4be8-85c9-f6c669dfe127",
-   "metadata": {},
    "outputs": [],
    "source": [
-    "class AgentV1(AgentV0, LangChainTools, Folium, OpenAIAPI):\n",
-    "    def __call__(self, text):\n",
-    "        self.messages += [{\"role\": \"user\", \"content\":text}]\n",
-    "        response = self.llm.create(messages=self.messages, actions = [self.search, self.show_map, self.get_current_time, self.sleep], stream=True)\n",
+    "# You can invoke actions just like regular instance methods\n",
+    "agent.get_current_time() # Output: 'The current time is 20:34.'\n",
     "\n",
-    "        return response\n",
     "\n",
-    "agent = AgentV1(logger)"
+    "agent(\"what time is it\")"
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": 6,
-   "id": "1fffbbb7-c8e3-4a98-9564-01ed054c5443",
+   "cell_type": "markdown",
+   "id": "8381c18a-302f-4976-bd39-5c16fe68e2dd",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Langchain is a framework for developing applications powered by language models. It simplifies the process of creating generative AI application interfaces and enables developers to build context-aware, reasoning language model applications. Langchain provides tools and APIs that make it easy to connect language models to other data sources and interact with them. It is an open-source library that supports Python and JavaScript.\n",
-      "\n",
-      "Haystack, on the other hand, is an open-source Python framework for building production-ready language model applications. It offers tooling for every stage of the NLP (Natural Language Processing) project life cycle. Haystack is built around the concept of pipelines, which are powerful structures that perform NLP tasks. It provides an orchestration framework for language models and offers components that can be connected together to create pipelines.\n",
-      "\n",
-      "Please note that there may be other unrelated results for the term \"Haystack\" such as the Haystack app, Haystack News, or Haystack Rock."
-     ]
-    }
-   ],
    "source": [
-    "print_output(agent(\"search what is Langchain, what is haystack\"))"
+    "**Grouping and Extending Actions Through Inheritance**"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "b280e164-f499-410c-801e-2bf81a376c76",
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "76c3a82d-d8da-4a74-9e2d-c263bceb107e",
+   "id": "842e1792-3210-4be8-85c9-f6c669dfe127",
    "metadata": {},
    "outputs": [],
    "source": [
-    "class FileUtility(AgentV0):\n",
-    "    @action(name=\"FileHandler\", orch_expr=SelectOne(['FileHandler', 'ListFiles', 'ReadFile']))\n",
-    "    def handle_file(self, instruction: str) -> str:\n",
+    "class LangChainTools:\n",
+    "    @action(name=\"GoogleSearch\")\n",
+    "    def google_search(self, query: str) -> str:\n",
     "        \"\"\"\n",
-    "        Handles user instructions related to file operations. Put every context in the instruction only!\n",
-    "    \n",
-    "        Args:\n",
-    "            instruction (str): The user's instruction about file handling.\n",
-    "    \n",
-    "        Returns:\n",
-    "            str: The response to the user's question.\n",
-    "        \"\"\"\n",
-    "        return instruction\n",
+    "        Perform a Google search using the provided query. \n",
     "        \n",
+    "        This action requires `langchain` and `google-api-python-client` installed, and GOOGLE_API_KEY, GOOGLE_CSE_ID environment variables.\n",
+    "        See https://python.langchain.com/docs/integrations/tools/google_search.\n",
     "\n",
-    "    @action(name=\"ListFiles\", scope=\"file\")\n",
-    "    def list_all_files_in_repo(self, repo_path: str ='.') -> List:\n",
+    "        :param query: The search query to be used for the Google search.\n",
+    "        :return: The search results as a string.\n",
     "        \"\"\"\n",
-    "        Lists all the files in the given repository.\n",
-    "    \n",
-    "        :param repo_path: Path to the repository. Defaults to the current directory.\n",
-    "        :return: List of file paths.\n",
-    "        \"\"\"\n",
-    "\n",
-    "        logger.info(f\"list_all_files_in_repo: {repo_path}\")\n",
-    "        \n",
-    "        file_list = []\n",
-    "        for root, _, files in os.walk(repo_path):\n",
-    "            for file in files:\n",
-    "                file_list.append(os.path.join(root, file))\n",
-    "            break\n",
-    "        return file_list\n",
+    "        from langchain.utilities import GoogleSearchAPIWrapper\n",
     "\n",
-    "    @action(name=\"ReadFile\", scope=\"file\")\n",
-    "    def read_from_file(self, file_path: str) -> str:\n",
-    "        \"\"\"\n",
-    "        Reads the content of a file and returns it as a string.\n",
+    "        search = GoogleSearchAPIWrapper()\n",
+    "        return search.run(query)\n",
     "    \n",
-    "        :param file_path: The path to the file that needs to be read.\n",
-    "        :return: A string containing the content of the file.\n",
-    "        \"\"\"\n",
-    "        logger.info(f\"read_from_file: {file_path}\")\n",
-    "        \n",
-    "        with open(file_path, 'r') as file:\n",
-    "            content = file.read()\n",
-    "        return f\"The file content: \\n{content}\"\n",
-    "\n",
-    "    @action(name=\"WriteFile\", scope=\"file\")\n",
-    "    def write_to_file(self, file_path: str, content: str) -> str:\n",
-    "        \"\"\"\n",
-    "        Writes the given content to a file.\n",
-    "        \n",
-    "        :param file_path: The path to the file where the content should be written.\n",
-    "        :param content: The content to be written to the file.\n",
-    "        \"\"\"\n",
-    "        try:\n",
-    "            with open(file_path, 'a') as file:\n",
-    "                file.write(content)\n",
-    "            return \"Content successfully appended to the file.\"\n",
-    "        except Exception as e:\n",
-    "            return f\"An error occurred while appending to the file: {str(e)}\"\n",
-    "        \n",
-    "\n",
-    "\n",
-    "class AgentV2(FileUtility):\n",
+    "class AgentV1(AgentV0, LangChainTools):\n",
     "    def __call__(self, text):\n",
     "        self.messages += [{\"role\": \"user\", \"content\":text}]\n",
-    "        response = self.llm.create(messages=self.messages, actions = [self.search, self.show_map, self.get_current_time, self.sleep], stream=True)\n",
+    "        return self.llm.chat.completions.create(model=\"gpt-3.5-turbo\", messages=self.messages, actions = [self.google_search])\n",
     "\n",
-    "        return response"
+    "agent = AgentV1()\n",
+    "agent(\"what happened today\")"
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "74993086-26fb-415e-b37e-d5cb1914b2d5",
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "ae3afcae-1d71-47ed-9870-0922e97a75d5",
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "0c862b6c-12c0-4a4c-ad12-dfc420e7985c",
+   "cell_type": "markdown",
+   "id": "389be3b9-cd9f-4b52-aef1-0745f2164f87",
    "metadata": {},
-   "outputs": [],
-   "source": []
+   "source": [
+    "We could use parameter `orch` when calling the chat completion API. This feature will allow us for more precise control over the specific set of tools available to the LLM during each interaction.\n",
+    "\n",
+    "Example:\n",
+    "```python\n",
+    "client.chat.completions.create(\n",
+    "    messages = ...\n",
+    "    actions=[a1, a2, a3], # First, LLM respond with either a1, a2 or a3, or text without action\n",
+    "    # Define the orchestration logic for actions:\n",
+    "    orch={\n",
+    "        a1.name: [a2, a3],  # If a1 is invoked, the next response will be either a2, a3 or a text response.\n",
+    "        a2.name: a3,      # If a2 is invoked, the next action will be a3\n",
+    "        a3.name: [a4]     # If a3 is invoked, the next response will be a4 or a text response.\n",
+    "        a4.name: None     # If a4 is invoked, the next response will guarantee to be a text message\n",
+    "    }\n",
+    ")\n",
+    "```\n",
+    "\n",
+    "For details please take a look at [here](https://github.com/TengHu/ActionWeaver?tab=readme-ov-file#orchestration-of-actions-experimental )\n"
+   ]
   },
   {
    "cell_type": "code",
-   "execution_count": 30,
-   "id": "4cbecb67-b8ac-4492-ae5b-980650907dc5",
+   "execution_count": 34,
+   "id": "1fffbbb7-c8e3-4a98-9564-01ed054c5443",
    "metadata": {},
    "outputs": [],
    "source": [
-    "from typing import List\n",
-    "import os\n",
     "class FileAgent(AgentV0):\n",
     "    @action(name=\"FileHandler\")\n",
     "    def handle_file(self, instruction: str) -> str:\n",
@@ -356,44 +198,53 @@
     "\n",
     "    def __call__(self, text):\n",
     "        self.messages += [{\"role\": \"user\", \"content\":text}]\n",
-    "        return self.llm.chat.completions.create(model=\"gpt-3.5-turbo\", messages=self.messages, actions = [self.list_all_files_in_repo], orch = {self.handle_file: [self.list_all_files_in_repo, self.read_from_file]})\n"
+    "        return self.llm.chat.completions.create(model=\"gpt-3.5-turbo\", messages=self.messages, actions = [self.handle_file], orch = {self.handle_file.name: [self.list_all_files_in_repo, self.read_from_file]})\n",
+    "\n"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 31,
+   "execution_count": 35,
    "id": "9ed4906a-b57e-4f78-b9a1-bea1c2a195ce",
    "metadata": {},
    "outputs": [],
    "source": [
-    "agent = FileUtility()"
+    "agent = FileAgent()"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 32,
+   "execution_count": 36,
    "id": "4e8eca2b-a052-4f38-9f57-3b42cfc362d1",
    "metadata": {},
    "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Handling list all files\n",
+      "list_all_files_in_repo: .\n"
+     ]
+    },
     {
      "data": {
       "text/plain": [
-       "'Here are the files in the current directory:\\n\\n1. index.html\\n2. styles.css\\n3. script.js\\n4. image.jpg\\n5. README.md'"
+       "ChatCompletion(id='chatcmpl-8atToDzgZh5C9gh4xNWYsJtsiaNPS', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Here are the files in the current repository:\\n\\n- langsmith.ipynb\\n- azure_tutorial-Copy1.ipynb\\n- parallel_tools.log\\n- untitled.md\\n- parallel_tools.ipynb\\n- test.log\\n- stateful_agent.ipynb\\n- huggingface.ipynb\\n- anyscale.ipynb\\n- ReAct.ipynb\\n- structured_extraction.log\\n- quickstart.ipynb\\n- structured_extraction.ipynb\\n- azure_tutorial.ipynb\\n- litellm.ipynb\\n- cookbook.ipynb\\n- agent.log\\n- orchestration.ipynb', role='assistant', function_call=None, tool_calls=None))], created=1703803764, model='gpt-3.5-turbo-0613', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=130, prompt_tokens=256, total_tokens=386))"
       ]
      },
-     "execution_count": 32,
+     "execution_count": 36,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "agent(\"list all files in current dir\")"
+    "agent(\"Take file action [list all files in current repository]\")"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "daf3f44c-6641-46fd-9b1b-2a320e33459a",
+   "id": "9d0d2274-c011-4cb8-9710-da1b34bbbb07",
    "metadata": {},
    "outputs": [],
    "source": []
diff --git a/docs/source/notebooks/cookbooks/structured_extraction.ipynb b/docs/source/notebooks/cookbooks/structured_extraction.ipynb
index 140e069..aa6a97b 100644
--- a/docs/source/notebooks/cookbooks/structured_extraction.ipynb
+++ b/docs/source/notebooks/cookbooks/structured_extraction.ipynb
@@ -5,53 +5,47 @@
    "id": "8e469c96-d2b3-459c-afd5-182e9717257e",
    "metadata": {},
    "source": [
-    "# Structured Extraction\n",
+    "# Structured Data Extraction: Utilizing Pydantic and ActionWeaver\n",
     "\n",
-    "In this example, we will demonstrate how to use Pydantic and ActionWeaver for structured data extraction"
+    "This guide is designed to demonstrate the process of using Pydantic along with ActionWeaver for structured data extraction.\n"
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": 4,
-   "id": "7bac7b1d-6119-4434-afd7-310ff720fcc2",
+   "cell_type": "markdown",
+   "id": "4fec464c-f265-4e77-b75b-d01b853b56ec",
    "metadata": {},
-   "outputs": [],
    "source": [
-    "import logging\n",
-    "from pydantic import BaseModel\n",
-    "from actionweaver.actions.factories.pydantic_model_to_action import action_from_model\n",
-    "\n",
-    "logging.basicConfig(\n",
-    "    filename='structured_extraction.log',\n",
-    "    filemode='a',\n",
-    "    format='%(asctime)s.%(msecs)04d %(levelname)s {%(module)s} [%(funcName)s] %(message)s',\n",
-    "    level=logging.INFO,\n",
-    "    datefmt='%Y-%m-%d %H:%M:%S'\n",
-    ")\n",
-    "\n",
-    "logger = logging.getLogger(__name__)\n",
-    "logger.setLevel(logging.DEBUG)"
+    "**Step 1**: Patch OpenAI client"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
-   "id": "b561f238-c916-4de0-b776-cd90d45e999c",
+   "execution_count": 5,
+   "id": "7bac7b1d-6119-4434-afd7-310ff720fcc2",
    "metadata": {},
    "outputs": [],
    "source": [
+    "from pydantic import BaseModel\n",
+    "from actionweaver.actions.factories.pydantic_model_to_action import action_from_model\n",
     "import os\n",
     "from openai import OpenAI\n",
-    "\n",
     "from actionweaver.llms import patch\n",
     "\n",
     "\n",
     "client = patch(OpenAI())"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "8e41e0d2-1d3b-49bb-be2a-9e9bebd52db3",
+   "metadata": {},
+   "source": [
+    "**Step 2**: Define function you want model to invoke"
+   ]
+  },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": 7,
    "id": "e27716ae-f4f6-4908-92b3-ba81911ac8d2",
    "metadata": {},
    "outputs": [],
@@ -84,7 +78,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": 8,
    "id": "d023ef2e-6fed-4841-aae1-0a9b3e990d23",
    "metadata": {},
    "outputs": [
@@ -94,19 +88,19 @@
        "Users(users=[User(name='Tom', age=31), User(name='James', age=25)])"
       ]
      },
-     "execution_count": 14,
+     "execution_count": 8,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "action_from_model(Users, stop=True).invoke(client, messages=[{\"role\": \"user\", \"content\": \"Tom is 31 years old, James is 25 years old\"}], model=\"gpt-3.5-turbo\", stream=False, force=False)\n"
+    "action_from_model(Users, stop=True).invoke(client, messages=[{\"role\": \"user\", \"content\": \"Tom is 31 years old, James is 25 years old\"}], model=\"gpt-3.5-turbo\", stream=False, force=False)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "1b19a344-6a41-494d-a129-8a6e7bf9681b",
+   "id": "859caacf-1e9f-4049-82e6-2ccccc49392a",
    "metadata": {},
    "outputs": [],
    "source": []
diff --git a/pyproject.toml b/pyproject.toml
index 88e594e..f1c03d6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "actionweaver"
-version = "0.0.19"
+version = "0.0.20"
 description = "An Application Framework for Building LLM Agents"
 authors = ['Teng "Niel" Hu <hu.niel92@gmail.com>']
 readme = "README.md"