Commit 1dd54bc

Merge pull request #96 from CambioML/jojo-branch
update openai, pydantic, and chromadb versions and examples to work with uniflow
2 parents 1ef8276 + 8dece7b commit 1dd54bc

60 files changed (+2396 / -2157 lines)

docker/pykoi-cpu-custom/app.py

+2-8
```diff
@@ -4,13 +4,9 @@
 ##########################################################
 # Creating an OpenAI model (requires an OpenAI API key) #
 ##########################################################
-# enter openai api key here
-api_key = "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"
 
 # Creating an OpenAI model
-model = pykoi.ModelFactory.create_model(
-    model_source="openai",
-    api_key=api_key)
+model = pykoi.ModelFactory.create_model(model_source="openai")
 
 #####################################
 # Creating a chatbot with the model #
@@ -25,9 +21,7 @@
 ###########################################################
 # Create the application
 # app = pykoi.Application(debug=False, share=True)
-app = pykoi.Application(
-    debug=False,
-    share=True)
+app = pykoi.Application(debug=False, share=True)
 app.add_component(chatbot)
 app.add_component(dashboard)
 app.run()
```
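With the hard-coded key gone, the OpenAI-backed model is expected to pick its key up from the environment (the OpenAI Python client defaults to reading OPENAI_API_KEY). A minimal sketch of the resulting launch script, assembled only from calls that appear elsewhere in this commit; the explicit environment check is an illustrative addition, not part of pykoi:

```python
import os

from pykoi import Application
from pykoi.chat import ModelFactory
from pykoi.component import Chatbot

# Illustrative guard (not part of pykoi): fail fast if the key is missing.
if "OPENAI_API_KEY" not in os.environ:
    raise RuntimeError("Export OPENAI_API_KEY (or put it in a .env file) first")

# No api_key argument any more; the key comes from the environment.
model = ModelFactory.create_model(model_source="openai")
chatbot = Chatbot(model=model, feedback="vote")

app = Application(debug=False, share=True)
app.add_component(chatbot)
app.run()
```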

docs/conf.py

+1-1
```diff
@@ -79,5 +79,5 @@
 """,
 "class": "",
 },
-]
+],
 }
```

example/chatbot/chatbot_in_jupyter.ipynb

+70-15
```diff
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 7,
    "id": "61b49dc2",
    "metadata": {},
    "outputs": [],
@@ -21,36 +21,74 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 8,
    "id": "6a907bb3",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "True"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "from pykoi import Application\n",
     "from pykoi.chat import ModelFactory\n",
     "from pykoi.chat import QuestionAnswerDatabase\n",
-    "from pykoi.component import Chatbot"
+    "from pykoi.component import Chatbot\n",
+    "from dotenv import load_dotenv\n",
+    "load_dotenv()"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 9,
    "id": "15c2004b",
    "metadata": {},
    "outputs": [],
    "source": [
-    "api_key = \"\"\n",
-    "\n",
     "# Creating an OpenAI model\n",
-    "model = ModelFactory.create_model(model_source=\"openai\", api_key=api_key)"
+    "model = ModelFactory.create_model(model_source=\"openai\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Add `nest_asyncio` \n",
+    "Add `nest_asyncio` to avoid error. Since we're running another interface inside a Jupyter notebook where an asyncio event loop is already running, we'll encounter the error. (since The uvicorn.run() function uses asyncio.run(), which isn't compatible with a running event loop.)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "id": "0c07c943",
+   "execution_count": 10,
    "metadata": {},
    "outputs": [],
+   "source": [
+    "import nest_asyncio\n",
+    "nest_asyncio.apply()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "id": "0c07c943",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Table contents after creating table:\n",
+      "ID: 1, Question: Who is Sam altman, Answer: He is the president of YC, Vote Status: n/a, Timestamp: 2023-12-20 13:37:43.095750\n"
+     ]
+    }
+   ],
    "source": [
     "database = QuestionAnswerDatabase(debug=True)\n",
     "chatbot = Chatbot(model=model, feedback=\"vote\")\n",
@@ -61,14 +99,31 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 12,
    "id": "ae7bbef3",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "INFO: Started server process [40457]\n",
+      "INFO: Waiting for application startup.\n",
+      "INFO: Application startup complete.\n",
+      "INFO: Uvicorn running on http://0.0.0.0:5000 (Press CTRL+C to quit)\n"
+     ]
+    }
+   ],
    "source": [
-    "# import nest_asyncio\n",
-    "app.display()"
+    "app.run()"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
@@ -87,7 +142,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.12"
+   "version": "3.10.13"
   }
  },
 "nbformat": 4,
```

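The notebook now calls nest_asyncio.apply() before starting the app: uvicorn, which app.run() launches internally, calls asyncio.run(), and that fails inside Jupyter's already-running event loop. A condensed sketch of the sequence the updated notebook runs, using calls taken from the diff above (the share flag is the value used in the other examples, not shown in this notebook's diff):

```python
import nest_asyncio
from dotenv import load_dotenv

from pykoi import Application
from pykoi.chat import ModelFactory
from pykoi.component import Chatbot

load_dotenv()          # pulls OPENAI_API_KEY from a local .env file
nest_asyncio.apply()   # lets uvicorn's asyncio.run() nest inside Jupyter's loop

model = ModelFactory.create_model(model_source="openai")
chatbot = Chatbot(model=model, feedback="vote")

app = Application(debug=False, share=True)
app.add_component(chatbot)
app.run()  # replaces the old app.display() call
```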
example/chatbot/demo_launch_app_cpu_openai.ipynb

+50-162
(Large diff not rendered.)

example/chatbot/demo_launch_app_cpu_openai.py

+2-7
````diff
@@ -15,25 +15,20 @@
 python -m example.chatbot.demo_launch_app_cpu_openai
 ```
 """
-import os
 
 from dotenv import load_dotenv
 
 from pykoi import Application
-from pykoi.chat import ModelFactory
-from pykoi.chat import QuestionAnswerDatabase
+from pykoi.chat import ModelFactory, QuestionAnswerDatabase
 from pykoi.component import Chatbot, Dashboard
 
 ##########################################################
 # Creating an OpenAI model (requires an OpenAI API key) #
 ##########################################################
 load_dotenv()
-api_key = os.getenv("OPENAI_API_KEY")
 
 # Creating an OpenAI model
-model = ModelFactory.create_model(
-    model_source="openai",
-    api_key=api_key)
+model = ModelFactory.create_model(model_source="openai")
 
 #####################################
 # Creating a chatbot with the model #
````
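The script keeps load_dotenv() but drops the explicit os.getenv / api_key plumbing, so the only configuration left is the environment itself. A sketch of the assumed setup, with a hypothetical .env file next to the script:

```python
# .env (hypothetical contents, one line):
#   OPENAI_API_KEY=sk-...your-key-here...

from dotenv import load_dotenv

from pykoi.chat import ModelFactory

load_dotenv()  # copies OPENAI_API_KEY from .env into the process environment
model = ModelFactory.create_model(model_source="openai")  # no api_key argument
```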

example/chatbot/demo_launch_app_gpu_huggingface.ipynb

+64-13
```diff
@@ -21,7 +21,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -44,7 +44,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 2,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -66,7 +66,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 3,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -75,12 +75,45 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 4,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/opt/conda/envs/pykoi/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+      " from .autonotebook import tqdm as notebook_tqdm\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[HuggingfaceModel] loading model...\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "WARNING: You are currently loading Falcon using legacy code contained in the model repository. Falcon has now been fully ported into the Hugging Face transformers library. For the most up-to-date and high-performance version of the Falcon model code, please update to the latest version of transformers and then load the model without the trust_remote_code=True argument.\n",
+      "\n",
+      "Loading checkpoint shards: 100%|██████████| 2/2 [00:36<00:00, 18.18s/it]\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[HuggingfaceModel] loading tokenizer...\n"
+     ]
+    }
+   ],
    "source": [
     "model = ModelFactory.create_model(\n",
-    "    model_source=\"huggingface\", \n",
+    "    model_source=\"huggingface\",\n",
     "    pretrained_model_name_or_path=\"tiiuae/falcon-7b\",\n",
     "    trust_remote_code=True, ## TODO: set as default\n",
     "    load_in_8bit=True\n",
@@ -89,7 +122,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -115,7 +148,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -132,7 +165,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 7,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -143,9 +176,27 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 8,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "INFO: Started server process [7578]\n",
+      "INFO: Waiting for application startup.\n",
+      "INFO: Application startup complete.\n",
+      "INFO: Uvicorn running on http://0.0.0.0:5000 (Press CTRL+C to quit)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Public URL: https://a63d9b47dea54a.lhr.life\n"
+     ]
+    }
+   ],
    "source": [
     "app = Application(debug=False, share=True)\n",
     "app.add_component(chatbot)\n",
@@ -208,7 +259,7 @@
  "kernelspec": {
   "display_name": "pykoi",
   "language": "python",
-  "name": "0731a"
+  "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
@@ -220,7 +271,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.10.12"
+  "version": "3.10.13"
  }
 },
 "nbformat": 4,
```

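In the GPU notebook the model is still created with the arguments visible in the diff; only execution metadata and trailing whitespace changed. A sketch of that load path, with the caveat that load_in_8bit=True assumes bitsandbytes and a CUDA GPU are available (the examples mention an EC2 g5.4xlarge), and the chatbot/app wiring follows the pattern used in the other examples rather than lines shown in this notebook's diff:

```python
from pykoi import Application
from pykoi.chat import ModelFactory
from pykoi.component import Chatbot

# Parameters as shown in the notebook diff; trust_remote_code pulls Falcon's
# legacy repo code, and load_in_8bit requires bitsandbytes on a CUDA GPU.
model = ModelFactory.create_model(
    model_source="huggingface",
    pretrained_model_name_or_path="tiiuae/falcon-7b",
    trust_remote_code=True,
    load_in_8bit=True,
)

chatbot = Chatbot(model=model, feedback="vote")
app = Application(debug=False, share=True)  # share=True exposes the public URL seen in the output
app.add_component(chatbot)
app.run()
```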
example/chatbot/demo_launch_app_gpu_huggingface.py

+1-3
````diff
@@ -12,11 +12,9 @@
 ```
 """
 from pykoi import Application
-from pykoi.chat import ModelFactory
-from pykoi.chat import QuestionAnswerDatabase
+from pykoi.chat import ModelFactory, QuestionAnswerDatabase
 from pykoi.component import Chatbot, Dashboard
 
-
 ###################################################################################
 # Creating a Huggingface model tiiuae/falcon-7b (EC2 g5.4xlarge with 100GB space) #
 ###################################################################################
````

example/chatbot/demo_launch_app_gpu_huggingface_peft.py

+1-3

```diff
@@ -13,11 +13,9 @@
 """
 
 from pykoi import Application
-from pykoi.chat import ModelFactory
-from pykoi.chat import QuestionAnswerDatabase
+from pykoi.chat import ModelFactory, QuestionAnswerDatabase
 from pykoi.component import Chatbot, Dashboard
 
-
 ###################################################################################
 # Creating a Huggingface model tiiuae/falcon-7b (EC2 g5.4xlarge with 100GB space) #
 ###################################################################################
```