-
-
Notifications
You must be signed in to change notification settings - Fork 11
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
refactor: rename
time_frame
to period
- Loading branch information
Showing
6 changed files
with
64 additions
and
60 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,25 +1,27 @@ | ||
import os | ||
import os | ||
|
||
import openai # pip install openai | ||
|
||
openai.api_key = os.environ.get("OPENAI_KEY") | ||
|
||
import IPython.display | ||
|
||
import funix | ||
import funix | ||
|
||
|
||
@funix.funix( # Funix.io, the laziest way to build web apps in Python | ||
title="OpenAI: Dall-E", | ||
description="""Generate an image with DALL-E in [Funix](http://funix.io), the minimalist way to build apps in Python. An OpenAI key needs to be set. A rate limit is applied. """, | ||
rate_limit=funix.decorator.Limiter.session(max_calls=1, time_frame=60*60*24), | ||
rate_limit=funix.decorator.Limiter.session(max_calls=1, period=60 * 60 * 24), | ||
show_source=True, | ||
) | ||
def dalle(Prompt: str = "a cat on a red jeep") -> IPython.display.Image: | ||
response = openai.Image.create(prompt=Prompt, size="256x256") | ||
return response["data"][0]["url"] | ||
|
||
|
||
# **Note:** | ||
# * An OpenAI key needs to be set in the environment variable OPENAI_KEY. | ||
# **Note:** | ||
# * An OpenAI key needs to be set in the environment variable OPENAI_KEY. | ||
# * A rate limit of 1 call per day per browser session is set. | ||
|
||
# Like us? Please star us on [GitHub](https://github.com/TexteaInc/funix). | ||
# Like us? Please star us on [GitHub](https://github.com/TexteaInc/funix). |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,18 +1,22 @@ | ||
import os # Python's native | ||
import openai # you cannot skip it | ||
import os # Python's native | ||
import openai # you cannot skip it | ||
|
||
openai.api_key = os.environ.get("OPENAI_KEY") | ||
|
||
import funix | ||
@funix.funix(rate_limit=funix.decorator.Limiter.session(max_calls=2, time_frame=60*60*24)) | ||
|
||
# If in lazy mode, the two lines above should be commented out. | ||
|
||
@funix.funix( | ||
rate_limit=funix.decorator.Limiter.session(max_calls=2, period=60 * 60 * 24) | ||
) | ||
|
||
# If in lazy mode, the two lines above should be commented out. | ||
# Lazy mode means running this command | ||
# $ funix -l chatGPT_lazy.py | ||
|
||
|
||
def ChatGPT(prompt: str) -> str: | ||
completion = openai.ChatCompletion.create( | ||
messages=[{"role": "user", "content": prompt}], | ||
model="gpt-3.5-turbo" | ||
messages=[{"role": "user", "content": prompt}], model="gpt-3.5-turbo" | ||
) | ||
return completion["choices"][0]["message"]["content"] |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,40 +1,42 @@ | ||
# Building a one-turn chatbot from any causal language model hosted on HuggingFace using free Inference API | ||
|
||
# Copyleft 2023 Forrest Sheng Bao http://forrestbao.github.io | ||
# The purpose of this code is to demonstrate the use of Funix to turn a simple API call function to a web app. | ||
# Copyleft 2023 Forrest Sheng Bao http://forrestbao.github.io | ||
# The purpose of this code is to demonstrate the use of Funix to turn a simple API call function to a web app. | ||
|
||
# To turn this code into a web app, run the following command in the terminal: | ||
# funix huggingface.py -l # the -l flag is very important. It tells Funix to load the function as a web app. | ||
|
||
import os, json, typing # Python's native | ||
import requests # pip install requests | ||
import os, json, typing # Python's native | ||
import requests # pip install requests | ||
import ipywidgets | ||
|
||
# API_TOKEN = os.getenv("HF_TOKEN") # "Please set your API token as an environment variable named HF_TOKEN. You can get your token from https://huggingface.co/settings/token" | ||
|
||
import funix | ||
|
||
|
||
@funix.funix( | ||
description="""Talk to LLMs hosted at HuggingFace. A HuggingFace token needs to be set in the environment variable HF_TOKEN.""", | ||
# rate_limit=funix.decorator.Limiter.session(max_calls=20, time_frame=60*60*24), | ||
# rate_limit=funix.decorator.Limiter.session(max_calls=20, period=60*60*24), | ||
) | ||
def huggingface( | ||
model_name: typing.Literal[ | ||
"gpt2", | ||
"bigcode/starcoder", | ||
"google/flan-t5-base"] = "gpt2", | ||
prompt: str = "Who is Einstein?", | ||
API_TOKEN: ipywidgets.Password = None | ||
) -> str: | ||
|
||
payload = {"inputs": prompt, "max_tokens":200} # not all models use this query and output formats. Hence, we limit the models above. | ||
"gpt2", "bigcode/starcoder", "google/flan-t5-base" | ||
] = "gpt2", | ||
prompt: str = "Who is Einstein?", | ||
API_TOKEN: ipywidgets.Password = None, | ||
) -> str: | ||
payload = { | ||
"inputs": prompt, | ||
"max_tokens": 200, | ||
} # not all models use this query and output formats. Hence, we limit the models above. | ||
|
||
API_URL = f"https://api-inference.huggingface.co/models/{model_name}" | ||
headers = {"Authorization": f"Bearer {API_TOKEN.value}"} | ||
|
||
response = requests.post(API_URL, headers=headers, json=payload) | ||
|
||
if "error" in response.json(): | ||
if "error" in response.json(): | ||
return response.json()["error"] | ||
else: | ||
return response.json()[0]["generated_text"] | ||
return response.json()[0]["generated_text"] |