diff --git a/docs/guide/llms/llms.md b/docs/guide/llms/llms.md index 88722d1b..647233c9 100644 --- a/docs/guide/llms/llms.md +++ b/docs/guide/llms/llms.md @@ -10,10 +10,12 @@ Autolabel makes it easy to try out different LLMs for your task and this page wi To use models from [OpenAI](https://platform.openai.com/docs/models), you can set `provider` to `openai` when creating a labeling configuration. The specific model that will be queried can be specified using the `name` key. Autolabel currently supports the following models from OpenAI: * `text-davinci-003` -* `gpt-3.5-turbo` -* `gpt-4` (8K Context) +* `gpt-3.5-turbo` and `gpt-3.5-turbo-0613` (4,096 max tokens) +* `gpt-3.5-turbo-16k` and `gpt-3.5-turbo-16k-0613` (16,384 max tokens) +* `gpt-4` and `gpt-4-0613` (8,192 max tokens) +* `gpt-4-32k` and `gpt-4-32k-0613` (32,768 max tokens) -`gpt-4` (8K Context) is the most capable (and most expensive) model from OpenAI, while `gpt-3.5-turbo` is the cheapest (but still quite capable). `gpt-4` costs 15 times `gpt-3.5-turbo`, at $0.03/1K input tokens and $0.06/1K output tokens while `gpt-3.5-turbo` costs only $0.0015/1K input tokens and $0.002/1K output tokens. `text-davinci-003` model on the other hand costs $0.02/1K tokens. Detailed pricing for these models is available [here](https://openai.com/pricing). +`gpt-4` set of models are the most capable (and most expensive) from OpenAI, while `gpt-3.5-turbo` set of models are cheap (but still quite capable). Detailed pricing for these models is available [here](https://openai.com/pricing). 
### Setup To use OpenAI models with Autolabel, make sure to first install the relevant packages by running: diff --git a/pyproject.toml b/pyproject.toml index 545de3f0..0387ec42 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,7 @@ dependencies = [ "numpy >= 1.23.0", "requests >= 2.27.0", "datasets >= 2.7.0", - "langchain >= 0.0.190", + "langchain >= 0.0.194", "nervaluate >= 0.1.8", "pandas >= 1.3.0", "scikit-learn >= 1.0.0", diff --git a/src/autolabel/models/openai.py b/src/autolabel/models/openai.py index 8b1e3a1c..b83b5b6c 100644 --- a/src/autolabel/models/openai.py +++ b/src/autolabel/models/openai.py @@ -16,7 +16,11 @@ class OpenAILLM(BaseModel): "gpt-3.5-turbo", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-16k-0613", "gpt-4", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0613", ] MODELS_WITH_TOKEN_PROBS = ["text-curie-001", "text-davinci-003"] @@ -39,7 +43,11 @@ class OpenAILLM(BaseModel): "gpt-3.5-turbo": 0.0015 / 1000, "gpt-3.5-turbo-0613": 0.0015 / 1000, "gpt-3.5-turbo-16k": 0.003 / 1000, + "gpt-3.5-turbo-16k-0613": 0.003 / 1000, "gpt-4": 0.03 / 1000, + "gpt-4-0613": 0.03 / 1000, + "gpt-4-32k": 0.06 / 1000, + "gpt-4-32k-0613": 0.06 / 1000, } COST_PER_COMPLETION_TOKEN = { "text-davinci-003": 0.02 / 1000, @@ -47,7 +55,11 @@ class OpenAILLM(BaseModel): "gpt-3.5-turbo": 0.002 / 1000, "gpt-3.5-turbo-0613": 0.002 / 1000, "gpt-3.5-turbo-16k": 0.004 / 1000, - "gpt-4": 0.06 / 1000, # $0.06 per 1000 tokens in response + "gpt-3.5-turbo-16k-0613": 0.004 / 1000, + "gpt-4": 0.06 / 1000, + "gpt-4-0613": 0.06 / 1000, + "gpt-4-32k": 0.12 / 1000, + "gpt-4-32k-0613": 0.12 / 1000, } @cached_property