Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 28 additions & 8 deletions docs/my-website/docs/providers/chatgpt.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,12 @@ Use ChatGPT Pro/Max subscription models through LiteLLM with OAuth device flow authentication.

| Property | Details |
|-------|-------|
| Description | ChatGPT subscription access (Codex + GPT-5.2 family) via ChatGPT backend API |
| Description | ChatGPT subscription access (Codex + GPT-5.3/5.4 family) via ChatGPT backend API |
| Provider Route on LiteLLM | `chatgpt/` |
| Supported Endpoints | `/responses`, `/chat/completions` (bridged to Responses for supported models) |
| API Reference | https://chatgpt.com |

ChatGPT subscription access is native to the Responses API. Chat Completions requests are bridged to Responses for supported models (for example `chatgpt/gpt-5.2`).
ChatGPT subscription access is native to the Responses API. Chat Completions requests are bridged to Responses for supported models (for example `chatgpt/gpt-5.4`).

Notes:
- The ChatGPT subscription backend rejects token limit fields (`max_tokens`, `max_output_tokens`, `max_completion_tokens`) and `metadata`. LiteLLM strips these fields for this provider.
Expand All @@ -31,7 +31,7 @@ ChatGPT subscription access uses an OAuth device code flow:
import litellm

response = litellm.responses(
model="chatgpt/gpt-5.2-codex",
model="chatgpt/gpt-5.3-codex",
input="Write a Python hello world"
)

Expand All @@ -44,7 +44,7 @@ print(response)
import litellm

response = litellm.completion(
model="chatgpt/gpt-5.2",
model="chatgpt/gpt-5.4",
messages=[{"role": "user", "content": "Write a Python hello world"}]
)

Expand All @@ -55,16 +55,36 @@ print(response)

```yaml showLineNumbers title="config.yaml"
model_list:
- model_name: chatgpt/gpt-5.2
- model_name: chatgpt/gpt-5.4
model_info:
mode: responses
litellm_params:
model: chatgpt/gpt-5.2
- model_name: chatgpt/gpt-5.2-codex
model: chatgpt/gpt-5.4
- model_name: chatgpt/gpt-5.4-pro
model_info:
mode: responses
litellm_params:
model: chatgpt/gpt-5.2-codex
model: chatgpt/gpt-5.4-pro
- model_name: chatgpt/gpt-5.3-codex
model_info:
mode: responses
litellm_params:
model: chatgpt/gpt-5.3-codex
- model_name: chatgpt/gpt-5.3-codex-spark
model_info:
mode: responses
litellm_params:
model: chatgpt/gpt-5.3-codex-spark
- model_name: chatgpt/gpt-5.3-instant
model_info:
mode: responses
litellm_params:
model: chatgpt/gpt-5.3-instant
- model_name: chatgpt/gpt-5.3-chat-latest
model_info:
mode: responses
litellm_params:
model: chatgpt/gpt-5.3-chat-latest
```
```bash showLineNumbers title="Start LiteLLM Proxy"
Expand Down
4 changes: 4 additions & 0 deletions docs/my-website/docs/providers/openai.md
Original file line number Diff line number Diff line change
Expand Up @@ -192,8 +192,12 @@ os.environ["OPENAI_BASE_URL"] = "https://your_host/v1" # OPTIONAL
| gpt-5.2-2025-12-11 | `response = completion(model="gpt-5.2-2025-12-11", messages=messages)` |
| gpt-5.2-chat-latest | `response = completion(model="gpt-5.2-chat-latest", messages=messages)` |
| gpt-5.3-chat-latest | `response = completion(model="gpt-5.3-chat-latest", messages=messages)` |
| gpt-5.4 | `response = completion(model="gpt-5.4", messages=messages)` |
| gpt-5.4-2026-03-05 | `response = completion(model="gpt-5.4-2026-03-05", messages=messages)` |
| gpt-5.2-pro | `response = completion(model="gpt-5.2-pro", messages=messages)` |
| gpt-5.2-pro-2025-12-11 | `response = completion(model="gpt-5.2-pro-2025-12-11", messages=messages)` |
| gpt-5.4-pro | `response = completion(model="gpt-5.4-pro", messages=messages)` |
| gpt-5.4-pro-2026-03-05 | `response = completion(model="gpt-5.4-pro-2026-03-05", messages=messages)` |
| gpt-5.1 | `response = completion(model="gpt-5.1", messages=messages)` |
| gpt-5.1-codex | `response = completion(model="gpt-5.1-codex", messages=messages)` |
| gpt-5.1-codex-mini | `response = completion(model="gpt-5.1-codex-mini", messages=messages)` |
Expand Down
149 changes: 149 additions & 0 deletions litellm/model_prices_and_context_window_backup.json
Original file line number Diff line number Diff line change
Expand Up @@ -18437,6 +18437,93 @@
"max_tokens": 8191,
"mode": "embedding"
},
"chatgpt/gpt-5.4": {
"litellm_provider": "chatgpt",
"max_input_tokens": 1050000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"supported_endpoints": [
"/v1/chat/completions",
"/v1/responses"
],
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_response_schema": true,
"supports_vision": true
},
"chatgpt/gpt-5.4-pro": {
"litellm_provider": "chatgpt",
"max_input_tokens": 1050000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"supported_endpoints": [
"/v1/responses"
],
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_response_schema": true,
"supports_vision": true
},
"chatgpt/gpt-5.3-codex": {
"litellm_provider": "chatgpt",
"max_input_tokens": 128000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"supported_endpoints": [
"/v1/responses"
],
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_response_schema": true,
"supports_vision": true
},
"chatgpt/gpt-5.3-codex-spark": {
"litellm_provider": "chatgpt",
"max_input_tokens": 128000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"supported_endpoints": [
"/v1/responses"
],
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_response_schema": true,
"supports_vision": true
},
"chatgpt/gpt-5.3-instant": {
"litellm_provider": "chatgpt",
"max_input_tokens": 128000,
"max_output_tokens": 64000,
"max_tokens": 64000,
"mode": "responses",
"supported_endpoints": [
"/v1/chat/completions",
"/v1/responses"
],
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_response_schema": true,
"supports_vision": true
},
"chatgpt/gpt-5.3-chat-latest": {
"litellm_provider": "chatgpt",
"max_input_tokens": 128000,
"max_output_tokens": 64000,
"max_tokens": 64000,
"mode": "responses",
"supported_endpoints": [
"/v1/chat/completions",
"/v1/responses"
],
"supports_function_calling": true,
"supports_parallel_function_calling": true,
"supports_response_schema": true,
"supports_vision": true
},
"chatgpt/gpt-5.2-codex": {
"litellm_provider": "chatgpt",
"max_input_tokens": 128000,
Expand Down Expand Up @@ -20891,6 +20978,68 @@
"supports_service_tier": true,
"supports_vision": true
},
"gpt-5.4-pro": {
"cache_read_input_token_cost": 2e-06,
"input_cost_per_token": 2e-05,
"litellm_provider": "openai",
"max_input_tokens": 1050000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"output_cost_per_token": 0.00012,
"supported_endpoints": [
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": true,
"supports_tool_choice": true,
"supports_vision": true,
"supports_web_search": true
},
"gpt-5.4-pro-2026-03-05": {
"cache_read_input_token_cost": 2e-06,
"input_cost_per_token": 2e-05,
"litellm_provider": "openai",
"max_input_tokens": 1050000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"output_cost_per_token": 0.00012,
"supported_endpoints": [
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": true,
"supports_tool_choice": true,
"supports_vision": true,
"supports_web_search": true
},
"gpt-5-pro": {
"input_cost_per_token": 1.5e-05,
"input_cost_per_token_batches": 7.5e-06,
Expand Down
Loading
Loading