diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index ce830574..63b24ee5 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -95,6 +95,8 @@ AsyncBetaRealtime, BetaSessions, AsyncBetaSessions, + BetaTranscriptionSessions, + AsyncBetaTranscriptionSessions, Responses, InputItems, AsyncResponses, @@ -131,6 +133,16 @@ AsyncIntegrationsModels, Providers, AsyncProviders, + Webhooks, + AsyncWebhooks, + MainRealtime, + AsyncMainRealtime, + ClientSecrets, + AsyncClientSecrets, + Conversations, + AsyncConversations, + ConversationsItems, + AsyncConversationsItems, ) from portkey_ai.version import VERSION @@ -245,6 +257,8 @@ "AsyncBetaRealtime", "BetaSessions", "AsyncBetaSessions", + "BetaTranscriptionSessions", + "AsyncBetaTranscriptionSessions", "Responses", "InputItems", "AsyncResponses", @@ -281,4 +295,14 @@ "AsyncIntegrationsModels", "Providers", "AsyncProviders", + "Webhooks", + "AsyncWebhooks", + "MainRealtime", + "AsyncMainRealtime", + "ClientSecrets", + "AsyncClientSecrets", + "Conversations", + "AsyncConversations", + "ConversationsItems", + "AsyncConversationsItems", ] diff --git a/portkey_ai/_vendor/openai-1.86.0.dist-info/INSTALLER b/portkey_ai/_vendor/openai-1.107.2.dist-info/INSTALLER similarity index 100% rename from portkey_ai/_vendor/openai-1.86.0.dist-info/INSTALLER rename to portkey_ai/_vendor/openai-1.107.2.dist-info/INSTALLER diff --git a/portkey_ai/_vendor/openai-1.86.0.dist-info/METADATA b/portkey_ai/_vendor/openai-1.107.2.dist-info/METADATA similarity index 85% rename from portkey_ai/_vendor/openai-1.86.0.dist-info/METADATA rename to portkey_ai/_vendor/openai-1.107.2.dist-info/METADATA index 7a45185e..9445bef6 100644 --- a/portkey_ai/_vendor/openai-1.86.0.dist-info/METADATA +++ b/portkey_ai/_vendor/openai-1.107.2.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.3 Name: openai -Version: 1.86.0 +Version: 1.107.2 Summary: The official Python library for the openai API Project-URL: Homepage, https://github.com/openai/openai-python Project-URL: Repository, https://github.com/openai/openai-python @@ -18,6 +18,7 @@ Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Typing :: Typed Requires-Python: >=3.8 @@ -29,6 +30,9 @@ Requires-Dist: pydantic<3,>=1.9.0 Requires-Dist: sniffio Requires-Dist: tqdm>4 Requires-Dist: typing-extensions<5,>=4.11 +Provides-Extra: aiohttp +Requires-Dist: aiohttp; extra == 'aiohttp' +Requires-Dist: httpx-aiohttp>=0.1.8; extra == 'aiohttp' Provides-Extra: datalib Requires-Dist: numpy>=1; extra == 'datalib' Requires-Dist: pandas-stubs>=1.1.0.11; extra == 'datalib' @@ -42,7 +46,8 @@ Description-Content-Type: text/markdown # OpenAI Python API library -[![PyPI version](https://img.shields.io/pypi/v/openai.svg)](https://pypi.org/project/openai/) + +[![PyPI version](https://img.shields.io/pypi/v/openai.svg?label=pypi%20(stable))](https://pypi.org/project/openai/) The OpenAI Python library provides convenient access to the OpenAI REST API from any Python 3.8+ application. The library includes type definitions for all request params and response fields, @@ -187,6 +192,44 @@ asyncio.run(main()) Functionality between the synchronous and asynchronous clients is otherwise identical. 
+### With aiohttp + +By default, the async client uses `httpx` for HTTP requests. However, for improved concurrency performance you may also use `aiohttp` as the HTTP backend. + +You can enable this by installing `aiohttp`: + +```sh +# install from PyPI +pip install openai[aiohttp] +``` + +Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`: + +```python +import asyncio +from openai import DefaultAioHttpClient +from openai import AsyncOpenAI + + +async def main() -> None: + async with AsyncOpenAI( + api_key="My API Key", + http_client=DefaultAioHttpClient(), + ) as client: + chat_completion = await client.chat.completions.create( + messages=[ + { + "role": "user", + "content": "Say this is a test", + } + ], + model="gpt-4o", + ) + + +asyncio.run(main()) +``` + ## Streaming responses We provide support for streaming responses using Server Side Events (SSE). @@ -229,7 +272,7 @@ async def main(): asyncio.run(main()) ``` -## Realtime API beta +## Realtime API The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as [function calling](https://platform.openai.com/docs/guides/function-calling) through a WebSocket connection. @@ -246,7 +289,7 @@ from openai import AsyncOpenAI async def main(): client = AsyncOpenAI() - async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection: + async with client.realtime.connect(model="gpt-realtime") as connection: await connection.session.update(session={'modalities': ['text']}) await connection.conversation.item.create( @@ -280,7 +323,7 @@ Whenever an error occurs, the Realtime API will send an [`error` event](https:// ```py client = AsyncOpenAI() -async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection: +async with client.realtime.connect(model="gpt-realtime") as connection: ... async for event in connection: if event.type == 'error': @@ -409,6 +452,86 @@ client.files.create( The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically. +## Webhook Verification + +Verifying webhook signatures is _optional but encouraged_. + +For more information about webhooks, see [the API docs](https://platform.openai.com/docs/guides/webhooks). + +### Parsing webhook payloads + +For most use cases, you will likely want to verify the webhook and parse the payload at the same time. To achieve this, we provide the method `client.webhooks.unwrap()`, which parses a webhook request and verifies that it was sent by OpenAI. This method will raise an error if the signature is invalid. + +Note that the `body` parameter must be the raw JSON string sent from the server (do not parse it first). The `.unwrap()` method will parse this JSON for you into an event object after verifying the webhook was sent from OpenAI. 
+ +```python +from openai import OpenAI +from flask import Flask, request + +app = Flask(__name__) +client = OpenAI() # OPENAI_WEBHOOK_SECRET environment variable is used by default + + +@app.route("/webhook", methods=["POST"]) +def webhook(): + request_body = request.get_data(as_text=True) + + try: + event = client.webhooks.unwrap(request_body, request.headers) + + if event.type == "response.completed": + print("Response completed:", event.data) + elif event.type == "response.failed": + print("Response failed:", event.data) + else: + print("Unhandled event type:", event.type) + + return "ok" + except Exception as e: + print("Invalid signature:", e) + return "Invalid signature", 400 + + +if __name__ == "__main__": + app.run(port=8000) +``` + +### Verifying webhook payloads directly + +In some cases, you may want to verify the webhook separately from parsing the payload. If you prefer to handle these steps separately, we provide the method `client.webhooks.verify_signature()` to _only verify_ the signature of a webhook request. Like `.unwrap()`, this method will raise an error if the signature is invalid. + +Note that the `body` parameter must be the raw JSON string sent from the server (do not parse it first). You will then need to parse the body after verifying the signature. + +```python +import json +from openai import OpenAI +from flask import Flask, request + +app = Flask(__name__) +client = OpenAI() # OPENAI_WEBHOOK_SECRET environment variable is used by default + + +@app.route("/webhook", methods=["POST"]) +def webhook(): + request_body = request.get_data(as_text=True) + + try: + client.webhooks.verify_signature(request_body, request.headers) + + # Parse the body after verification + event = json.loads(request_body) + print("Verified event:", event) + + return "ok" + except Exception as e: + print("Invalid signature:", e) + return "Invalid signature", 400 + + +if __name__ == "__main__": + app.run(port=8000) +``` + ## Handling errors When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `openai.APIConnectionError` is raised. 
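The `__init__.py` hunk at the top of this diff re-exports the new `Webhooks`/`AsyncWebhooks` resources, which suggests the Portkey client will surface the same verification helpers as the vendored openai SDK. A minimal sketch, assuming a `Portkey` client exposes `client.webhooks.unwrap()` with the same behavior as the `OpenAI` client shown in the README hunk above (the Portkey wiring and the key name are assumptions, not confirmed by this diff):

```python
from portkey_ai import Portkey

# Assumption: Portkey proxies the vendored openai `webhooks` resource, so
# unwrap() verifies the signature and parses the raw JSON body in one call.
client = Portkey(api_key="PORTKEY_API_KEY")  # hypothetical credentials


def handle_webhook(raw_body: str, headers: dict) -> str:
    try:
        # Raises if the signature is invalid; returns a parsed event object.
        event = client.webhooks.unwrap(raw_body, headers)
        print("Verified event:", event.type)
        return "ok"
    except Exception as exc:
        print("Invalid signature:", exc)
        return "Invalid signature"
```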
diff --git a/portkey_ai/_vendor/openai-1.86.0.dist-info/RECORD b/portkey_ai/_vendor/openai-1.107.2.dist-info/RECORD similarity index 51% rename from portkey_ai/_vendor/openai-1.86.0.dist-info/RECORD rename to portkey_ai/_vendor/openai-1.107.2.dist-info/RECORD index efe75eaa..9159d4ae 100644 --- a/portkey_ai/_vendor/openai-1.86.0.dist-info/RECORD +++ b/portkey_ai/_vendor/openai-1.107.2.dist-info/RECORD @@ -1,729 +1,975 @@ [RECORD hunk elided: hundreds of machine-generated removals of stale cached .cpython-39.pyc entries pointing at a local pip temp directory (/Users/chandeep/Library/Caches/...), replaced by the regenerated install record for openai 1.107.2.]
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/realtime/session_created_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/realtime/session_update_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/realtime/session_update_event_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/realtime/session_updated_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/realtime/transcription_session.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/realtime/transcription_session_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/realtime/transcription_session_update.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/realtime/transcription_session_update_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/realtime/transcription_session_updated_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/thread.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/thread_create_and_run_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/thread_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/thread_deleted.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/thread_update_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/__init__.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/annotation.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/annotation_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/file_citation_annotation.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/file_citation_delta_annotation.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/file_path_annotation.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/file_path_delta_annotation.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/image_file.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/image_file_content_block.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/image_file_content_block_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/image_file_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/image_file_delta_block.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/image_file_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/image_url.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/image_url_content_block.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/image_url_content_block_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/image_url_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/image_url_delta_block.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/image_url_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/message.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/message_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/message_content_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/message_content_part_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/message_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/message_deleted.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/message_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/message_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/message_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/message_update_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/refusal_content_block.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/refusal_delta_block.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/required_action_function_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/run.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/run_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/run_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/run_status.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/run_submit_tool_outputs_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/run_update_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/code_interpreter_logs.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/code_interpreter_output_image.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/code_interpreter_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/file_search_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/file_search_tool_call_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/function_tool_call.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/function_tool_call_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/message_creation_step_details.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/run_step.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/run_step_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/run_step_delta_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/run_step_delta_message_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/run_step_include.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/step_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/step_retrieve_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/tool_call_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/tool_call_delta_object.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/runs/tool_calls_step_details.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/text.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/text_content_block.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/text_content_block_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/text_delta.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/beta/threads/text_delta_block.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_assistant_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_audio.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_audio_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_chunk.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_content_part_image_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_content_part_input_audio_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_content_part_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_content_part_refusal_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_content_part_text_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_deleted.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_developer_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_function_call_option_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_function_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_message.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_message_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_message_tool_call_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_modality.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_named_tool_choice_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_prediction_content_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_reasoning_effort.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_role.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_store_message.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_stream_options_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_system_message_param.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_token_logprob.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_tool.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_tool_choice_option_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_tool_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/chat_completion_user_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/completion_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/completion_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/completion_update_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/completions/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/completions/message_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/parsed_chat_completion.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat/parsed_function_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/chat_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/completion.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/completion_choice.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/completion_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/completion_usage.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/container_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/container_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/container_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/container_list_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/container_retrieve_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/containers/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/containers/file_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/containers/file_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/containers/file_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/containers/file_list_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/containers/file_retrieve_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/containers/files/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/create_embedding_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/embedding.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/embedding_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/embedding_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/eval_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/eval_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/eval_custom_data_source_config.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/eval_delete_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/eval_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/eval_list_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/eval_retrieve_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/eval_stored_completions_data_source_config.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/eval_update_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/eval_update_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/create_eval_completions_run_data_source.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/create_eval_completions_run_data_source_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/create_eval_jsonl_run_data_source.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/create_eval_jsonl_run_data_source_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/eval_api_error.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/run_cancel_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/run_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/run_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/run_delete_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/run_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/run_list_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/run_retrieve_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/runs/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/runs/output_item_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/runs/output_item_list_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/evals/runs/output_item_retrieve_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/file_chunking_strategy.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/file_chunking_strategy_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/file_content.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/file_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/file_deleted.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/file_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/file_object.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/file_purpose.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/alpha/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/alpha/grader_run_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/alpha/grader_run_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/alpha/grader_validate_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/alpha/grader_validate_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/checkpoints/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/checkpoints/permission_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/checkpoints/permission_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/checkpoints/permission_delete_response.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/checkpoints/permission_retrieve_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/checkpoints/permission_retrieve_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/dpo_hyperparameters.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/dpo_hyperparameters_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/dpo_method.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/dpo_method_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/fine_tuning_job.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/fine_tuning_job_event.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/fine_tuning_job_integration.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/fine_tuning_job_wandb_integration.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/job_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/job_list_events_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/job_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/types/fine_tuning/jobs/__init__.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-aryzpxs8/lib/python/openai/[...several hundred machine-specific cached-bytecode (.cpython-39.pyc) RECORD entries, one per openai module, removed...]
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/[...the corresponding cached-bytecode RECORD entries for the rebuilt openai 1.107.2 install, added...]
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/containers/files/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/containers/files/content.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/containers/files/files.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/conversations/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/conversations/conversations.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/conversations/items.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/embeddings.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/evals/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/evals/evals.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/evals/runs/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/evals/runs/output_items.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/evals/runs/runs.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/files.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/fine_tuning/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/fine_tuning/alpha/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/fine_tuning/alpha/alpha.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/fine_tuning/alpha/graders.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/fine_tuning/checkpoints/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/fine_tuning/checkpoints/checkpoints.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/fine_tuning/checkpoints/permissions.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/fine_tuning/fine_tuning.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/fine_tuning/jobs/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/fine_tuning/jobs/checkpoints.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/fine_tuning/jobs/jobs.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/images.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/models.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/moderations.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/realtime/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/realtime/client_secrets.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/realtime/realtime.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/responses/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/responses/input_items.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/responses/responses.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/uploads/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/uploads/parts.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/uploads/uploads.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/vector_stores/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/vector_stores/file_batches.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/vector_stores/files.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/vector_stores/vector_stores.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/resources/webhooks.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/speech_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/speech_model.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/transcription.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/transcription_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/transcription_create_response.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/transcription_include.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/transcription_segment.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/transcription_stream_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/transcription_text_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/transcription_text_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/transcription_verbose.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/transcription_word.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/translation.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/translation_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/translation_create_response.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio/translation_verbose.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio_model.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/audio_response_format.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/auto_file_chunking_strategy_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/batch.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/batch_create_params.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/batch_error.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/batch_list_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/batch_request_counts.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_deleted.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_list_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_response_format_option.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_response_format_option_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_stream_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_tool.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_tool_choice.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_tool_choice_function.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_tool_choice_function_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_tool_choice_option.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_tool_choice_option_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_tool_choice_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_tool_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/assistant_update_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/chat/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/code_interpreter_tool.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/code_interpreter_tool_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/file_search_tool.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/file_search_tool_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/function_tool.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/function_tool_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_created_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_content.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_content_param.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_create_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_create_event_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_created_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_delete_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_delete_event_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_deleted_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_retrieve_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_retrieve_event_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_truncate_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_truncate_event_param.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_truncated_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_with_reference.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/conversation_item_with_reference_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/error_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/input_audio_buffer_append_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/input_audio_buffer_append_event_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/input_audio_buffer_clear_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/input_audio_buffer_clear_event_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/input_audio_buffer_cleared_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/input_audio_buffer_commit_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/input_audio_buffer_commit_event_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/input_audio_buffer_committed_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/input_audio_buffer_speech_started_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/rate_limits_updated_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/realtime_client_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/realtime_client_event_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/realtime_connect_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/realtime_response.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/realtime_response_status.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/realtime_response_usage.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/realtime_server_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_audio_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_audio_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_audio_transcript_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_audio_transcript_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_cancel_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_cancel_event_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_content_part_added_event.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_content_part_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_create_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_create_event_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_created_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_function_call_arguments_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_function_call_arguments_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_output_item_added_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_output_item_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_text_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/response_text_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/session.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/session_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/session_create_response.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/session_created_event.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/session_update_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/session_update_event_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/session_updated_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/transcription_session.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/transcription_session_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/transcription_session_update.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/transcription_session_update_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/realtime/transcription_session_updated_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/thread.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/thread_create_and_run_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/thread_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/thread_deleted.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/thread_update_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/annotation.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/annotation_delta.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/file_citation_annotation.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/file_citation_delta_annotation.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/file_path_annotation.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/file_path_delta_annotation.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/image_file.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/image_file_content_block.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/image_file_content_block_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/image_file_delta.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/image_file_delta_block.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/image_file_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/image_url.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/image_url_content_block.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/image_url_content_block_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/image_url_delta.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/image_url_delta_block.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/image_url_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/message.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/message_content.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/message_content_delta.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/message_content_part_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/message_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/message_deleted.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/message_delta.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/message_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/message_list_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/message_update_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/refusal_content_block.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/refusal_delta_block.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/beta/threads/required_action_function_tool_call.cpython-39.pyc,, 
+[… several hundred machine-local compiled-bytecode entries elided: RECORD-format `path,,` lines for `cpython-39.pyc` files resolved against a per-user pip cache (`…/Users/chandeep/Library/Caches/com.apple.python/…/pip-target-0liyujg0/lib/python/`), covering the vendored `openai/types/**` modules (beta/threads, chat, conversations, containers, evals, fine_tuning, graders, images, moderations, realtime, …) …]
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_response.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_response_create_audio_output.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_response_create_audio_output_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_response_create_mcp_tool.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_response_create_mcp_tool_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_response_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_response_create_params_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_response_status.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_response_usage.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_response_usage_input_token_details.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_response_usage_output_token_details.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_server_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_session_client_secret.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_session_create_request.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_session_create_request_param.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_session_create_response.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_tool_choice_config.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_tool_choice_config_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_tools_config.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_tools_config_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_tools_config_union.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_tools_config_union_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_tracing_config.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_tracing_config_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_transcription_session_audio.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_transcription_session_audio_input.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_transcription_session_audio_input_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_transcription_session_audio_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_transcription_session_create_request.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_transcription_session_create_request_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_transcription_session_create_response.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_transcription_session_turn_detection.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_truncation.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_truncation_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_truncation_retention_ratio.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/realtime_truncation_retention_ratio_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_audio_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_audio_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_audio_transcript_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_audio_transcript_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_cancel_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_cancel_event_param.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_content_part_added_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_content_part_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_create_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_create_event_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_created_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_function_call_arguments_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_function_call_arguments_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_mcp_call_arguments_delta.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_mcp_call_arguments_done.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_mcp_call_completed.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_mcp_call_failed.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_mcp_call_in_progress.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_output_item_added_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_output_item_done_event.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_text_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/response_text_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/session_created_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/session_update_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/session_update_event_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/realtime/session_updated_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/computer_tool.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/computer_tool_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/custom_tool.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/custom_tool_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/easy_input_message.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/easy_input_message_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/file_search_tool.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/file_search_tool_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/function_tool.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/function_tool_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/input_item_list_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/parsed_response.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_audio_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_audio_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_audio_transcript_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_audio_transcript_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_code_interpreter_call_code_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_code_interpreter_call_code_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_code_interpreter_call_completed_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_code_interpreter_call_in_progress_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_code_interpreter_call_interpreting_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_code_interpreter_tool_call.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_code_interpreter_tool_call_param.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_completed_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_computer_tool_call.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_computer_tool_call_output_item.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_computer_tool_call_output_screenshot.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_computer_tool_call_output_screenshot_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_computer_tool_call_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_content_part_added_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_content_part_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_conversation_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_created_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_custom_tool_call.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_custom_tool_call_input_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_custom_tool_call_input_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_custom_tool_call_output.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_custom_tool_call_output_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_custom_tool_call_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_error.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_error_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_failed_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_file_search_call_completed_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_file_search_call_in_progress_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_file_search_call_searching_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_file_search_tool_call.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_file_search_tool_call_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_format_text_config.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_format_text_config_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_format_text_json_schema_config.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_format_text_json_schema_config_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_function_call_arguments_delta_event.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_function_call_arguments_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_function_tool_call.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_function_tool_call_item.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_function_tool_call_output_item.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_function_tool_call_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_function_web_search.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_function_web_search_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_image_gen_call_completed_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_image_gen_call_generating_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_image_gen_call_in_progress_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_image_gen_call_partial_image_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_in_progress_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_includable.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_incomplete_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_audio.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_audio_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_content.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_content_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_file.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_file_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_image.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_image_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_item.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_item_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_message_content_list.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_message_content_list_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_message_item.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_text.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_input_text_param.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_item.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_item_list.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_mcp_call_arguments_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_mcp_call_arguments_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_mcp_call_completed_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_mcp_call_failed_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_mcp_call_in_progress_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_mcp_list_tools_completed_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_mcp_list_tools_failed_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_mcp_list_tools_in_progress_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_output_item.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_output_item_added_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_output_item_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_output_message.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_output_message_param.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_output_refusal.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_output_refusal_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_output_text.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_output_text_annotation_added_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_output_text_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_prompt.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_prompt_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_queued_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_reasoning_item.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_reasoning_item_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_reasoning_summary_part_added_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_reasoning_summary_part_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_reasoning_summary_text_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_reasoning_summary_text_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_reasoning_text_delta_event.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_reasoning_text_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_refusal_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_refusal_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_retrieve_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_status.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_stream_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_text_config.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_text_config_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_text_delta_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_text_done_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_usage.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_web_search_call_completed_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_web_search_call_in_progress_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/response_web_search_call_searching_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-0liyujg0/lib/python/openai/types/responses/tool.cpython-39.pyc,, 
../../bin/openai,sha256=hl2E5BbKWVfkczcW65l8G1zyeJ3Si5m9TUnp5aG8gtY,276
-openai-1.86.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-openai-1.86.0.dist-info/METADATA,sha256=cKhyhe0accRk2V0OGPq-AuQNUR1rww2ZJD0qN6G3nxw,25138
-openai-1.86.0.dist-info/RECORD,,
-openai-1.86.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-openai-1.86.0.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
-openai-1.86.0.dist-info/entry_points.txt,sha256=kAYhQEmziJwsKs5raYAIOvJ2LWmbz5dulEXOzsY71ro,43
-openai-1.86.0.dist-info/licenses/LICENSE,sha256=1xHtN7sZrnJJr40JO4_G6nWP01VLkqxhUAwa08wOP7k,11336
-openai/__init__.py,sha256=o-XgBBjlusJELbtJfi8w2rcdxQ3tRS2z4uvE24MTdJQ,10544
+openai-1.107.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+openai-1.107.2.dist-info/METADATA,sha256=_oliSkNGptefjXcXlTYV63p2-x1Run5IIo25lz2ZZyM,29030
+openai-1.107.2.dist-info/RECORD,,
+openai-1.107.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+openai-1.107.2.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+openai-1.107.2.dist-info/entry_points.txt,sha256=kAYhQEmziJwsKs5raYAIOvJ2LWmbz5dulEXOzsY71ro,43
+openai-1.107.2.dist-info/licenses/LICENSE,sha256=1xHtN7sZrnJJr40JO4_G6nWP01VLkqxhUAwa08wOP7k,11336
+openai/__init__.py,sha256=B-nf5xcFoH-mn3Pf7SXw7bm4UOiNR5FWgrRIjEuEu0A,11128
openai/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30
-openai/_base_client.py,sha256=I9VcAhlRwB-wpAs3cd0UTAcEXRdGnxegT5ovz6gFuMY,66282
-openai/_client.py,sha256=xDU9TIPqxltUmI_32LFXEl12eD8mNJUdS7_sB4ecv1A,38062
-openai/_compat.py,sha256=Mtzi28qOK99ZBPcGcQqdjoUFk2MzzpqjaafjuwQ4NO0,6982
+openai/_base_client.py,sha256=py5X2dcjQS6bTuzlh3tXm3SxDoXLHwzQ32t0GLJt_YE,68237
+openai/_client.py,sha256=5aw6OYUlG0UDAzdlBzwur61H5glWHCJlzxTy8gRhPWg,43473
+openai/_compat.py,sha256=k2XpUhYfgp5ZXkZkQAftJHt_UWFjUct1Sm2ye2kPBXo,6964
openai/_constants.py,sha256=WmCwgT4tGmFsSrltb26f3bM8ftUyFYkzh32Ny5yl-So,467
-openai/_exceptions.py,sha256=2BEuXwqce9z7X6lWLLXRqg1vOay_q-OdLz9lcj6Pluw,4798
+openai/_exceptions.py,sha256=TYcCxnfT7fln5duvVnCVJ0znuUHXSAbCT5sAMnaeKjU,5008
openai/_extras/__init__.py,sha256=sainrYWujCxIyL24wNpKfMVr-ZyBPlnSZfqXcg2S6Xg,165
openai/_extras/_common.py,sha256=NWWtgbdJsO3hQGQxaXGfVk0LjeIE5AFZ8VS_795hhMc,364
openai/_extras/numpy_proxy.py,sha256=LyTZkKDdnjz0h1SKLsphrhmXyUsJ_xEUhTFMrCf7k7g,805
openai/_extras/pandas_proxy.py,sha256=NCEt1Dqwc_0H85YdsWPDE3lPDJtYnBT8G-gJE_BCeEc,637
openai/_extras/sounddevice_proxy.py,sha256=xDoE21YGu13dSAJJkiOM9Qdb7uOIv5zskaJRX6xciEg,725
-openai/_files.py,sha256=WEf6hxJN1u3pVkdnPCpinhxCUnOV2olt4J6vLoJ_k48,3616
+openai/_files.py,sha256=cQOoF0UFpnyH5JMIdu_EvGpj_dGzH1ojtJvyX7Xwqn0,3612
openai/_legacy_response.py,sha256=fx9I0IInZY1zr2bUmpqW2ZUcL9JW2xS6S4NqFuwhdPM,16237
-openai/_models.py,sha256=htXhWuIpQf9gCHbePbd0T-DNNGAk2TDW8NO1wg2AdRw,30885
-openai/_module_client.py,sha256=5d09ESURt1WzwyyuU5UIi9Nf3fb7LZy5fOzkNX1Gu9s,4047
+openai/_models.py,sha256=q2pO0k0ZGbDua1KFMasMb4G0n4KyhUO8agXs04yj_XY,31764
+openai/_module_client.py,sha256=bIfYb6J1rtubuqEkiZVf5zdu9RmKWQiDeudded9Ch80,4817
openai/_qs.py,sha256=AOkSz4rHtK4YI3ZU_kzea-zpwBUgEY8WniGmTPyEimc,4846
openai/_resource.py,sha256=IQihFzFLhGOiGSlT2dO1ESWSTg2XypgbtAldtGdTOqU,1100
openai/_response.py,sha256=zLVaMPYE1o2Tz1eS5_bnJNGMikRN1byMpMcVpW1tgIU,29510
-openai/_streaming.py,sha256=CzoTv1ialbRWDDPG5zXL-DKuS-RqW5xd8A5Ki0acC14,13254
-openai/_types.py,sha256=tXtQgR4pyal7AZ8kvCh_KC2CSwBeWiiLKBrcMgTZwJs,6296
-openai/_utils/__init__.py,sha256=WnJrKMH-HJifY1H9sSTocSjuVSm4s2W_2QnIm3-wxZI,2222
+openai/_streaming.py,sha256=eT79w7kiXCR_PGRAC9veunVnlMKau1yP0xUMUMKagp0,13390
+openai/_types.py,sha256=xuD-NES62HeaWYe9rJx6K0wRDGMe5vkrgEFFd_wyPFs,7395
+openai/_utils/__init__.py,sha256=qiOG_n0G-sP5r5jNvD4OUaeaVLFEw5s-h7h7b0nD7Nk,2465
+openai/_utils/_compat.py,sha256=D8gtAvjJQrDWt9upS0XaG9Rr5l1QhiAx_I_1utT_tt0,1195
+openai/_utils/_datetime_parse.py,sha256=bABTs0Bc6rabdFvnIwXjEhWL15TcRgWZ_6XGTqN8xUk,4204
openai/_utils/_logs.py,sha256=IC5iwPflwelNpJEpWsvK3up-pol5hR8k_VL9fSukk_Y,1351
openai/_utils/_proxy.py,sha256=aglnj2yBTDyGX9Akk2crZHrl10oqRmceUy2Zp008XEs,1975
openai/_utils/_reflection.py,sha256=aTXm-W0Kww4PJo5LPkUnQ92N-2UvrK1-D67cJVBlIgw,1426
openai/_utils/_resources_proxy.py,sha256=AHHZCOgv-2CRqB4B52dB7ySlE5q6QCWj0bsTqNmzikw,589
openai/_utils/_streams.py,sha256=SMC90diFFecpEg_zgDRVbdR3hSEIgVVij4taD-noMLM,289
openai/_utils/_sync.py,sha256=TpGLrrhRNWTJtODNE6Fup3_k7zrWm1j2RlirzBwre-0,2862
-openai/_utils/_transform.py,sha256=IGkmM1m26ghH4LAIf07zhY87LXO4z00amwLz_fONgB0,15665
-openai/_utils/_typing.py,sha256=D0DbbNu8GnYQTSICnTSHDGsYXj8TcAKyhejb0XcnjtY,4602
-openai/_utils/_utils.py,sha256=h2TetivHt1f12-1v3MjysVaTQlCGzA33R6qzwRgNSKk,12727
-openai/_version.py,sha256=vxG_cI60yaKezQGM1F7r9wlIGE1vYlWgPBIAR8JbduA,159
+openai/_utils/_transform.py,sha256=smjI7WdWI69i6xtXq_7M5YOK5g2uclnSv6GJFwNO1R8,16005
+openai/_utils/_typing.py,sha256=N_5PPuFNsaygbtA_npZd98SVN1LQQvFTKL6bkWPBZGU,4786
+openai/_utils/_utils.py,sha256=NJAWnI8MgScTYEUpSLHGgNfgzsl6nyj949Fstg0ZrhM,12646
+openai/_version.py,sha256=e2heCRU7EXQk-1TbO1BcrT5iun_iwF6obO8XuPumC8Y,160
openai/cli/__init__.py,sha256=soGgtqyomgddl92H0KJRqHqGuaXIaghq86qkzLuVp7U,31
openai/cli/_api/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58
-openai/cli/_api/_main.py,sha256=5yyfLURqCEaAN8B61gHaqVAaYgtyb9Xq0ncQ3P2BAh0,451
+openai/cli/_api/_main.py,sha256=3xVyycq-4HEYMBdMDJFk893PTXpr8yvkGL3eCiuSx8E,501
openai/cli/_api/audio.py,sha256=IPbABMwryQ0CQTF4gi6VS3hJi6qFjoyj6IDV2ZoPT6A,3787
openai/cli/_api/chat/__init__.py,sha256=MhFUQH9F6QCtbPMlbsU_DWTd7wc5DSCZ7Wy3FBGVij0,300
openai/cli/_api/chat/completions.py,sha256=GyfAo3B2w2ySV0dK9D2IIVA4fOb0zqJZadQ-Yc8a_yU,5536
openai/cli/_api/completions.py,sha256=ysOmnbXpFz3VB5N_5USPdObiYew62vEn6rMtNFwTJGQ,6412
openai/cli/_api/files.py,sha256=6nKXFnsC2QE0bGnVUAG7BTLSu6K1_MhPE0ZJACmzgRY,2345
+openai/cli/_api/fine_tuning/__init__.py,sha256=hZeWhTZtIRAl1xgSbznjpCYy9lnUUXngh8uEIbVn__Y,286
+openai/cli/_api/fine_tuning/jobs.py,sha256=EQb7AeXUi-qkm7Vgru-z2usGTAlkjCDFwzo-8bFyY2I,5359
openai/cli/_api/image.py,sha256=ovBExdn8oUK9ImOpsPafesfAlmcftLP2p7d37hcUtKU,5062
openai/cli/_api/models.py,sha256=pGmIGZToj3raGGpKvPSq_EVUR-dqg4Vi0PNfZH98D2E,1295
-openai/cli/_cli.py,sha256=o6zWCnq84u-DIGZuR9YoOUxTGTpx-oCU5mgAKDi555c,6779
+openai/cli/_cli.py,sha256=42j_eI8PPdFbVjufluregmNYTdwrw3yQtsHtTzyNvcQ,6779
openai/cli/_errors.py,sha256=nejlu1HnOyAIr2n7uqpFtWn8XclWj_9N8FwgfT3BPK8,471
-openai/cli/_models.py,sha256=tgsldjG216KpwgAZ5pS0sV02FQvONDJU2ElA4kCCiIU,491
+openai/cli/_models.py,sha256=_budygMbXh3Fv-w-TDfWecZNiKfox6f0lliCUytxE1Q,491
openai/cli/_progress.py,sha256=aMLssU9jh-LoqRYH3608jNos7r6vZKnHTRlHxFznzv4,1406
openai/cli/_tools/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58
openai/cli/_tools/_main.py,sha256=pakjEXHRHqYlTml-RxV7fNrRtRXzmZBinoPi1AJipFY,467
@@ -737,103 +983,107 @@ openai/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224
openai/lib/__init__.py,sha256=BMTfMnlbugMgDA1STDIAlx4bI4t4l_8bQmJxd0th0n8,126
openai/lib/_old_api.py,sha256=XZnXBrEKuTd70iJirj5mGW35fZoqruJobbBTq6bvg10,1947
openai/lib/_parsing/__init__.py,sha256=wS3BYvMGj9TqiPqOe3rO1sleaAJqHVuCaQuCE5rZIUw,539
-openai/lib/_parsing/_completions.py,sha256=S_I--5LD9D672rBQV44uVhWUdvnvwNfwjj7fBkfosBQ,9150
-openai/lib/_parsing/_responses.py,sha256=5Fnj5PA4ob3HhJ8QM_8CgJXOEwHbKv9sfBSPoETKlok,5980
-openai/lib/_pydantic.py,sha256=MF-M_S4atYolma-qpAMUBgGp1nUDJY6bxnzQEtYId1U,5617
-openai/lib/_tools.py,sha256=KInc2niRgZOdeQhab-FnSqgJ--TI8MBKGnbPQ3W2Y58,1953
+openai/lib/_parsing/_completions.py,sha256=n1jIOBmhpUxYqRmktplQ5FPJOyBx6j-Kv9MEyquZJoI,10720
+openai/lib/_parsing/_responses.py,sha256=mqzKyVcXDepYUpUuGm-2bH6_xI9Y6XQAc3mnXc8aRqw,6025
+openai/lib/_pydantic.py,sha256=Cf0vGwuWdNEuIUg8WNREjWRGApMObgl8DjdLU4f5jAc,5623
+openai/lib/_tools.py,sha256=Dc4U2TXKvfAvVUvDS30SDeftrwgGM2vZ85t5ojLHiEg,1969
openai/lib/_validators.py,sha256=cXJXFuaAl7jeJcYHXXnFa4NHGtHs-_zt3Zs1VVCmQo4,35288
-openai/lib/azure.py,sha256=ODaQIn5YCayr9NoNOTp8LEoorXZCgpKybEirXx8V2Ic,25646
+openai/lib/azure.py,sha256=dLzUXTXUOnfarLdDyO6dVzp8wY2vTMFFHUJZLuFznWY,26537
openai/lib/streaming/__init__.py,sha256=kD3LpjsqU7caDQDhB-YjTUl9qqbb5sPnGGSI2yQYC70,379
openai/lib/streaming/_assistants.py,sha256=LUWSinmYopQIkQ5xSg73b6BWbkRkQS5JvX62w_V9xSw,40692
openai/lib/streaming/_deltas.py,sha256=I7B_AznXZwlBmE8Puau7ayTQUx6hMIEVE8FYTQm2fjs,2502
openai/lib/streaming/chat/__init__.py,sha256=7krL_atOvvpQkY_byWSglSfDsMs5hdoxHmz4Ulq7lcc,1305
-openai/lib/streaming/chat/_completions.py,sha256=mh37wLWGTPMLSgHOueBPHZyIjvSUGtE9d-2D1DyvzbQ,30826
+openai/lib/streaming/chat/_completions.py,sha256=RaX-eOg0yo3JD-MiUdV3L1am6deH3sIjd3E1U1AF6Xw,30851
openai/lib/streaming/chat/_events.py,sha256=lstVmM6YR2Cs9drikzrY9JCZn9Nbfym0aKIPtNpxL6w,2618
openai/lib/streaming/chat/_types.py,sha256=-SYVBNhGkOUoJ-8dotxpCRqPJpfyOQ8hwR2_HrsQCRI,739
openai/lib/streaming/responses/__init__.py,sha256=MwE1Oc3OIiXjtuRFsuP_k5Ra8pNiqKpc1GZum-8ZRJM,543
-openai/lib/streaming/responses/_events.py,sha256=lTu_Gjd4xGatfJgy3nzabr5xUoZckSIzN3eIFnNVP9E,5423
-openai/lib/streaming/responses/_responses.py,sha256=kMlRYdsL_Up6pCOpMVcCjp7z-cPthj2bYeh3K56vu5o,13582
+openai/lib/streaming/responses/_events.py,sha256=3UWmeYgg23E3XTkYVlrpXJPnhBM2kmQFoXh3WiT9CrE,5576
+openai/lib/streaming/responses/_responses.py,sha256=JfDf3GLrLo8tnUVHusUXGXAYgOcLiv6VAB4Sp5PmLms,13672
openai/lib/streaming/responses/_types.py,sha256=msq1KWj3e3BLn7NKu5j2kzHgj9kShuoitgXEyTmQxus,276
-openai/pagination.py,sha256=hzsCpoji93bVZ8jOfBs-sqPtTG0mf1MrpiduCOYlGoI,3242
+openai/pagination.py,sha256=dtPji3wApb_0rkvYDwh50rl8cjxT3i6EUS6PfTXwhQI,4770
openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
openai/resources/__init__.py,sha256=3vxUFYFi0I0JQ1tbQ8irmHiHCNd4sJ1Pc0_EnVk2SxE,5713
openai/resources/audio/__init__.py,sha256=YM7FHvPKVlj_v6EIgfpUQsb6q4hS2hVQ3gfkgic0sP0,1687
openai/resources/audio/audio.py,sha256=nEIB4q7a1MSYdQkcYH2O6jB-_rNCMDCBJyUuqOL67CI,5491
-openai/resources/audio/speech.py,sha256=yqc_h7dAsFwae5PrbpYkWpme1sY9B5PTSnjgrJWdzf4,9744
+openai/resources/audio/speech.py,sha256=Oud_n7MIvQ3dmOYcwsrgl18b9-4OmA0MRL0haBgLC9g,10232
openai/resources/audio/transcriptions.py,sha256=8Vt-ibNuIConeAz6ITQya_0NZkINjJdSDvV3BG8-PqQ,39960
openai/resources/audio/translations.py,sha256=3z6KoZfqy0AiYTBS-WwQx-ZFWcMBJluRaCHsoHlyJ68,15675
-openai/resources/batches.py,sha256=D7eS8qkw9qgs3q3Kura11NPS0uAwrs2XSFKiyu--0O0,20328
+openai/resources/batches.py,sha256=qZhg4o2VpPXIObU22d-JF2SRFjteBNDx3NtYZ33QJYo,20914
openai/resources/beta/__init__.py,sha256=rQz4y41YG2U8oSunK-nWrWBNbE_sIiEAjSLMzLIf4gU,1203
-openai/resources/beta/assistants.py,sha256=ptQFCa4WxB65zXFSdBRfhw4YSf7tftTjDB5njEsJTyk,45733
-openai/resources/beta/beta.py,sha256=Cc12HW9KWTegSpPnwFKRZJIhEfp0uBzOKpuTUBcO8Xw,5478
-openai/resources/beta/chat/__init__.py,sha256=d_fpyFMAG3iRAPIXANPfRG4HtEm6U_uMUYep7Skj2uY,263
-openai/resources/beta/chat/chat.py,sha256=sNvU8Fi_o3dWkD_X4Mobafv9XWBP6Y2dJxng-NdFXUs,597
-openai/resources/beta/chat/completions.py,sha256=Um3MOAuTSVHD0n7OI0I7piF0JOV6rooLoWEbcZ6pocE,29122
+openai/resources/beta/assistants.py,sha256=sjxzgLOGWqiM7nskgCxNIWg0tUkMBdAnAfJojmEK658,46079
+openai/resources/beta/beta.py,sha256=YyQeQiy7gdy6vyG9Q0AUqCzFb6Ow1BFKXj6QvtHFhDI,4771
openai/resources/beta/realtime/__init__.py,sha256=dOXRjPiDqRJXIFoGKSVjzKh3IwSXnLbwHx4ND5OdnVs,1412
-openai/resources/beta/realtime/realtime.py,sha256=7sgrNuxUHEkARV46OF5zz9uxzhCZvsqBrP1DepvZWyU,43470
-openai/resources/beta/realtime/sessions.py,sha256=FrN_-R_hxGoDsVPF_PAAIfHsouHatRfaRm_6AWvLlnU,22134
-openai/resources/beta/realtime/transcription_sessions.py,sha256=ByxJvryK6xg53gzus1qWG7OxI6bVwYbcU-qh-BbH0bI,14122
+openai/resources/beta/realtime/realtime.py,sha256=tuiq_0PdFmC2p-LNOfQNrVuDEMlLAHKEgeAsPsHLUHU,43694
+openai/resources/beta/realtime/sessions.py,sha256=XwW877IGqRZDl5DB055KcrrAsz4YrFoVTVF40tgMxVQ,21964
+openai/resources/beta/realtime/transcription_sessions.py,sha256=uTDGEat50lojdD0N8slnZu2RVzMP96rlicpDp4tpl34,14124
openai/resources/beta/threads/__init__.py,sha256=fQ_qdUVSfouVS5h47DlTb5mamChT4K-v-siPuuAB6do,1177
-openai/resources/beta/threads/messages.py,sha256=eYoTcCIOcWEXnOU1Myf1hDUR31MkNRzZYNG76eaiJik,30906
+openai/resources/beta/threads/messages.py,sha256=Mrc1HAURaXeBUJzZUlnjJ24yiSMffvaGaBZKSKR9H6Q,30926
openai/resources/beta/threads/runs/__init__.py,sha256=2FfDaqwmJJCd-IVpY_CrzWcFvw0KFyQ3cm5jnTfI-DQ,771
-openai/resources/beta/threads/runs/runs.py,sha256=iFqFyWNQ_2jb1ysjnOWHUms8cydru-QPbU8WENaUyfI,155392
-openai/resources/beta/threads/runs/steps.py,sha256=hZNCr_P-d1T3HxHuW7I7IPL_VwwZffFEtlgwOrb-Kow,17069
-openai/resources/beta/threads/threads.py,sha256=s1h76veuap8TJ1u-m3-WsnFOwDVdg2NtliIOdoDrBRI,99762
+openai/resources/beta/threads/runs/runs.py,sha256=X3zQ2Ox7WPynrSG12Fb00F1d8r566a5EkGO0PLpwDOA,155332
+openai/resources/beta/threads/runs/steps.py,sha256=L-wvKiCbyP1csP3wOu9iDA2IZSSFmoX5xZsNeJ-cPOE,17077
+openai/resources/beta/threads/threads.py,sha256=L3j0yInJUj2pm22DeNTVKW0QlZ-RsGubcYWmnDmttpw,99788
openai/resources/chat/__init__.py,sha256=8Q9ODRo1wIpFa34VaNwuaWFmxqFxagDtUhIAkQNvxEU,849
openai/resources/chat/chat.py,sha256=HjcasSCmt-g3-J-RkZQ9HRj_-hPfImakFxdUvvk5mCg,3364
openai/resources/chat/completions/__init__.py,sha256=KOi8blzNyHWD7nKgcoW3CxZ4428IcNVP0gCU74HySf8,901
-openai/resources/chat/completions/completions.py,sha256=Zj8qeECuwGxkknB-umSbWoEOGFM0KX6Tz55_1HrE9zE,124394
+openai/resources/chat/completions/completions.py,sha256=XIUrvCz4p78Cm0sYZ-Mfu3syieRTCYbdeozyAUMIoQ0,160692
openai/resources/chat/completions/messages.py,sha256=HCZH26TuTyuuajJy8MV--Irn4CHMQUSEt9C3j-E5Nvw,8052
-openai/resources/completions.py,sha256=q6aQ74RGb4c8yO0yO57nJCEMN78ysUFcqcyZMFw38iU,60135
+openai/resources/completions.py,sha256=UNHp9NO5HCG_SAbF2tblwrNJeqW4H8iVkwHByeV7zU0,60305
openai/resources/containers/__init__.py,sha256=7VzY-TFwG3x5D_kUCs_iAQaaCKAswt1Jk70KpmnU8Do,849
-openai/resources/containers/containers.py,sha256=Q1ceaaOI6AM329vqAGdbynENFuP-PLOUuvOv-XFU2O8,19306
+openai/resources/containers/containers.py,sha256=khM6qTRxbaDdtpcxe4TowiyVhMw0mniOPIFWA0i3nVI,19318
openai/resources/containers/files/__init__.py,sha256=nDhg0wY7eHRMO-xOErno0mV0Ya_ynlmKAp-4a3nj-us,810
openai/resources/containers/files/content.py,sha256=ObCykVTpSXFdzZUfH_zsZMLtouYZNkjBmugDoII1kIs,6491
openai/resources/containers/files/files.py,sha256=-iVBULSqyDo0vHhMh9tQB6ITT6ygu57z9k6A9Vq0mqo,21120
-openai/resources/embeddings.py,sha256=3f6-avPEeoH9rrfHNL1Ef3woqG-CwWC3iXPDk5pvZEY,12200
+openai/resources/conversations/__init__.py,sha256=Uslb4pakT8pQJGQ29CvoiN-SvN2AgMum-TeIDyYTzQE,888
+openai/resources/conversations/conversations.py,sha256=7YPgatQZYp4z95n72szQR1rTOPO5UfWoATADiJMyaZw,18765
+openai/resources/conversations/items.py,sha256=D3ZhT_5uGJD01sMmtop2uki-USV5AwS3nmEc-hBUbzo,24065
+openai/resources/embeddings.py,sha256=cQp63Y-3fNzlxxJEzFfgXFoZnpqVmXj_HxdSfgnR5U4,12416
openai/resources/evals/__init__.py,sha256=DXhYb6mCKKY2bDdS3s4raH1SvwPUyaBFvdHgPEbwRWY,771
openai/resources/evals/evals.py,sha256=wMMBn54Tz1MP9yG-GftiiDPlcsTiz4OEtuCn4Vic2k0,26036
openai/resources/evals/runs/__init__.py,sha256=7EtKZ43tGlmAOYyDdyFXy80tk2X8AmXb5taTWRRXBXE,850
openai/resources/evals/runs/output_items.py,sha256=jERS8aeNpZCCpsV-tfry3jYpxo15baxWj0eyjif8t0Y,12590
openai/resources/evals/runs/runs.py,sha256=XFxwcydOE0LcTuCORe0F1q5FjjupgENId5C6A27aq78,24455
-openai/resources/files.py,sha256=-C3vO77pqlP6Zx3q4HjgdX1-8cYu-wNp1Y4Q4sIAhcY,29720
+openai/resources/files.py,sha256=Pra8nNaukgAM6qXcnK3Ry7ireBdMVcnGzI9vtxP1ys0,30376
openai/resources/fine_tuning/__init__.py,sha256=RQPC5QfqE-ByhRQbJK-j7ooUrkBO9s9bKt5xkzOL8ls,1597
openai/resources/fine_tuning/alpha/__init__.py,sha256=QKAYZscx1Fw3GLD8cVdZAYG9L_i6MnPGeifn8GgcztU,810
openai/resources/fine_tuning/alpha/alpha.py,sha256=P-zLOHpI-Aa0jUUWspkanL7WpUtfjwIGDH8KTGDNeHY,3274
openai/resources/fine_tuning/alpha/graders.py,sha256=T22E_IdvrwRkxlFln87ETAABvWskuNyfmPLhR31bTF4,10764
openai/resources/fine_tuning/checkpoints/__init__.py,sha256=rvsbut5FCQNAr-VjvL-14GFT3Tld49FlFuBJDpfxBug,940
openai/resources/fine_tuning/checkpoints/checkpoints.py,sha256=njpz496JifeZ8RXjoYUb1Tj9tBItuXRxGJHW2jrrfwo,3606
-openai/resources/fine_tuning/checkpoints/permissions.py,sha256=XK11gP3bywJ8GjpBv8rtT6R9QWZq9VKBPjWzFWYcoGI,17151
+openai/resources/fine_tuning/checkpoints/permissions.py,sha256=igZOYcNtmlnoQFX3Q3IjwNS2fIzAuxgEBTmwzbhebd4,17163
openai/resources/fine_tuning/fine_tuning.py,sha256=UL4MXoUqEnbSZ5e4dnbUPTtd4tE-1p2L7Hh_0CQ_0s0,5410
openai/resources/fine_tuning/jobs/__init__.py,sha256=_smlrwijZOCcsDWqKnofLxQM2QLucZzXgboL9zJBPHw,849
openai/resources/fine_tuning/jobs/checkpoints.py,sha256=Z6p_IBzmVu3oRldxLKVKGVm1E8Xf7UUnItSnV7PJI9Y,7466
-openai/resources/fine_tuning/jobs/jobs.py,sha256=_I_mbAdX9wRWFT4tVyM3xmuakMuZN_KPHk-yxYGlzoE,37125
-openai/resources/images.py,sha256=Q6q7b1hls4IlLDaafs2f0uwWVT-p0EXcoMsdIH8IUQs,34257
+openai/resources/fine_tuning/jobs/jobs.py,sha256=gVnJKrvO-KKL4hq3gUHY4iMdAMkaH_lhNnyzBf-caGc,37237
+openai/resources/images.py,sha256=qFM-W9hkgO1ROPa7VKSHEJPwL1QKtOo1B0kNCgNeUyc,97291
openai/resources/models.py,sha256=CzLpB5Oj1x7U6eNKOcK0Z7M-NjEIpZvdWQLDAyIm7wM,11232
-openai/resources/moderations.py,sha256=P_fgkp6sxnLo0k4b8Fcm0MSmOh-Oyj_wU_NFGfGPszE,7784
+openai/resources/moderations.py,sha256=3VKDxVt7nWyK3SGq-7BDzfxgJWRCERYtoGluhDxWpdU,7814
+openai/resources/realtime/__init__.py,sha256=c4zQkJRr5mnDpJtVkY4w7abaPQaTNIw7DJ-h1XFWiAM,928
+openai/resources/realtime/client_secrets.py,sha256=6-ujnVsjIsv0nCka5I5HLT0mXpXZkIxxji_SnZmLA3A,7724
+openai/resources/realtime/realtime.py,sha256=NhVCfN9Me2ajzEbofDyDv8jyMfFwtJB4UOqAoB0g9QA,43272
openai/resources/responses/__init__.py,sha256=nqybLst4yLblEyC-vAJYOVgM2X4BvcFmgluRNqOGIhk,902
-openai/resources/responses/input_items.py,sha256=3FnbfOdMjbgtadTZPYQdKJut4NrorT1K9ppg5l9idoY,9150
-openai/resources/responses/responses.py,sha256=1ddJs3NsDcHpz5KOB6vfW73NCMDOmGT-RvijqzuHpeU,128382
+openai/resources/responses/input_items.py,sha256=UKqVz_T_SgS-rAX5iYWI9XRjUEaqv2N1P932Mixzljg,8834
+openai/resources/responses/responses.py,sha256=RguAEbEOWt2nDbDVn4wBxV-Xj6zhJOZKXRJvPd_TDkk,159306
openai/resources/uploads/__init__.py,sha256=HmY3WQgvUI2bN3CjfWHWQOk7UUC6Ozna97_lHhrrRSA,810
openai/resources/uploads/parts.py,sha256=n-G6rFFUaTs4Od1t37bvOzE0bV2VLwODjFBdbpsnkW0,8121
-openai/resources/uploads/uploads.py,sha256=om7V-X4eYkxODSGBLamwKxBoPh--2FK5pJsfVjNHSZ0,24881
+openai/resources/uploads/uploads.py,sha256=sBz8vvcuqCPHeiqOj9j6k0q6dnDLYSU71ivZYghjQCs,25579
openai/resources/vector_stores/__init__.py,sha256=11Xn1vhgndWiI0defJHv31vmbtbDgh2GwZT3gX8GgHk,1296
-openai/resources/vector_stores/file_batches.py,sha256=OgBqHEiNX8qLZZG1Ts8yJ4DcUEoByTcCdGL6-VQcb4k,33076
-openai/resources/vector_stores/files.py,sha256=2Ywh3UUunEDXvnJbqwvl0TMjAgqVYnW6eRtCMj7f0lA,39445
-openai/resources/vector_stores/vector_stores.py,sha256=UdDiwEjetJSkgsuX0n1aBFPQdPVM17fF2EqUn8ZWuIo,35249
-openai/types/__init__.py,sha256=3Ci6p75iU4Lr4rNAcgITmy_k5IVXjVl8_4L-8fQ3xBk,6164
+openai/resources/vector_stores/file_batches.py,sha256=Uy8-m695YoJmbjyQIZaIH9Udrl8MIreHc7VB5ilQZt0,33146
+openai/resources/vector_stores/files.py,sha256=4akTfvMTQOlPkY6bB6-8QLuxGPsrm_78t7UdJtyaNK8,39961
+openai/resources/vector_stores/vector_stores.py,sha256=6b2sTskq8YD2bvYXihIXqEzJnmQxxLKU7UwjO6V6Ulg,35299
+openai/resources/webhooks.py,sha256=wz3filqxxUEhhW5RSa-1LiN10MzafKXJPl5-Wb1mCew,7820
+openai/types/__init__.py,sha256=r4ftepF2GBzDtkmXwvkG6-HIg5wvz6wr9x2ulnB8fVE,6873
openai/types/audio/__init__.py,sha256=l_ZTfiqnguKJfEEb61zegs8QsVdW9MlIkGkn8jIDRlU,1426
-openai/types/audio/speech_create_params.py,sha256=nWFAnqH8ApYROP6pfOQFTa_-m-o0tQjufb1ManzQoT8,1657
+openai/types/audio/speech_create_params.py,sha256=u7FQabjLOgNhZu4FMyk1sa9qbadrmWzc-VnSesZXz3M,1780
openai/types/audio/speech_model.py,sha256=i_YqCZ4AWN0jCY70F8FAazQAsbQyG-VUQGxSJnLsviw,237
-openai/types/audio/transcription.py,sha256=YrTEIp6pIuW8zGEK7_MBNCBw3Y41pdPeL5dEVrM46Q0,787
+openai/types/audio/transcription.py,sha256=lUl3qdjgrK94zCjgpD4f9xa9w-vNhOTxh9hPeYj3ymc,2102
openai/types/audio/transcription_create_params.py,sha256=gV-2utqqPxbxShZDCPd_jhd6LjyT1NU9XVJOgty5h0c,5678
openai/types/audio/transcription_create_response.py,sha256=-PLGH8he9EdJtvBXV-ZrE31CLVnk4bc0VQ1ixRoN8Ck,378
openai/types/audio/transcription_include.py,sha256=mclUP_50njW7TG4d9m_E6zSjAFW8djPJ6ZTYub71kq0,227
openai/types/audio/transcription_segment.py,sha256=-pPAGolwIIXUBMic-H5U7aR0u_Aq-pipSA4xTtn_viA,1153
openai/types/audio/transcription_stream_event.py,sha256=e0ZMA1Ls5bR4C5NnPxZxfs-xiSczi8hrWMaF27pneUU,536
openai/types/audio/transcription_text_delta_event.py,sha256=jbfzVsjefZm64HAHXkKm4QskXxNqeEPj23xRt1clqvc,1075
-openai/types/audio/transcription_text_done_event.py,sha256=l-yxhofvs3_H6NLFy6Sqqs3Ap7bel4xuweYCeqEOJic,1084
-openai/types/audio/transcription_verbose.py,sha256=QkQBIdpvsubHjSvmvTb5ryo8Yzog3ZMvv4HZukEsjxI,760
+openai/types/audio/transcription_text_done_event.py,sha256=Q2-fKHeO_niBWWSCl-ZehKKz9DDM7KEimBx5Ha5e4t8,1940
+openai/types/audio/transcription_verbose.py,sha256=Dm5rPAMeMD-ZwijA8xff34QdOGLiRD5J2CN9R_dTIRo,1114
openai/types/audio/transcription_word.py,sha256=s2aWssAgHjMOZHhiihs1m4gYWQpjBP2rkI1DE5eZBXc,367
openai/types/audio/translation.py,sha256=Dlu9YMo0cc44NSCAtLfZnEugkM7VBA6zw2v9bfrLMh0,193
openai/types/audio/translation_create_params.py,sha256=ejrom_64QOe47gZtrYmDAQkb65wLaZL4-iU-mKVTVq0,1572
@@ -843,13 +1093,13 @@ openai/types/audio_model.py,sha256=suo0Ei6ODS2ksMRicXAzCfuDTGcbiMjwzVLi-bf4A6s,2
openai/types/audio_response_format.py,sha256=EEItnQdwXinG8bOe1We2039Z7lp2Z8wSXXvTlFlkXzM,259
openai/types/auto_file_chunking_strategy_param.py,sha256=hbBtARkJXSJE7_4RqC-ZR3NiztUp9S4WuG3s3W0GpqY,351
openai/types/batch.py,sha256=FuGQ-x8kK6VMyYIQeP5gu_LEmfzXMCht5ySHdFfJQnE,2880
-openai/types/batch_create_params.py,sha256=zHR9CwYMuhSzUacgBoA1L5jCHxuxVnt6BxR3Le2ghh4,1820
+openai/types/batch_create_params.py,sha256=p5qhTnzYVsAcXFuCj4Qyk3yPIo-FxSllTecdidq3dSs,2467
openai/types/batch_error.py,sha256=Xxl-gYm0jerpYyI-mKSSVxRMQRubkoLUiOP9U3v72EM,622
openai/types/batch_list_params.py,sha256=X1_sfRspuIMSDyXWVh0YnJ9vJLeOOH66TrvgEHueC84,705
openai/types/batch_request_counts.py,sha256=u_a_hehmqYE6N6lA3MfvF1-CVzR9phiMlHgh_sRff0Y,408
openai/types/beta/__init__.py,sha256=uCm_uj8IYmxFZYD9tmGcEqpeEKnlzo64pNHcwdvnNv0,2328
openai/types/beta/assistant.py,sha256=_OgFKmjaMXM2yNOTFTcCj5qVo_-F9p7uiEXJnYbB0XE,5054
-openai/types/beta/assistant_create_params.py,sha256=vn4P-38i0uCN7D0tH8Rhn8VZPWXtZCW_QxvcxDj1ToI,7897
+openai/types/beta/assistant_create_params.py,sha256=vmQlDewm-Zoa8jjZc4FGV_ocGtXlMbIaqShJHbpwsf4,7948
openai/types/beta/assistant_deleted.py,sha256=bTTUl5FPHTBI5nRm7d0sGuR9VCSBDZ-IbOn9G_IpmJQ,301
openai/types/beta/assistant_list_params.py,sha256=yW-lj6AUkG0IRZQKre0veEr9p4VMN-9YdELFMYs74Cw,1222
openai/types/beta/assistant_response_format_option.py,sha256=yNeoAWxM-_8Sjmwqu8exqyKRFhVZIKeTypetPY55VFA,561
@@ -863,7 +1113,7 @@ openai/types/beta/assistant_tool_choice_option.py,sha256=jrXMd_IYIQ1pt8Lkc-KrPd4
openai/types/beta/assistant_tool_choice_option_param.py,sha256=VcatO5Nej9e5eqfrwetG4uM1vFoewnBEcFz47IxAK2E,424
openai/types/beta/assistant_tool_choice_param.py,sha256=NOWx9SzZEwYaHeAyFZTQlG3pmogMNXzjPJDGQUlbv7Q,572
openai/types/beta/assistant_tool_param.py,sha256=6DcaU3nMjurur2VkVIYcCaRAY1QLQscXXjCd0ZHHGho,501
-openai/types/beta/assistant_update_params.py,sha256=Kfz6anZrxJN1tfE_CV23rK5-LWMhHmu8AMJo0BP7t7U,6426
+openai/types/beta/assistant_update_params.py,sha256=HMgyYQ9tNL9ab33VacvsJV1o1ebKXxAzOuva7wWx2Ck,6646
openai/types/beta/chat/__init__.py,sha256=OKfJYcKb4NObdiRObqJV_dOyDQ8feXekDUge2o_4pXQ,122
openai/types/beta/code_interpreter_tool.py,sha256=7mgQc9OtD_ZUnZeNhoobMFcmmvtZPFCNYGB-PEnNnfs,333
openai/types/beta/code_interpreter_tool_param.py,sha256=X6mwzFyZx1RCKEYbBCPs4kh_tZkxFxydPMK4yFNJkLs,389
@@ -873,26 +1123,26 @@ openai/types/beta/function_tool.py,sha256=oYGJfcfPpUohKw2ikgshDjOI1HXCK-5pAWyegY
openai/types/beta/function_tool_param.py,sha256=hCclpGO4Re-TxiGy_QxX75g1kcN6_ElubicO6SdJ_YI,471
openai/types/beta/realtime/__init__.py,sha256=trJb-lqh3vHHMYdohrgiU2cHwReFZyw4cXM-Xj8Dwq8,7364
openai/types/beta/realtime/conversation_created_event.py,sha256=U4-nesN8rAep2_25E2DrkXUMafQejj3NE_0llXKj5Y8,752
-openai/types/beta/realtime/conversation_item.py,sha256=av6WCjWVuRxBjccmxv4j26cd3TCKURj2a7cf8uS3P3s,2297
-openai/types/beta/realtime/conversation_item_content.py,sha256=dj0XAEPqj4UPVb3E2nIgb8bZBA-PRNK-E7o3des6wmw,1005
-openai/types/beta/realtime/conversation_item_content_param.py,sha256=CKEwY9j6ApnvfsLKrdkEFfOW1CtxUWyY9OL-rIMUNaw,927
+openai/types/beta/realtime/conversation_item.py,sha256=eIFg9zl3qzEijcQZvCnkvVLpSZgvEdubasgxGsQuFM4,2327
+openai/types/beta/realtime/conversation_item_content.py,sha256=KWZY8EUkjAi6K_IkWVjjrNZLG3KK2vGCy2_O30CEhzY,1050
+openai/types/beta/realtime/conversation_item_content_param.py,sha256=CrGi3XKwnfJdQGs-kJaGCsn53omdJF6_je0GWnVXhjQ,972
openai/types/beta/realtime/conversation_item_create_event.py,sha256=jYXYdmqJh_znzcAgDuCxJXo5shf-t_DwmsyFkaDVnAE,1081
openai/types/beta/realtime/conversation_item_create_event_param.py,sha256=vxTag6TrOLu1bf46F3mUmRkl5dd1Kb6bUp65gBDVmhM,1101
-openai/types/beta/realtime/conversation_item_created_event.py,sha256=DIeG7YQ5HdKrnbnorklB1Zfsz42yRdPKDOx5TPzfvw0,722
+openai/types/beta/realtime/conversation_item_created_event.py,sha256=cZBm_uKk5dkQXLlbF0Aetg4NJge3Ftz9kwRu2kCI3m4,817
openai/types/beta/realtime/conversation_item_delete_event.py,sha256=p-O6R1Ku5pxZvaxhSi4YTPqLXS1SHhdLGgJuPQyPcHY,549
openai/types/beta/realtime/conversation_item_delete_event_param.py,sha256=a17h8Hd8MxUbXT6NQg8YpTr1ICt1ztRecpfukHw4g34,569
openai/types/beta/realtime/conversation_item_deleted_event.py,sha256=uWHSqX5ig550romSdhtROwrdQmdeN31Oz1Vpr9IuQFI,492
-openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py,sha256=wCFIvqGe4AUCsSjZ00-w6synK_uf3MWKxxFnDUphtDc,1173
+openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py,sha256=FwZHHO4ZGMKoeQ80snCh_S-7anNUQtRLOhGjb8ScGOQ,2538
openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py,sha256=5kjLmnRJug7L5fHxSSWWbhB70jGwNaMwbdENEwz9Xek,1143
openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py,sha256=xYNSBIyERQJ4P-5YoFF1VptfPa8JnJ0sWaH6LGsPow0,1077
-openai/types/beta/realtime/conversation_item_param.py,sha256=x12A5-yjNWodFNJEnbHKY1WJzSzX9s7EQr2c5FuYKBQ,2177
+openai/types/beta/realtime/conversation_item_param.py,sha256=HMB7MFR6WkztV1vMCFdIYNv8qOY4jzI2MIDtr9y8nEo,2207
openai/types/beta/realtime/conversation_item_retrieve_event.py,sha256=5Cc7f0fM8ujwER0eIcQRwz0rmc6hdCUrAqiAvRNn9Zc,559
openai/types/beta/realtime/conversation_item_retrieve_event_param.py,sha256=TRYaZ3btNaywRPaMOVRzK5VT7wh4taIGjbUdhkZ7gFc,579
openai/types/beta/realtime/conversation_item_truncate_event.py,sha256=1c2_BamaTkgD26eyGZJU5xwbz7lRHupqU2HqcK0VniI,943
openai/types/beta/realtime/conversation_item_truncate_event_param.py,sha256=hSnVOSMMtLf16nn4ISHkevYCfEsiN9kNcgxXRtHa8Kc,983
openai/types/beta/realtime/conversation_item_truncated_event.py,sha256=K4S35U85J-UNRba9nkm-7G1ReZu8gA8Sa1z0-Vlozc0,704
-openai/types/beta/realtime/conversation_item_with_reference.py,sha256=WF4r7-aw9Z6m6aNEy_fe9aHq8W-YxhwgU65PnLAQTgw,2564
-openai/types/beta/realtime/conversation_item_with_reference_param.py,sha256=yPM2TL7pMhz5UfJ37_FTn1H6r2WRbdxkAaW5jGCMfh8,2444
+openai/types/beta/realtime/conversation_item_with_reference.py,sha256=NDMfbnG0YKLqWJskFSHRIMkN2ISs8yNRxP6d6sZshws,3288
+openai/types/beta/realtime/conversation_item_with_reference_param.py,sha256=X0iEdjijFkoGtZtp0viB8bAFqMn4fNNSvJiCZbgJ-3Q,3079
openai/types/beta/realtime/error_event.py,sha256=goNkorKXUHKiYVsVunEsnaRa6_3dsDKVtrxXQtzZCmk,877
openai/types/beta/realtime/input_audio_buffer_append_event.py,sha256=lTKWd_WFbtDAy6AdaCjeQYBV0dgHuVNNt_PbrtPB8tg,662
openai/types/beta/realtime/input_audio_buffer_append_event_param.py,sha256=XmN2bE6jBRrkKGVPJdnPjJql5dqMPqwbmFnxo-z22JE,682
@@ -901,14 +1151,14 @@ openai/types/beta/realtime/input_audio_buffer_clear_event_param.py,sha256=y-zfWq
openai/types/beta/realtime/input_audio_buffer_cleared_event.py,sha256=j9gpm7aGVmrUt48wqtvBMN8NOgtvqHciegjXjOnWm7A,429
openai/types/beta/realtime/input_audio_buffer_commit_event.py,sha256=SLZR2xxRd6uO3IQL6-LuozkjROXiGyblKoHYQjwXk4I,493
openai/types/beta/realtime/input_audio_buffer_commit_event_param.py,sha256=B8agXC-rUl-D-RijJ5MeTLgw43qVYzmf2_2oAVokhLY,503
-openai/types/beta/realtime/input_audio_buffer_committed_event.py,sha256=wXMxuXLw1jmT4e-FmTp6rSxcSc_4l55zO3gT7jI1Mp4,628
+openai/types/beta/realtime/input_audio_buffer_committed_event.py,sha256=76XHl3ETfG5YiYce2OCUsv0wNfSiaabLzYVjGtBwux0,733
openai/types/beta/realtime/input_audio_buffer_speech_started_event.py,sha256=NVp60RUsLFtte9Ilknmu_5lRk2dZp_1fXCgGHd4EvSM,861
openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py,sha256=gszRuYQtAW8upIhd7CJZ7pxboDk-K7sqidjqxgf47q4,779
openai/types/beta/realtime/rate_limits_updated_event.py,sha256=kBnf_p-49Q_LNdJsj0R1Szi8R4TGYAAJ_KifLuuyFZw,949
openai/types/beta/realtime/realtime_client_event.py,sha256=0c48JcJH5yruF52zl0Sanm_dd2W5ZHV5GocRG0Xm6m4,1839
openai/types/beta/realtime/realtime_client_event_param.py,sha256=xBeZ60Q-OWuZxstPQaoqE0DUTDOPOwrL8LWMmDJI2rM,1887
openai/types/beta/realtime/realtime_connect_params.py,sha256=AvTypkFCYmDn9qMeektVqij6cqzgovr3PpgpMalJoJ4,290
-openai/types/beta/realtime/realtime_response.py,sha256=uS72PRUMwozY3xEMiv2XavPfrcIytjCRH7ZAYTCj5LI,3622
+openai/types/beta/realtime/realtime_response.py,sha256=iUOItlPQv6-okCuiTsloe0LDVyJ0MUQ64ug8ZaLePnw,3567
openai/types/beta/realtime/realtime_response_status.py,sha256=gU-59Pr_58TRfMZqFzdCloc53e1qOnU4aaHY3yURUK8,1326
openai/types/beta/realtime/realtime_response_usage.py,sha256=6XOFjCjPWioHoICZ0Q8KXuUzktQugx6WuTz0O5UvzZg,1541
openai/types/beta/realtime/realtime_server_event.py,sha256=-PpqZpg-DL_C_wseLMRQHWdBvxnVGRAfOF7x13Qr34E,5408
@@ -920,8 +1170,8 @@ openai/types/beta/realtime/response_cancel_event.py,sha256=EKx8IZUISJHdl-_3tCdHt
openai/types/beta/realtime/response_cancel_event_param.py,sha256=nidzBL83liHwyImiNGiz9Ad0V34EtFAQDw1utqcF6ns,630
openai/types/beta/realtime/response_content_part_added_event.py,sha256=a8-rm1NAwX685fk7GdT6Xi0Yr-JfeAkyUr94-RoFe34,1232
openai/types/beta/realtime/response_content_part_done_event.py,sha256=jO2TZygxPabbnEG9E1AfNP-JYJv1QtCMnCzgcZ_3n18,1190
-openai/types/beta/realtime/response_create_event.py,sha256=44ip8SdNcqfwg4ToQ7zY2hyKoaiQuR-yH8u4nIj6J7o,4844
-openai/types/beta/realtime/response_create_event_param.py,sha256=viQVrf4a-tNkrWlbelT6fPRgagk5VqHBZ3wARf3rEOE,4683
+openai/types/beta/realtime/response_create_event.py,sha256=46i-O9wwvhr1CzHNMDzhs2SGVwHiFJDOkQfOZZRfAWo,4763
+openai/types/beta/realtime/response_create_event_param.py,sha256=IPJlTWH0HzsknpSRrFgrQ3bfxsFZVRdQ6IYEsiGSZOk,4619
openai/types/beta/realtime/response_created_event.py,sha256=zZtHx-1YjehXxX6aNE88SFINDaKOBzpzejo6sTNjq9g,506
openai/types/beta/realtime/response_done_event.py,sha256=_yUPoECCli89iHLtV3NQkXQOW6Lc1JlxVPFw04ziBGY,494
openai/types/beta/realtime/response_function_call_arguments_delta_event.py,sha256=Yh2mQZDucfnTLiO8LRyG9r7zeS1sjwLcMF1JPMdTFJc,793
@@ -930,23 +1180,23 @@ openai/types/beta/realtime/response_output_item_added_event.py,sha256=-_BZjvAqcg
openai/types/beta/realtime/response_output_item_done_event.py,sha256=0ClNVMZmeIxKghlEid9VGoWiZ97wp00hIdNnev4qBD8,709
openai/types/beta/realtime/response_text_delta_event.py,sha256=B1yyuc6iMOMoG5Wh6W5KoQNYtVD1vEm2cKqHnl2CuFQ,721
openai/types/beta/realtime/response_text_done_event.py,sha256=mPgVG6nWxwkZ3aZOX-JkVF7CpaWP5-bvtbxFrr4fK7g,724
-openai/types/beta/realtime/session.py,sha256=vk_fFSSpLNVsne5gK1IMyiHh4C3dsavoCCRalnvAyc8,10196
-openai/types/beta/realtime/session_create_params.py,sha256=LhanVmuP4TDyMbPJJuhKpm0fQf3R_RenSk7dgssdt-o,10223
-openai/types/beta/realtime/session_create_response.py,sha256=PzFMvSt2We111uq47Gs5MpgzLngaFxKG481pHiDNaG0,6709
+openai/types/beta/realtime/session.py,sha256=fAG4Z404H11aHq46KbcwZV9DoZ4QZV8UWZRtEOLS2b0,10116
+openai/types/beta/realtime/session_create_params.py,sha256=GJoh-uXky8uj0BX0_pw1LWFUmmIb7bpcg7HEsbH4hVc,10192
+openai/types/beta/realtime/session_create_response.py,sha256=HfCFE46q3IEfvLFEdU06DAg5GKIPlJjaU9DtKzKcr2U,6574
openai/types/beta/realtime/session_created_event.py,sha256=rTElnBlE7z1htmkdmpdPN4q_dUYS6Su4BkmsqO65hUc,489
-openai/types/beta/realtime/session_update_event.py,sha256=NJzgZ-YBiHS8D_Aam4O6WRrPmIkmSHjmBpZvl96xXEo,11255
-openai/types/beta/realtime/session_update_event_param.py,sha256=gLdcY61WG7c8cWOerW_6rHerbyvzITOekYZ4YWtint8,10731
+openai/types/beta/realtime/session_update_event.py,sha256=7OSTuP3u2-QqsR4W_ApQoBVkBD3akYmbrzJi6m608rg,11170
+openai/types/beta/realtime/session_update_event_param.py,sha256=0n9BfkIDIIRP3BIz3jltuOxokbii06ZoHIS4gKBzBjk,10690
openai/types/beta/realtime/session_updated_event.py,sha256=HyR-Pz3U9finVO-bUCvnmeqsANw-fceNvVqEIF6ey10,489
openai/types/beta/realtime/transcription_session.py,sha256=Soo2LuEMJtkUD2oPJ1E23GUcoUrYBiSu_UtbLUKemfw,3184
-openai/types/beta/realtime/transcription_session_create_params.py,sha256=djkUaaz5xIVNct2NTT-7htg_6I6yfkgk20L4aaUHdbM,5975
-openai/types/beta/realtime/transcription_session_update.py,sha256=6j5QC8GBnfBIt3K0uHWWHRs5QA0jrcH15KJqAj3LmVo,6693
-openai/types/beta/realtime/transcription_session_update_param.py,sha256=cadQsYH-xauX2QxbYzHfy1Yiyt7BN__3PjjLE-5wVso,6452
+openai/types/beta/realtime/transcription_session_create_params.py,sha256=BVwSY41UX2njXAJpWynMJtC5XuKv6sNs7kp2Y8KSjnk,5976
+openai/types/beta/realtime/transcription_session_update.py,sha256=YMP9OB9P5FaSwaicXtYELjm4hD1gDSvKFq9YtF2sq64,6694
+openai/types/beta/realtime/transcription_session_update_param.py,sha256=b99v4yKnB2lC_cnYGiaxKnQuHB4eUW-v3eKT2UDsamk,6453
openai/types/beta/realtime/transcription_session_updated_event.py,sha256=CKAS98QL7CuOVEWF6qGcC9qhTktdG2CPPJXbrW75GIM,833
openai/types/beta/thread.py,sha256=RrArSK1-_prQY_YBexgD_SU87y_k2rmRq_tti66i7s4,2132
-openai/types/beta/thread_create_and_run_params.py,sha256=qbNAG1qC6iWmWTmo1fvJeQ7BprMohPqFMr9TDYpwxeM,14793
-openai/types/beta/thread_create_params.py,sha256=WrCDxLq9adjyDM5Hma6Jy146mKD1FpZ6AqRRVwBOEtw,6500
+openai/types/beta/thread_create_and_run_params.py,sha256=MPLDgaZ69PR-WZRPN_zwwF1--Cg9d99oD3rC2aWiCCk,14875
+openai/types/beta/thread_create_params.py,sha256=T0ok3yJ6ZXqPbX5dqwpQp7YFWCXoAhz_zbroMo6rPDQ,6561
openai/types/beta/thread_deleted.py,sha256=MaYG_jZIjSiB9h_ZBiTtpMsRSwFKkCY83ziM5GO_oUk,292
-openai/types/beta/thread_update_params.py,sha256=FXMPLWIBCmWJnZ3Ktdn8PkSvyA4_Tx0HHzVovBs_lOU,1877
+openai/types/beta/thread_update_params.py,sha256=eN1VyP4lk6puJseydX9Ac9CLZLobYQJcijEWk1RlHKc,1928
openai/types/beta/threads/__init__.py,sha256=0WsJo0tXp08CgayozR7Tqc3b8sqzotWzvBun19CEIWc,3066
openai/types/beta/threads/annotation.py,sha256=Ce3Y0mSodmYRkoqyhtyIdep6WfWew6KJJgtrENOnfek,462
openai/types/beta/threads/annotation_delta.py,sha256=iNsE-1Gn1yU0TlTHoxqKbOvPRUxWuXsF72qY_mMnWGY,510
@@ -979,8 +1229,8 @@ openai/types/beta/threads/message_update_params.py,sha256=XNCSLfRkk531F8mNbUB9bR
openai/types/beta/threads/refusal_content_block.py,sha256=qB9jrS2Wv9UQ7XXaIVKe62dTAU1WOnN3qenR_E43mhg,310
openai/types/beta/threads/refusal_delta_block.py,sha256=ZhgFC8KqA9LIwo_CQIX-w3VVg3Vj0h71xC1Hh1bwmnU,423
openai/types/beta/threads/required_action_function_tool_call.py,sha256=XsR4OBbxI-RWteLvhcLEDBan6eUUGvhLORFRKjPbsLg,888
-openai/types/beta/threads/run.py,sha256=erWl8z0MiFq9_dbFb_HN6AHdUru_H3NFM97OTZjBECE,8337
-openai/types/beta/threads/run_create_params.py,sha256=RsoF4VQs3sijm9zHNJTHZnbGW_uvTphrgykkrhQmqpA,10316
+openai/types/beta/threads/run.py,sha256=cFOL77mXgELKefaRVN9Ds2jKoxYtBYwE6-82iegarcA,8338
+openai/types/beta/threads/run_create_params.py,sha256=qVaLiQDkZBBRAH5iaV8vRKycStlF1SCbEErQpp4SfOQ,10307
openai/types/beta/threads/run_list_params.py,sha256=TgepSLrupUUtuQV2kbVcoGH1YA0FVUX9ESkszKuwyHY,1210
openai/types/beta/threads/run_status.py,sha256=OU1hzoyYXaRJ3lupX4YcZ-HZkTpctNE4tzAcp6X8Q9U,351
openai/types/beta/threads/run_submit_tool_outputs_params.py,sha256=cKiyD374BsZN_Oih5o5n5gOf_DYsxErVrbgxveNhmPI,1643
@@ -1011,52 +1261,65 @@ openai/types/beta/threads/text_content_block.py,sha256=pdGlKYM1IF9PjTvxjxo1oDg1X
openai/types/beta/threads/text_content_block_param.py,sha256=feQr0muF845tc1q3FJrzgYOhXeuKLU3x1x5DGFTN2Q0,407
openai/types/beta/threads/text_delta.py,sha256=2EFeQCkg_cc8nYEJ6BtYAA3_TqgMTbmEXoMvLjzaB34,389
openai/types/beta/threads/text_delta_block.py,sha256=pkHkVBgNsmHi9JURzs5ayPqxQXSkex3F0jH0MqJXik0,448
-openai/types/chat/__init__.py,sha256=Ecu39-qSoX-TSW_2uHKshAEfeaexm0mZcHprAnzOR1s,4276
-openai/types/chat/chat_completion.py,sha256=jtjSQDCqcziOSlfsc7zt3amrDiZNCbb-OROEl5sU9I8,3492
-openai/types/chat/chat_completion_assistant_message_param.py,sha256=E6ZrsjEN_JHOHO-wC7Uk90Fa7Qz7bfgx8jea0z6g30s,2421
+openai/types/chat/__init__.py,sha256=wyA0EWb0utj19dX0tCeGh4Jg5GrO3TGjmfQkR9HVxxE,6102
+openai/types/chat/chat_completion.py,sha256=cQQEYFoF1Cs3Xy_nskiwo0nrDGmdu5t4TtiQ03xA8T4,3488
+openai/types/chat/chat_completion_allowed_tool_choice_param.py,sha256=kQgAzwedjhFLqSzkhI59rJ2ZtfyMIhBQf09I9oJvpII,636
+openai/types/chat/chat_completion_allowed_tools_param.py,sha256=q7PeluUYm0xA9EbwwHdbbk72obyFyuChFDfG4zwIBto,1010
+openai/types/chat/chat_completion_assistant_message_param.py,sha256=aFQSXpV7XNWXSrpQuCjMNGyqq7sbCtpMIsJ1c8M2HFg,2437
openai/types/chat/chat_completion_audio.py,sha256=ioAcuhkIdk1TSZK1LqTXYcjTPxoaM2b0RhGJekyCABY,655
-openai/types/chat/chat_completion_audio_param.py,sha256=DMsgSEGm0EEne5b0ONgHlLbcN70g2Ji6orIhi6E3FxU,840
-openai/types/chat/chat_completion_chunk.py,sha256=DfoqNokLJV1NSjhm4nHlf0kduqx_AjjBK02MPnBbXFw,6010
+openai/types/chat/chat_completion_audio_param.py,sha256=FLcawzQQpYPC4_yC7h3hOvFa1NyvLECKGSAoKB1d-Mk,811
+openai/types/chat/chat_completion_chunk.py,sha256=zySvwbuKEVrPLuKVZgPUsIqq1D4nRjSp1U6nCkVFxGI,6006
+openai/types/chat/chat_completion_content_part_image.py,sha256=G51SQ-Pjc2FO8vtq_DizAlPe7WhloVZMK7L84Y2kECI,753
openai/types/chat/chat_completion_content_part_image_param.py,sha256=Gqv98qyD8jB81THZp49c8v2tHrId_iQp4NzciT9SKI0,797
openai/types/chat/chat_completion_content_part_input_audio_param.py,sha256=r1EXNEtjJo5oJ9AnP3omaJzACE1gSfdmob5Q0HKsOm4,704
openai/types/chat/chat_completion_content_part_param.py,sha256=0S9iFE1p93HG_Yx7Wj_TR2CmBNK_i7TaWE7HuE-tLc4,1259
openai/types/chat/chat_completion_content_part_refusal_param.py,sha256=TV1vu-IgrvKa5IBlPSIdBxUaW8g1zDhMOOBOEmhU2w0,467
+openai/types/chat/chat_completion_content_part_text.py,sha256=A9WfAYjt-8fbCzEn8kC9pTpK9e2G0aua58FqssXXfrY,363
openai/types/chat/chat_completion_content_part_text_param.py,sha256=4IpiXMKM9AuTyop5PRptPBbBhh9s93xy2vjg4Yw6NIw,429
+openai/types/chat/chat_completion_custom_tool_param.py,sha256=n-ThsvnkdKvRePzRdHEjikeXtju4K9Uc-ueB4LnByyM,1638
openai/types/chat/chat_completion_deleted.py,sha256=O7oRuPI6YDa_h7uKnEubsjtw8raTcyVmVk95hoDfo74,470
openai/types/chat/chat_completion_developer_message_param.py,sha256=OCFKdTWkff94VtgY7AaDUUFiZLT8LBn7WWxjbcIq2OM,830
openai/types/chat/chat_completion_function_call_option_param.py,sha256=M-IqWHyBLkvYBcwFxxp4ydCIxbPDaMlNl4bik9UoFd4,365
openai/types/chat/chat_completion_function_message_param.py,sha256=jIaZbBHHbt4v4xHCIyvYtYLst_X4jOznRjYNcTf0MF0,591
-openai/types/chat/chat_completion_message.py,sha256=-42ZyMvih2Cz20W-o3ahIv_m0WA7H8i612mFIBpNAuA,2511
+openai/types/chat/chat_completion_function_tool.py,sha256=Yw3wlkMQPjs-j2JQaBEcbxtXv9b0w2FJryRPegWknjc,445
+openai/types/chat/chat_completion_function_tool_param.py,sha256=isNPdszq2CXOZB6a-ALjTBRaX8T-BeToe2tApMepmto,519
+openai/types/chat/chat_completion_message.py,sha256=iC4SMjhTCVVO1Xueb_iAObMB_nLRc_PFxasfZK0A1kM,2521
+openai/types/chat/chat_completion_message_custom_tool_call.py,sha256=fbnL3fERlW4E9hd5EoCcb43zgCoaPc11tZ0AlBjoegM,643
+openai/types/chat/chat_completion_message_custom_tool_call_param.py,sha256=OvZxmUFfz7SDl55gvfscHaKPHUe8DmV83JzkQhJQplo,752
+openai/types/chat/chat_completion_message_function_tool_call.py,sha256=9KJxJ6T40mFBtznBnPE3wfHlzhQtNG_ayrn3ZYuIlyA,916
+openai/types/chat/chat_completion_message_function_tool_call_param.py,sha256=V09BFjYcP2pYigtrBfFtg6PfEPKbD0E6MAUxrDWyn_g,1025
openai/types/chat/chat_completion_message_param.py,sha256=aLrz_cX_CYymFdW9cMIPZpv0Z4zM50RECV3SH6QNZsc,1019
-openai/types/chat/chat_completion_message_tool_call.py,sha256=XlIe2vhSYvrt8o8Yol5AQqnacI1xHqpEIV26G4oNrZY,900
-openai/types/chat/chat_completion_message_tool_call_param.py,sha256=XNhuUpGr5qwVTo0K8YavJwleHYSdwN_urK51eKlqC24,1009
+openai/types/chat/chat_completion_message_tool_call.py,sha256=aWpKcV6NZZfx_-aGEwPz99IDWNCdRuwoYpUChs0Uvvc,738
+openai/types/chat/chat_completion_message_tool_call_param.py,sha256=rE_TbdN3N6JGzHecykgdFHZgI66p2lsl0loPpz5TxW0,458
+openai/types/chat/chat_completion_message_tool_call_union_param.py,sha256=L8IoSHXgIFxYyHSfXQJNN7FJlp31ez8X4l5eSPKGmYM,602
openai/types/chat/chat_completion_modality.py,sha256=8Ga0kruwJc43WD2OIqNudn7KrVRTPDQaalVkh_8bp9I,236
-openai/types/chat/chat_completion_named_tool_choice_param.py,sha256=JsxfSJYpOmF7zIreQ0JrXRSLp07OGCBSycRRcF6OZmg,569
+openai/types/chat/chat_completion_named_tool_choice_custom_param.py,sha256=K7LbF_AYWRohfzsVj8iCYNYePdAmqsqWmWoQBw_nsXk,565
+openai/types/chat/chat_completion_named_tool_choice_param.py,sha256=bS9rzU0SzIZCQCfOlEoRaRtFr10oIUV9HRQ_-iv6W0M,559
openai/types/chat/chat_completion_prediction_content_param.py,sha256=Xw4K_4F379LsXENOpZvREDn55cCnbmZ69xa4fw9w3bg,868
openai/types/chat/chat_completion_reasoning_effort.py,sha256=9sAGlM21dgRNOQRSsL_znZf9ruXcmvVriWeex0fRgMk,235
openai/types/chat/chat_completion_role.py,sha256=LW6-tqXaqpD7H53PiSXrjvIo6g4RfHhWityDm6Nfvig,275
-openai/types/chat/chat_completion_store_message.py,sha256=F2VcGoWEtXtWZc6-91rqTWj919zm_-nfoeGCdKt7edM,305
-openai/types/chat/chat_completion_stream_options_param.py,sha256=MOtUGVQvdZZZvyaAT-8qK9oXzVW3NbtSICt9ysdrmh4,773
+openai/types/chat/chat_completion_store_message.py,sha256=krUE7xzu6DWc64_yAOABOGfM8-aFeE59HDF1QLoOgek,916
+openai/types/chat/chat_completion_stream_options_param.py,sha256=5didkVskgUUcVH6BjfCnA6hG4lp9LOiBU7cDnx3abh0,1311
openai/types/chat/chat_completion_system_message_param.py,sha256=WYtzmsNP8ZI3Ie8cd-oU7RuNoaBF6-bBR3mOzST9hMw,815
openai/types/chat/chat_completion_token_logprob.py,sha256=6-ipUFfsXMf5L7FDFi127NaVkDtmEooVgGBF6Ts965A,1769
-openai/types/chat/chat_completion_tool.py,sha256=Zc_nRaV7pVOR3IAPtDfRh4DY3Ua5oxOmW_C_M2VC-nU,429
-openai/types/chat/chat_completion_tool_choice_option_param.py,sha256=ef71WSM9HMQhIQUocRgVJUVW-bSRwK2_1NjFSB5TPiI,472
+openai/types/chat/chat_completion_tool_choice_option_param.py,sha256=wPIjU-eeybPjRFr28mx8Njp2OCrKw3Xpu0231z4Kz1A,758
openai/types/chat/chat_completion_tool_message_param.py,sha256=5K7jfKpwTuKNi1PTFabq_LHH-7wun8CUsLDh90U8zQE,730
-openai/types/chat/chat_completion_tool_param.py,sha256=J9r2TAWygkIBDInWEKx29gBE0wiCgc7HpXFyQhxSkAU,503
+openai/types/chat/chat_completion_tool_param.py,sha256=5hFt0Izat_o50JMJzspCYeB0gubilRDB3a6yIfGHoN8,431
+openai/types/chat/chat_completion_tool_union_param.py,sha256=smpIoekwuuXKQx9jRRB2cqc3L7_fmN5lB4IIJHlKhys,504
openai/types/chat/chat_completion_user_message_param.py,sha256=mik-MRkwb543C5FSJ52LtTkeA2E_HdLUgtoHEdO73XQ,792
-openai/types/chat/completion_create_params.py,sha256=FMLuEyChftnBrSZQp9icW4e-N4oL9a4EgeJ1u_8sxbc,15947
+openai/types/chat/completion_create_params.py,sha256=KM9_hrGMpifTmzvO9S2GU71Xlhl7zSDxm5yl8P9VFtM,17246
openai/types/chat/completion_list_params.py,sha256=QBKLa941_4fU2PAT2uLImYIfPZj-WdTqqpsy0vQ1b0c,931
openai/types/chat/completion_update_params.py,sha256=VRDF28qoonjrveHhw8BT4Yo_NlLsV2Qzd_KUUQ6AEG8,742
openai/types/chat/completions/__init__.py,sha256=nmKlohYbZmr7Pzv1qCDMSDbthcH6ySPFIgvXpHZtxK8,195
openai/types/chat/completions/message_list_params.py,sha256=IArlye40xGlMVIDHxsK9RX_5usPL71wXPMgdwI7_wYU,583
openai/types/chat/parsed_chat_completion.py,sha256=KwcwCtj0yexl6gB7yuOnyETRW-uUvNRYbVzPMkwCe5Q,1437
-openai/types/chat/parsed_function_tool_call.py,sha256=hJzcKOpzf1tnXC6RGbPhaeCawq8EFdnLK_MfRITkW1U,920
+openai/types/chat/parsed_function_tool_call.py,sha256=JDWYo1XhTDQ8CxssbgjpzBhUw8jeXAmEd5Tr_CqFrVA,945
openai/types/chat_model.py,sha256=yFvzwm6VJXCn6jN21FS-utN6bcBBzRIpKYk1VTP8sdo,177
openai/types/completion.py,sha256=yuYVEVkJcMVUINNLglkxOJqCx097HKCYFeJun3Js73A,1172
openai/types/completion_choice.py,sha256=PUk77T3Cp34UJSXoMfSzTKGWDK0rQQwq84X_PSlOUJo,965
-openai/types/completion_create_params.py,sha256=HzCjUtFl-iSyL-6OEaFcnv9fwMc0sUjzA6RN-VNlAco,7602
+openai/types/completion_create_params.py,sha256=UqgYjUpYbQYPdYETVxhkwgbGRKTQCBoyeSFtrB8iuAo,7652
openai/types/completion_usage.py,sha256=uf5n0vzlCkGAU67BBn_h7yhjd_G4OHpQbJnvzz0eO2A,1735
-openai/types/container_create_params.py,sha256=b_GnsDd-4nV5ecTqgxf630UkVe92fPP6zjw00nQH5-4,791
+openai/types/container_create_params.py,sha256=119x8wG_Cz_IC-N1ha02h9IlHBjh8DPHOkr2o6FiMa8,814
openai/types/container_create_response.py,sha256=5tItbVA4xiJRcJMvqPbSoIIO49n-Hmtq_MnLBz_ww-w,1129
openai/types/container_list_params.py,sha256=7RiUMBOEJj9QH9LYtPiwUrIufx8czF6kk2JcfO_LP_s,893
openai/types/container_list_response.py,sha256=LVxHagc20cMD9brVMMJaQ-LTO-9uACqF8nUupsO1bsY,1125
@@ -1068,11 +1331,35 @@ openai/types/containers/file_list_params.py,sha256=9bU7uKeXPk7adFzwvKHFitFOV4phn
openai/types/containers/file_list_response.py,sha256=xwvdMIUafkHSXJGQT1_mxt6T_8nJo-isp9M_5YTq-J8,718
openai/types/containers/file_retrieve_response.py,sha256=wGPU9o5SKkg8s4aUJXhwC38u8KfTFKmIUk1ItUdYxJg,726
openai/types/containers/files/__init__.py,sha256=OKfJYcKb4NObdiRObqJV_dOyDQ8feXekDUge2o_4pXQ,122
+openai/types/conversations/__init__.py,sha256=NVR5PMMZIK1uQJnvTHjZ9Lun-odb8dbPyrrVBlCnb4o,1754
+openai/types/conversations/computer_screenshot_content.py,sha256=yJ-i6Z9VxHt21iuz2K9i0saVWOsMzpLjqjjDoob1AAk,632
+openai/types/conversations/container_file_citation_body.py,sha256=aMPO-j2hW1RL0ow9PZBFwhJPBtcw9GyI1h38H9t1-wc,779
+openai/types/conversations/conversation.py,sha256=BVpec4hLHle_8iRf6v5y4CPYHtMhEntP0m8PDG_5GSY,886
+openai/types/conversations/conversation_create_params.py,sha256=SCdFXb83in1E5yHvqemnbL_E7UKqahJtjdnZlJr-u94,791
+openai/types/conversations/conversation_deleted_resource.py,sha256=HagMTsOrDL7QYQSeZqMbBMfRzhWAgnrxtinGT5uhog4,326
+openai/types/conversations/conversation_item.py,sha256=gvJBsNG0n00pKeQ3UTV_6v-FX8gloDQvusdHaYinL8g,6189
+openai/types/conversations/conversation_item_list.py,sha256=FvZW9mcZsKpaWNAI1PRuBtnKWt8vB1PEbDLmKN7ZF5o,667
+openai/types/conversations/conversation_update_params.py,sha256=1pMhZ2h6S1hYDDQ8CYABe8AR7LvMHN__AS5Zwv7ow-8,690
+openai/types/conversations/file_citation_body.py,sha256=agp2SCw8qpCuqO9aBROe0PRU4zwmXZssG72IlnDpx7Y,507
+openai/types/conversations/input_file_content.py,sha256=tlJEV311aNIX7rvJZTOO4XWPScjSh3Wqz1m9ueRSO44,619
+openai/types/conversations/input_image_content.py,sha256=j1msb8zvMsSSl3aNaLajpVuHymHj7VmOw9hz5mMmlLs,776
+openai/types/conversations/input_text_content.py,sha256=NdydUw15t9lE6XGMFEsLqmV1q97pqXS6iDGDCL_OIBI,373
+openai/types/conversations/item_create_params.py,sha256=TRAsvDuMBjLeL5DzqC-WyqmorZTnu4qZRt9eE13SJ8E,874
+openai/types/conversations/item_list_params.py,sha256=nMzeK_XkVTWsa5pMQDGDuRPfGwiXFBDcdZ4NYwYV7H4,1896
+openai/types/conversations/item_retrieve_params.py,sha256=lHK-Sqbd7DXWQKuXGRBUvu_a7LxYNAT_tBQqLP-OC5A,690
+openai/types/conversations/lob_prob.py,sha256=fujCb3k899Av1ClINZ6_epXEmbGIwEgJ4T47xhMF_aI,326
+openai/types/conversations/message.py,sha256=NWRYPevQNqwzrtA4pUKxuaTchm03o0H3yqzoePbI-Hg,1685
+openai/types/conversations/output_text_content.py,sha256=l8ExWWlQsupNjsnD_r7vwIDGOKlYMJMRU5kE2rspplM,960
+openai/types/conversations/refusal_content.py,sha256=grqT15gUQeogX_D4442txb8sINJl7r-a97wS_AImxpE,374
+openai/types/conversations/summary_text_content.py,sha256=NEPKcJ267hbd2ulGhBz3a_KOQWvoh5SBZFU1uYzZ3dg,281
+openai/types/conversations/text_content.py,sha256=SV7snTCpe8X3CJy1T1uOMiFn0IyZjWzj7GCtPJRezv8,259
+openai/types/conversations/top_log_prob.py,sha256=Xt1v7njpqld75G7acGOFhpyHJF_MQR_FRnbHxymJeC8,259
+openai/types/conversations/url_citation_body.py,sha256=N2tJPNrNu8kCcDd5iUjspOeE87LY63yxISk0xb2Y3Tc,632
 openai/types/create_embedding_response.py,sha256=lTAu_Pym76kFljDnnDRoDB2GNQSzWmwwlqf5ff7FNPM,798
 openai/types/embedding.py,sha256=2pV6RTSf5UV6E86Xeud5ZwmjQjMS93m_4LrQ0GN3fho,637
-openai/types/embedding_create_params.py,sha256=vwV8t94f-_2ueVou3XLNxL1O7Yiu95Wg78p91o3BOiM,1999
+openai/types/embedding_create_params.py,sha256=asahWWNcMvXGDfbTMz4uDy7DU9g6OJ9wowqZByghzw8,2039
 openai/types/embedding_model.py,sha256=0dDL87len4vZ4DR6eCp7JZJCJpgwWphRmJhMK3Se8f4,281
-openai/types/eval_create_params.py,sha256=fOLI2-P21iPUrmp24SaEesw2cfPqeoQMLlG59qYic84,5991
+openai/types/eval_create_params.py,sha256=EMEE1XtHP_AGF_R3ptJe55-uNbfvThBmKzN-sEq49mo,6703
 openai/types/eval_create_response.py,sha256=h8o7zz_pat94dmryy2QDMOK3Lz-szPkmD52faYtBK0c,3531
 openai/types/eval_custom_data_source_config.py,sha256=-39Cjr1v2C1Fer4PLl7rfA-bDK08I-bM4cqlp9Z_mzE,589
 openai/types/eval_delete_response.py,sha256=iCMGN0JG5kFIYNPSCOMSWlTu0FDkd2lbAw1VLO73-bQ,245
@@ -1083,18 +1370,18 @@ openai/types/eval_stored_completions_data_source_config.py,sha256=7CYy14MMLj6HBJ
 openai/types/eval_update_params.py,sha256=Wooz-3SDznbC3ihrhOs-10y9cxpTKGQgobDLfZ-23c0,757
 openai/types/eval_update_response.py,sha256=D9ItfznRN1jwp_w48r-i4jvH1_h2uiSpleHePrVigJs,3531
 openai/types/evals/__init__.py,sha256=wiXRqdkT-SkjE0Sgv6MixeECZjF0xaoCPdSGFEh0rEs,1193
-openai/types/evals/create_eval_completions_run_data_source.py,sha256=ooTUcbQOviHxYIo78L_PDYjFrgqZ9QwvbxXJe2O6l20,6785
-openai/types/evals/create_eval_completions_run_data_source_param.py,sha256=6THoH0DzHLl_I4eJzxil9jysz_byvpW0fj3qcAjxT20,6774
+openai/types/evals/create_eval_completions_run_data_source.py,sha256=pk67t6hX20xmLQFrNHHLKV-HHXTobSIUR9hKErjTySU,7387
+openai/types/evals/create_eval_completions_run_data_source_param.py,sha256=sxIEL3u9ZWLrVyIV_TXXCTULe8i2EfUlVm5JSKsdfCc,7465
 openai/types/evals/create_eval_jsonl_run_data_source.py,sha256=GzE9S1AZy46LOooR61Nwmp5yGUMoFGU5yk4g18BP72E,1219
 openai/types/evals/create_eval_jsonl_run_data_source_param.py,sha256=sM4-h4qDDkttGeaKgip8JZeuiaghPTBmwwxb5Xa6zhk,1285
 openai/types/evals/eval_api_error.py,sha256=VvRO-N9_tIxpRiSi17PXiMpleowg_Y-Rq2kqiRgmpC4,268
-openai/types/evals/run_cancel_response.py,sha256=6ot0HJLj5fivi1NGBMW96bqRK7qJNnWxkng_7w_GYto,12404
-openai/types/evals/run_create_params.py,sha256=kwWc1BZ8ayGicXRzClPwm8H0D_8APV5xNMvorBfXDsw,11310
-openai/types/evals/run_create_response.py,sha256=ZBbkEfg6r6J_dGpw-UgLljl-kVptOB_MjQiJZvJMMXc,12404
+openai/types/evals/run_cancel_response.py,sha256=p5xKrc0o4kNXLKZTyBT4HBZu6vYnGSJrXce3y9W-11c,13082
+openai/types/evals/run_create_params.py,sha256=vR0N6Q0esjeN-CZawFyvT4YcYQjOruWOf1tcmfbHjac,12141
+openai/types/evals/run_create_response.py,sha256=6TtQW31yzsHryWqLjOiEKnNfBpURl0jDfpJnKixqF_A,13082
 openai/types/evals/run_delete_response.py,sha256=WSQpOlZu53eWBCXSRGkthFn_Yz5rDCcSomqoa4HpUrk,323
 openai/types/evals/run_list_params.py,sha256=vgbJMYybzCep7e9rxUVHlWy_o4GNy4tJyGTwNu4n4ys,758
-openai/types/evals/run_list_response.py,sha256=ZyOFgkMEjb9VDljMYJZNKqIZKLb4fjxkqeuYskNgLkI,12400
-openai/types/evals/run_retrieve_response.py,sha256=ZGvwQapNy-ClYboTEpsREO0hw0wNZCQlVt40U9Pfr6Y,12408
+openai/types/evals/run_list_response.py,sha256=DGJBYaVbUDhWpRQlUB7RIOAv3E9I_ElZKHP7-lCU128,13078
+openai/types/evals/run_retrieve_response.py,sha256=uZnljgbHbRgq57tqS-MhCm1htZuEQ_vuxXH6ZygELhM,13086
 openai/types/evals/runs/__init__.py,sha256=sltNV1VwseIVr09gQ5E4IKbRKJuWJSLY1xUvAuC97Ec,393
 openai/types/evals/runs/output_item_list_params.py,sha256=Lp1OQV1qXeEUwMS90_-BpOnO1jICwJOo9QgNC9OGJ2U,821
 openai/types/evals/runs/output_item_list_response.py,sha256=YwVwZG2Fo1rPtJMCfVd8_RYRsaHYZEr5DzUZ9n6GJkk,2747
@@ -1102,10 +1389,10 @@ openai/types/evals/runs/output_item_retrieve_response.py,sha256=byZGq7mRUwQcWRQg
 openai/types/file_chunking_strategy.py,sha256=oT5tAbwt3wJsFqSj2sjDPBcisegNwJOecxS_V7M4EdA,559
 openai/types/file_chunking_strategy_param.py,sha256=mOFh18BKAGkzVTrWv_3Iphzbs-EbT6hq-jChCA4HgAE,517
 openai/types/file_content.py,sha256=qLlM4J8kgu1BfrtlmYftPsQVCJu4VqYeiS1T28u8EQ8,184
-openai/types/file_create_params.py,sha256=13FFRoLfKObvYRKrt-HOmvcSL4uE_UzscyD753F4bEA,776
+openai/types/file_create_params.py,sha256=Ame7qem1zNkBzHFLv5AOB1DnrIgAsIGdzOr6dr3NWZc,1394
 openai/types/file_deleted.py,sha256=H_r9U7XthT5xHAo_4ay1EGGkc21eURt8MkkIBRYiQcw,277
 openai/types/file_list_params.py,sha256=TmmqvM7droAJ49YlgpeFzrhPv5uVkSZDxqlG6hhumPo,960
-openai/types/file_object.py,sha256=ykZlEs6ysU_YhXkeW-RgEngvtOSt6v9cwcZanNDA5jQ,1420
+openai/types/file_object.py,sha256=Qu0rci3ec0iPh36ThAK4tiCN_BRmULnOFU8jzzFYhB4,1504
 openai/types/file_purpose.py,sha256=aNd8G-GC1UVCL9bvTgtL4kfkiF0uEjfiimRS-eh8VrY,265
 openai/types/fine_tuning/__init__.py,sha256=f8GH2rKGcIU1Kjrfjw5J0QoqlsC4jRmH96bU6axGD64,1832
 openai/types/fine_tuning/alpha/__init__.py,sha256=e_Evj3xLs7o_SONlqoXDM75oZMbxuGWhxBW-azsXD_w,429
@@ -1114,7 +1401,7 @@ openai/types/fine_tuning/alpha/grader_run_response.py,sha256=So-fvQMRvpccsSYb0jf
 openai/types/fine_tuning/alpha/grader_validate_params.py,sha256=Jd6m3DjIZAUNY-PlLUWDbH3ojm8ztnfjHmPjKw2DrLM,875
 openai/types/fine_tuning/alpha/grader_validate_response.py,sha256=nLldMLyNG-ICS3HwykDWdKuAPKu4gR2A2I0C79C4khs,773
 openai/types/fine_tuning/checkpoints/__init__.py,sha256=xA69SYwf79pe8QIq9u9vXPjjCw7lf3ZW2arzg9c_bus,588
-openai/types/fine_tuning/checkpoints/permission_create_params.py,sha256=82NfnSaWaPneXDfhOyQjFaix5B6DkwMfwKIQmrnCnok,382
+openai/types/fine_tuning/checkpoints/permission_create_params.py,sha256=TI90xY-4dv8vDKKZ0FBdbly9JTCrw4FgXkcXz_gTUlk,407
 openai/types/fine_tuning/checkpoints/permission_create_response.py,sha256=F-A0bNQ5iTNUDmtCbQwv1PUDrJWSsdymcbCqfiZ3TwE,636
 openai/types/fine_tuning/checkpoints/permission_delete_response.py,sha256=X_RuOvxa6i3wGLP5joHixv4tNLUpuK-2umiUf6P7Ha8,558
 openai/types/fine_tuning/checkpoints/permission_retrieve_params.py,sha256=3zVCOq1676MizKhKSba2OLmBSPlBx6Az2ZdxyVl580o,610
@@ -1128,7 +1415,7 @@ openai/types/fine_tuning/fine_tuning_job_event.py,sha256=POxSD7-WxAtJV2KuEpA9EmZ
 openai/types/fine_tuning/fine_tuning_job_integration.py,sha256=uNFfuBV87nUHQORNGVLP_HbotooR_e37Bgd0dyZ4nUM,241
 openai/types/fine_tuning/fine_tuning_job_wandb_integration.py,sha256=YnBeiz14UuhUSpnD0KBj5V143qLvJbDIMcUVWOCBLXY,1026
 openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py,sha256=7vEc2uEV2c_DENBjhq0Qy5X8B-rzxsKvGECjnvF1Wdw,804
-openai/types/fine_tuning/job_create_params.py,sha256=vRTo3w0ManSuszz0c7dMbNrlt9zoCeGU5qp11UDmxAY,6115
+openai/types/fine_tuning/job_create_params.py,sha256=p42ebOzvo_ghEitjITP4Qg-mhUvQchreeDrd_FR5YKA,6178
 openai/types/fine_tuning/job_list_events_params.py,sha256=4xOED4H2ky2mI9sIDytjmfJz5bNAdNWb70WIb_0bBWs,400
 openai/types/fine_tuning/job_list_params.py,sha256=wUGXsQ4UDCKvAjHDZAZ-JDU6XAouiTGThb0Jo_9XX08,623
 openai/types/fine_tuning/jobs/__init__.py,sha256=nuWhOUsmsoVKTKMU35kknmr8sfpTF-kkIzyuOlRbJj0,295
@@ -1143,67 +1430,232 @@ openai/types/fine_tuning/supervised_hyperparameters_param.py,sha256=WogLPJmKhsqg
 openai/types/fine_tuning/supervised_method.py,sha256=p9lV9DCi7KbkfOuZdytm1Sguqt-0AWtRiNawxxSuCgA,408
 openai/types/fine_tuning/supervised_method_param.py,sha256=LNvDK4FdDWflr7KQHYBDcWP9UB5UBcGP3YohVsnvi7s,445
 openai/types/graders/__init__.py,sha256=GiHbVTKVpfAqbbzZrtF-N00Njkr28cNG26wd_EDLPGI,1019
-openai/types/graders/label_model_grader.py,sha256=RuMB8WbE9DjqVC8j6VqGsUbnMpgPcdNO4sSTsQa1rFQ,1520
-openai/types/graders/label_model_grader_param.py,sha256=ZYUGIR2qAB3ByZKZgBYOhWvgWQ0RsvKFlnhaWNvCi-M,1696
+openai/types/graders/label_model_grader.py,sha256=z7YmiMn7suYk5CbIFAn8MLTnYeJgxhJNiDcI5S4rDGQ,2026
+openai/types/graders/label_model_grader_param.py,sha256=21MydaUGP5Y5zBW61ShSxwtpzY-NcC3gGJaaCWka1KU,2310
 openai/types/graders/multi_grader.py,sha256=QyTkY28D7_DyZHOdlTCpLHHyzWFYDs8KT4-30_XgSLY,1018
 openai/types/graders/multi_grader_param.py,sha256=6-AOnwpdJt5yGBqdtSu7fPOIav0GuipZMg5ZnDskYtc,1191
 openai/types/graders/python_grader.py,sha256=WnZ24W9dtfqX8ZEPgVArYNkyAQElz2j-6no03u1wcU0,534
 openai/types/graders/python_grader_param.py,sha256=ss-fnK1MZe9eDLvFd2sz1AayD3cbuIMBn3mXCDUZMb8,565
-openai/types/graders/score_model_grader.py,sha256=GLjlA53MNS6oKMdIVsoRimKmRf06XH396j8CWfQKc5I,1542
-openai/types/graders/score_model_grader_param.py,sha256=W1T0pKNY9i5EHYN1NP7xpRiV2AMXduwpdya3AiXm4jU,1662
+openai/types/graders/score_model_grader.py,sha256=Vf5JQNp8A1piMBOoQy9DoXOTI6U5urVDE4j2xXlkfXA,2048
+openai/types/graders/score_model_grader_param.py,sha256=zOofIIiQraIUJ56wRm5SrD018uxkbc0bLueAliD_8JA,2225
 openai/types/graders/string_check_grader.py,sha256=Ofmiv6cZw6Le42M-XQ2p_IJqazRLN626xf_zie5LVKE,675
 openai/types/graders/string_check_grader_param.py,sha256=gwIhLOMY4xyI6lKLwGTrTlorb98mODRATC1Ei2KbvrY,771
-openai/types/graders/text_similarity_grader.py,sha256=u4BsztOq6UXnnjAN9DX63XpRvAfNARrhrrd3ZsUzsiw,786
-openai/types/graders/text_similarity_grader_param.py,sha256=c7IXZQg8goxQpqTvfPTMBdiGsQmrMzHsOA0Q53Gis5U,904
+openai/types/graders/text_similarity_grader.py,sha256=SYoSbN81qi3_Q-y_l7H4B_ATbwfLlx_RnzY2J11f1FQ,887
+openai/types/graders/text_similarity_grader_param.py,sha256=fWPcnMC6Qp0NjsaQOm7wJ0eCumyXex99MX7URGm2ja4,1045
 openai/types/image.py,sha256=cWbI4EZxZ_etXKGl0u-7sr3_fJEaWwP0RpJ2fSIDYfc,766
 openai/types/image_create_variation_params.py,sha256=Xeka4vp5V0o8R_6vnLsqiQhWH5O6tUSCyO3FKGVmAeU,1426
-openai/types/image_edit_params.py,sha256=ntaDKxXKYfieWfJ9Il47wu_HSSW8XikqP7gwN-wGO6M,3335
-openai/types/image_generate_params.py,sha256=g_eqIj9riTo82x2vXjxPWT0W7KprB5NNuYporyPDPEY,3998
+openai/types/image_edit_completed_event.py,sha256=E19lxYAYTACjUME298BXryQdQZ0DnzWZPbzM636el6k,1736
+openai/types/image_edit_params.py,sha256=mm2OrOvhOK9-bnBNx9OU5qy-fQhrYwu29uYXyuADsKI,5330
+openai/types/image_edit_partial_image_event.py,sha256=kgMb_9JveHjePvhZFhUnj5-us1mdZhgzFaoOUPmFBLU,1095
+openai/types/image_edit_stream_event.py,sha256=GtHKc8VdumW5RnQtIiyMqhwIIaqYogKXZF1QNuq9Bd4,516
+openai/types/image_gen_completed_event.py,sha256=sA2Ezhl-Gwh0cPq3VFmDSZDD8yiO1i5mkB-BziIdqd8,1745
+openai/types/image_gen_partial_image_event.py,sha256=vTArcJ1v00opWXiP8iUr9L886cg1VUCtoJLL8NCUH8I,1077
+openai/types/image_gen_stream_event.py,sha256=gVzdE6qzBPpK3kEFM7EdoUzBa4DgCaS3AdF9gjd0pUs,508
+openai/types/image_generate_params.py,sha256=7GBDjcoEdKX8buOK6IroOn7eH9GFkcXb-LGYKfVuRMU,5323
 openai/types/image_model.py,sha256=v8nkOop8L8LS6WSMhl4poJ0edMN9Khkdn9epylLQDvE,234
-openai/types/images_response.py,sha256=pjyldIxYKzezkUIJ8HPz9tScPhSFt8i_Px8n0JW-fV4,1210
+openai/types/images_response.py,sha256=cpbt5tKIax5JIDM4FSj3hjo2RO7AFN2pJPNQm4AWqeM,1905
 openai/types/model.py,sha256=DMw8KwQx8B6S6sAI038D0xdzkmYdY5-r0oMhCUG4l6w,532
 openai/types/model_deleted.py,sha256=ntKUfq9nnKB6esFmLBla1hYU29KjmFElr_i14IcWIUA,228
 openai/types/moderation.py,sha256=6mV-unXrz5mA47tFzMNPiB--ilWRpOXlCtT5HKZE7vg,6840
-openai/types/moderation_create_params.py,sha256=EaZ2cej25g5WbRB2kIY7JFCXQPKSQQ95iyoUAAelGr4,992
+openai/types/moderation_create_params.py,sha256=bv5qr2y_MQ1MYBhWWUiCET2L18ypWtQpaIKzYTrl9xs,1032
 openai/types/moderation_create_response.py,sha256=e6SVfWX2_JX25Za0C6KojcnbMTtDB2A7cjUm6cFMKcs,484
 openai/types/moderation_image_url_input_param.py,sha256=t1r9WD3c-CK2Al1lpB4-DjfzLFSwgETR0g8nsRdoL0Y,622
 openai/types/moderation_model.py,sha256=BFeqSyel2My2WKC6MCa_mAIHJx4uXU3-p8UNudJANeM,319
 openai/types/moderation_multi_modal_input_param.py,sha256=RFdiEPsakWIscutX896ir5_rnEA2TLX5xQkjO5QR2vs,483
 openai/types/moderation_text_input_param.py,sha256=ardCbBcdaULf8bkFuzkSKukV9enrINSjNWvb7m0LjZg,406
 openai/types/other_file_chunking_strategy_object.py,sha256=Hf9XBL1RpF9ySZDchijlsJQ59wXghbVa0jp8MaEoC-4,310
-openai/types/responses/__init__.py,sha256=jcfcirJCsG_JQQ3pxTBMynN23A1cRcS4hx86-l8SByw,13270
+openai/types/realtime/__init__.py,sha256=0TmyntMzkZibpUpZzkosNpMcycv0w32QlNEjh8cb9Qo,16664
+openai/types/realtime/audio_transcription.py,sha256=p3L99f6gmcnYTNfde6bUsevaRPpVCDh-CXiHcyHYrGg,1209
+openai/types/realtime/audio_transcription_param.py,sha256=ZIvJL36zOMYEZ3A5fXplV3lgx64oVTISyiGNvbXVKwE,1150
+openai/types/realtime/client_secret_create_params.py,sha256=g1pj1BB4T5ZvUltoj6BgfAEqzL2zCtMYV5Ai_ZJioLM,1674
+openai/types/realtime/client_secret_create_response.py,sha256=maHTZ6A_YogizgdV4jy5xOakvVMRUc6NRyWxzC9hObY,932
+openai/types/realtime/conversation_created_event.py,sha256=dJiXF9qKzTyPGFjURZYRrtu0np1ZtDpSYUpQgXPzrRo,751
+openai/types/realtime/conversation_item.py,sha256=BGqZp9UpybVbEyr6enYqdleryy4NMbXpzkUPX03cvoI,1437
+openai/types/realtime/conversation_item_added.py,sha256=3cMQ_vYbEUlnPTYFZmayW4dqkt-gpbzNxDHI0RJhWL8,742
+openai/types/realtime/conversation_item_create_event.py,sha256=-42Pp6Kswz74lpWr7sHbDI3FO4byz5TJvD3VLMNRwhg,1089
+openai/types/realtime/conversation_item_create_event_param.py,sha256=14RaZ7n5CRh6cKaOsOsN6n94MLLijFzY9KmltHnH8xk,1110
+openai/types/realtime/conversation_item_created_event.py,sha256=2err9ZwNCqt9oxy-jvp5y_T8C0_OkHl_KxJCwyHesaY,825
+openai/types/realtime/conversation_item_delete_event.py,sha256=Ao3zKkKF_JQyBwFK1fGojKw96cZjIfHziwvRyLPpgMQ,548
+openai/types/realtime/conversation_item_delete_event_param.py,sha256=a17h8Hd8MxUbXT6NQg8YpTr1ICt1ztRecpfukHw4g34,569
+openai/types/realtime/conversation_item_deleted_event.py,sha256=7dZc3PmGyeSwNGwHCvQgoHwYK4QN9kcv9kRPL4QfSak,491
+openai/types/realtime/conversation_item_done.py,sha256=2dlfFQgk0mSVCoOPUdXKbKShbzsesucxkk84bob_R1A,738
+openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py,sha256=1GEybZ5q1L8lH0p0lA-grhCmm8F8WN3mUcLAC-FG-vg,2440
+openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py,sha256=xy20zUa5uSj0HtefAbOq5ZgG_N4o-HkAbxecbIhvOhc,1349
+openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py,sha256=Gwm8rry9Tsv2eNkfrjsjDE69K9qmc27KXcps3zdCTGA,1076
+openai/types/realtime/conversation_item_input_audio_transcription_segment.py,sha256=hBP5di6OQ9u5fhKjKz6XKmy7T-An8orAlZfboIYukHo,990
+openai/types/realtime/conversation_item_param.py,sha256=yrtdTZDIfAsJGwSXDY7v-_e9GtOPqRNXQAM2LWjxOOI,1483
+openai/types/realtime/conversation_item_retrieve_event.py,sha256=qGlMQI_0YfoO11d0VgV5iVFLHMCjHErPWN516xn0h9s,558
+openai/types/realtime/conversation_item_retrieve_event_param.py,sha256=TRYaZ3btNaywRPaMOVRzK5VT7wh4taIGjbUdhkZ7gFc,579
+openai/types/realtime/conversation_item_truncate_event.py,sha256=IcWi21tiuaduW2S_-w5qSYZIIYEY5c-mRvpb54In_pM,944
+openai/types/realtime/conversation_item_truncate_event_param.py,sha256=-rMZ2Y1TJ-naH6g7Ht8dipjQRnOnSW8xWHrzT9Up4P4,985
+openai/types/realtime/conversation_item_truncated_event.py,sha256=W2L6YmRG-YQ3YZd0knL-EUL3_qPColjJj-DzdECYwv0,703
+openai/types/realtime/input_audio_buffer_append_event.py,sha256=iY7_Acz5Lu6Ul_2d-Ho0Tnjo4b8y-eZuztjsgJtqVPQ,661
+openai/types/realtime/input_audio_buffer_append_event_param.py,sha256=XmN2bE6jBRrkKGVPJdnPjJql5dqMPqwbmFnxo-z22JE,682
+openai/types/realtime/input_audio_buffer_clear_event.py,sha256=8qKqJLRpEhiMcGiuosO5TRx6e0qCIZq3F-TF-pWqIcU,488
+openai/types/realtime/input_audio_buffer_clear_event_param.py,sha256=y-zfWqJsh1n6r2i0MgLDpnNC4g1dq3GCS66Twfkng38,499
+openai/types/realtime/input_audio_buffer_cleared_event.py,sha256=L4tqLyaTqQGdBoZudMxF0BJNqT5-lUVWKuSudIFKA6U,428
+openai/types/realtime/input_audio_buffer_commit_event.py,sha256=gXFJz3MRKaOcjMB5MpmzPSM3tj5HHPxWSScpGJCVEpE,492
+openai/types/realtime/input_audio_buffer_commit_event_param.py,sha256=B8agXC-rUl-D-RijJ5MeTLgw43qVYzmf2_2oAVokhLY,503
+openai/types/realtime/input_audio_buffer_committed_event.py,sha256=_u1WZzg0jTPr4NEwav7dVpYJNDPjv0sI-4XzFSFlJj0,732
+openai/types/realtime/input_audio_buffer_speech_started_event.py,sha256=e7EScZ7HN3GL9jvQVMKz7qRZ6vPIsRO6yPGkHlFBvIs,860
+openai/types/realtime/input_audio_buffer_speech_stopped_event.py,sha256=VAHzgW0UzAQz2cA_j3VgVmfbiRJeH7GzhugeyAXGfC0,778
+openai/types/realtime/input_audio_buffer_timeout_triggered.py,sha256=Q8Og1NhtzVz_YLVm2VWk7ZqxB00lVn5H7anwvG12wVo,853
+openai/types/realtime/log_prob_properties.py,sha256=ADUI2Bprv-PM8EGaMLOo77UpbYQKttIOyLuR1lsEJd0,452
+openai/types/realtime/mcp_list_tools_completed.py,sha256=jc7_Cz3ZPrxrHFIoRaesudFHm7XLwEfbxASknRBR-1w,473
+openai/types/realtime/mcp_list_tools_failed.py,sha256=do32WvGDKI15Mcwp86_eUU1Yj3JGs7KOctwrddchlwM,461
+openai/types/realtime/mcp_list_tools_in_progress.py,sha256=4nOZiQCY6DAAxpST0K2wQGSvqsffgWczyyxaFgoPOJI,479
+openai/types/realtime/noise_reduction_type.py,sha256=lrAb7YhMM-anRYzzOha8hcVloNJR_zWuFrO2SccrcIo,238
+openai/types/realtime/output_audio_buffer_clear_event.py,sha256=VP4gqG3Mkc4n3uo_AuGzC9062yRAVc5h-wpRk0bga1g,493
+openai/types/realtime/output_audio_buffer_clear_event_param.py,sha256=gUnmGZhwaMW5tpfYkEjafmcjuxe6eVk0CwQsq_od0Pc,504
+openai/types/realtime/rate_limits_updated_event.py,sha256=lPYwNoaEopdkfIr5KDOz0ns1OJKEn-2HI209zpDzeuQ,948
+openai/types/realtime/realtime_audio_config.py,sha256=TXlcVjt8PwthyneFJ0kUqF4j2nwG-ubJyHENzrcd0gU,467
+openai/types/realtime/realtime_audio_config_input.py,sha256=48ANMlwtkmcH04CvcDXJxJYF4VIhaBzJSdXn3a3eV1Y,2874
+openai/types/realtime/realtime_audio_config_input_param.py,sha256=grQocM_NhSJlByguflJ7jc4H1O6CknI5Im_A85_NbDg,2926
+openai/types/realtime/realtime_audio_config_output.py,sha256=DdlfBuf2quGvKikEcNFRx8C41B-fO6iENK9L5Y6DCHA,1389
+openai/types/realtime/realtime_audio_config_output_param.py,sha256=elX8xA49gDbbFjvJv24dquBwJVyxtOJHIwHN6XWR2Vg,1371
+openai/types/realtime/realtime_audio_config_param.py,sha256=RcizKdhyXCLKrykVY3pQx_z_w4Oe1Xk5p2IqcHgvZu8,503
+openai/types/realtime/realtime_audio_formats.py,sha256=YTBxJ-D1AHA0EoaH1s-N99489Y57beSn7RiA6SjxIuQ,926
+openai/types/realtime/realtime_audio_formats_param.py,sha256=jtxa21eFt-2XkhMMadEvZ7MTv-itqCRWqDi4VEmMjwI,817
+openai/types/realtime/realtime_audio_input_turn_detection.py,sha256=b3xLXLZ7tsLoTKE2Ex-dqK5mir0QhA0ohpKEgErwDZg,3449
+openai/types/realtime/realtime_audio_input_turn_detection_param.py,sha256=ulsMAl_sDB6In9I9aGI1XERH-cVUtEyQPU9uyOtF0rk,3280
+openai/types/realtime/realtime_client_event.py,sha256=4_lYEyK-wj25VTh8GTaV0mZ0t17KhkfJrQ0yUF0mCYU,1473
+openai/types/realtime/realtime_client_event_param.py,sha256=YPveu8tNyKmZkK24qEJv8js5l5NNygDyAcsza2iOmKw,1543
+openai/types/realtime/realtime_connect_params.py,sha256=AvTypkFCYmDn9qMeektVqij6cqzgovr3PpgpMalJoJ4,290
+openai/types/realtime/realtime_conversation_item_assistant_message.py,sha256=g67lu3x-Z3zw9RdXyEOWTbmsDKlmRNZErtE510jMsy8,1715
+openai/types/realtime/realtime_conversation_item_assistant_message_param.py,sha256=vlSO9xgZHh099lbQU4FqngPEIgkNDB9AsFwatJeFR0I,1683
+openai/types/realtime/realtime_conversation_item_function_call.py,sha256=7HTj4l_AtGBPxRZqQ9JlY9uuBLrOIDatyBE_JVji9YU,1202
+openai/types/realtime/realtime_conversation_item_function_call_output.py,sha256=E5BtjqP6anIi9XpdVKtpd8pFh8SXoersKOpn6hbrS5o,1103
+openai/types/realtime/realtime_conversation_item_function_call_output_param.py,sha256=45NvbyGoO4V6lbeQn5mKck8SQJGHQb3xtgTy2GmnuqE,1100
+openai/types/realtime/realtime_conversation_item_function_call_param.py,sha256=hxeYcWk09Lota1TqIZvg5kXMu_0S0y9iDGJxPlzHmVA,1182
+openai/types/realtime/realtime_conversation_item_system_message.py,sha256=mq0tDiLi7r4bMRqI83lgnSF1uJwGsFUfhKr2181ELYI,1224
+openai/types/realtime/realtime_conversation_item_system_message_param.py,sha256=0iLyhkIE6xLzjDI7vqa-bbs73kWnaCQz8rHBujMY6nA,1226
+openai/types/realtime/realtime_conversation_item_user_message.py,sha256=N7jJ9WlJMabAyvldcGJzfVL1w2Nw-wDcBJma3lyIJeQ,2111
+openai/types/realtime/realtime_conversation_item_user_message_param.py,sha256=b6KnyeTZty254f5A2GCCoiH-cvIXffs9UrLJprlRSFQ,2045
+openai/types/realtime/realtime_error.py,sha256=1pg3if_lIqzP7Ow23UGQyqs8x0PLdiLIC-Ax79TLe6Y,625
+openai/types/realtime/realtime_error_event.py,sha256=fAosJOL7vMbG5JYMwzg8yrRBaT0INz4W_1XCxIUFzTw,466
+openai/types/realtime/realtime_function_tool.py,sha256=3CDiCZCM0A1VLRxOFdG4teFXr8dx0JFU94KbSn-JgGc,734
+openai/types/realtime/realtime_function_tool_param.py,sha256=-vDBSmMWNdbABC8dxVckkNeRdEujAKeff6icJvYrM0I,674
+openai/types/realtime/realtime_mcp_approval_request.py,sha256=Li-i-Sa7tfiI5nWA4Dyz4ac3_KTWd_qLc3u7KNOcMjM,621
+openai/types/realtime/realtime_mcp_approval_request_param.py,sha256=zdoRzHIrSzhfa3DTO4XyYQ4P1hNq4J3XesJFQmuD-9Q,717
+openai/types/realtime/realtime_mcp_approval_response.py,sha256=3GcWB31Mg2pWynk3-IqflayLAD6QRt_UXB2-4sKxgOU,676
+openai/types/realtime/realtime_mcp_approval_response_param.py,sha256=CU8G-jv5aYbTrts4JQuZeLHf3RZ2HgIrsCDtwkqSxk8,755
+openai/types/realtime/realtime_mcp_list_tools.py,sha256=MzGc-pTTKpBqweIMwvz5BOzBtDQGmqXFkY0En81l1Xw,889
+openai/types/realtime/realtime_mcp_list_tools_param.py,sha256=8L8i5K1xUxvT2Op4B5hN-x9YoclR9Wlb9vNi2q1TQo4,975
+openai/types/realtime/realtime_mcp_protocol_error.py,sha256=4jqkfl6h7tFT5kQy40VW24LrokpKe6X4VROYlNmOHDQ,313
+openai/types/realtime/realtime_mcp_protocol_error_param.py,sha256=jlufPTMU_9JuYtqzQGTmb0o978gDiOFxkNx0yJAvwx8,389
+openai/types/realtime/realtime_mcp_tool_call.py,sha256=dEtXdioDaSHaL91qnHOQKqx9KdilqjW3oZVJIprC140,1335
+openai/types/realtime/realtime_mcp_tool_call_param.py,sha256=Vrs4I_uCfGFLLyEdSt4L2PwPPS7OIH2DN3jKRzodTFQ,1349
+openai/types/realtime/realtime_mcp_tool_execution_error.py,sha256=swcOrTKO5cx1kkfGS_5PhBPEQx_Vf_ZW04HbA5eRa0g,314
+openai/types/realtime/realtime_mcp_tool_execution_error_param.py,sha256=3IuPmvy52n_VByGYqfCr87kciEQdJMTcwGWj4__PiX8,380
+openai/types/realtime/realtime_mcphttp_error.py,sha256=-Zqz0xr2gPs6peG_wC3S8qVgtEUJNrZm4Mm5BIvmZw0,301
+openai/types/realtime/realtime_mcphttp_error_param.py,sha256=GcmAMBvZVNrN9p_tneHPu_pyN7D8wCytaAKruFtMfwI,377
+openai/types/realtime/realtime_response.py,sha256=IvGy_VZPIRVCD4-mLElif7bOVMFJglR0tvU1zpfz6ys,3826
+openai/types/realtime/realtime_response_create_audio_output.py,sha256=gnMvrt0BR440zNDOmYB-j_Eh9WcaDExnZE8P68ptmdc,1004
+openai/types/realtime/realtime_response_create_audio_output_param.py,sha256=u1kCAMUjCRFoM402IZbfvRxvQLzrKN66PLqKG-yD2i4,999
+openai/types/realtime/realtime_response_create_mcp_tool.py,sha256=OhQM73g8gqOgsWphIb6Jw31ZaaucbG9BKDu7qk6mc2Y,4512
+openai/types/realtime/realtime_response_create_mcp_tool_param.py,sha256=2kxSDx7qzMPwB-pizGyqlr6QA2EnaSoEI3U_3RE0Ykg,4415
+openai/types/realtime/realtime_response_create_params.py,sha256=dgMzWX4DKFvHkHZYdZA-o_5zMUEkRmQAVwMKBfl3740,4280
+openai/types/realtime/realtime_response_create_params_param.py,sha256=5tgJnspzBWfDWjgQroFxYQawX7IvPi0HiAM2sAK9X4E,4312
+openai/types/realtime/realtime_response_status.py,sha256=bSeFcCy9c4jyf12ZzJFcxpCYKrSwMEgpNipOE1SNqcA,1325
+openai/types/realtime/realtime_response_usage.py,sha256=rxUW5DM1d4BY3F74KaImcADVnWasSv_Zj_febO30Vms,1429
+openai/types/realtime/realtime_response_usage_input_token_details.py,sha256=YcOrEtHj9QjJ-s3fmNqGMJ2nJUcJO_J9yXbCueppqZo,1244
+openai/types/realtime/realtime_response_usage_output_token_details.py,sha256=9wWB5tRft0LQsIgsIBsSaAhv4rDGgTl9Y5svpGU4ooE,459
+openai/types/realtime/realtime_server_event.py,sha256=5XfW7BkJMsJJUGXq0hCd7AtCa2uPPKnQbqkrorx_LYk,6578
+openai/types/realtime/realtime_session_client_secret.py,sha256=hjco-0FnTvhnMSLezczUBDz739hbvZSbxB4BeZCeark,583
+openai/types/realtime/realtime_session_create_request.py,sha256=1CK2FEaGAha7ASxnCiOOkM0XQK5_iiAj5gxpZfj4L68,4138
+openai/types/realtime/realtime_session_create_request_param.py,sha256=JPDnr2jtiigbdt3JjZBbiwMkrORYQOcLQIi-rCH5oEI,4124
+openai/types/realtime/realtime_session_create_response.py,sha256=8KLiPkKkRVYqefEX5lTGEeFx2FCi0P0JpMY-1ong6_k,16787
+openai/types/realtime/realtime_tool_choice_config.py,sha256=DV0uuyfK59paj5NC9adQskUF2km5TRSiHAlMDu1Fmdo,472
+openai/types/realtime/realtime_tool_choice_config_param.py,sha256=0vqYNM4MkU5d8GXfitT6AoE9AubKeLZOSHGOH8q73QU,550
+openai/types/realtime/realtime_tools_config.py,sha256=JSxehiQnA_tJUeXvi2h9H6wlYsnhhtRWB_o5S20V-oQ,318
+openai/types/realtime/realtime_tools_config_param.py,sha256=0jxEaIIHOdhLLAN2zQqsx8hrHSjWWeVvTW-896ye3gs,4708
+openai/types/realtime/realtime_tools_config_union.py,sha256=FbA6HwGnNC9AKBNh-3vjb7yzag5Snc88RY18gim-fY8,4769
+openai/types/realtime/realtime_tools_config_union_param.py,sha256=Wkxn6uvJDWi1IadV_DjbPmYeyThJlB50S4iizpw5Xvk,4595
+openai/types/realtime/realtime_tracing_config.py,sha256=TzKfoTJuLjPBG-qozwigXQv1uAZszgVX_K-U6HaiEjY,871
+openai/types/realtime/realtime_tracing_config_param.py,sha256=SqfUQ8RO0Re28Lb2AF2HlaJj7LS_3OK3kHXrUsKPcDc,840
+openai/types/realtime/realtime_transcription_session_audio.py,sha256=yGDcdMTaxGZKIgmDKnKQeEtgEH5SVYJfPXpr_zAr03c,414
+openai/types/realtime/realtime_transcription_session_audio_input.py,sha256=IXUUnr2WbKCeqPyd9VTge1Ho0MQvy0FZMh2l0awdTZs,3003
+openai/types/realtime/realtime_transcription_session_audio_input_param.py,sha256=sCvGim5THVMJ1c1a5ipyiht85tcrkgt75OsLIUp8ncs,3055
+openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py,sha256=nLF5DpguSg4-ectSCSSvbV7t7X2Z_yUvSCNQEdEuFEM,3489
+openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py,sha256=VL4MchzWUsCShFXvTnfJOKUqOh71mtZ_0YmEBrJ_ofQ,3320
+openai/types/realtime/realtime_transcription_session_audio_param.py,sha256=IdEgpkEbtPrEHJ-KkvEcV_8aSvCBzxBQDUQhB6ehQgI,451
+openai/types/realtime/realtime_transcription_session_create_request.py,sha256=-hJUbNd0rR0pbMnCzXvCylhOSLWUG42RNweAk_KhpXw,899
+openai/types/realtime/realtime_transcription_session_create_request_param.py,sha256=kP35GihtGw5L6T1okdSRJ9rJrs7FDBURy7htgKPDMR0,928
+openai/types/realtime/realtime_transcription_session_create_response.py,sha256=dpnCsv19sMo4aQ3oYIcStplpKft1EFRxQFLzLvaCaUM,2434
+openai/types/realtime/realtime_transcription_session_turn_detection.py,sha256=hFAIILzs1QaQ8JvX8PoHBExUm3eNZKWnJQfjQKnGBfE,1040
+openai/types/realtime/realtime_truncation.py,sha256=lnr1Uq9kSs6OfJb_TcvQrs7jx92UuSKaIhGNvwUK-qU,380
+openai/types/realtime/realtime_truncation_param.py,sha256=wBXHiAPS_HA6MWBqhRGEtqZxu6RdIrgnTVRYgUljwq4,442
+openai/types/realtime/realtime_truncation_retention_ratio.py,sha256=443HkkzJeCKSvLGYOGENOnsFOECc_k8RK9rNrpgtir0,515
+openai/types/realtime/realtime_truncation_retention_ratio_param.py,sha256=_hvzGBKDeouf3aJsB0EYK6UsL1UnH0rq1zKyzMW98tQ,581
+openai/types/realtime/response_audio_delta_event.py,sha256=9-CcYOY4JeBiFYVkGwQ1uOVHrtRNxsMg43M3csgaOw4,755
+openai/types/realtime/response_audio_done_event.py,sha256=Kuc7DYWSIcNfCH8M2HIl80phHyYnHnChfSdp30qXqUA,692
+openai/types/realtime/response_audio_transcript_delta_event.py,sha256=Pr0dP0Up-jY-QQiyL07q9kByaOMkV0WIaYrkDOCLhXY,786
+openai/types/realtime/response_audio_transcript_done_event.py,sha256=IEbDxwWpjCIoMpT5-iu3gTSAqbmqvOcjsKsj3PuYKvQ,800
+openai/types/realtime/response_cancel_event.py,sha256=WCXDsVwgkgyb3L8Nh-bPaaiDnifXjLyPbxvoIkN7YA8,636
+openai/types/realtime/response_cancel_event_param.py,sha256=nidzBL83liHwyImiNGiz9Ad0V34EtFAQDw1utqcF6ns,630
+openai/types/realtime/response_content_part_added_event.py,sha256=CSsdmclKPRCclNpUixYg54tUmJG3Dy1fgKe2-D7E8fs,1231
+openai/types/realtime/response_content_part_done_event.py,sha256=ws8nIPrUln5ue45ID_UdR8AjgYQiL6F0imrv7TMRsfc,1189
+openai/types/realtime/response_create_event.py,sha256=GlWcb2kLyq9oDcsJQ4nkwWjfGjleMQWy6HmDKztCXU4,654
+openai/types/realtime/response_create_event_param.py,sha256=_NQArkqOHZCFJsxq26HjHGS2IgVh8cy4VcjN9M80cj8,665
+openai/types/realtime/response_created_event.py,sha256=7LMTqoVE0WiqlAdEYMN0weSTBBhU_4CyD3gFxLAeKcg,505
+openai/types/realtime/response_done_event.py,sha256=u44ZBOYbzqiC8VqqDp8YuA9qBmVHWLXMJZGvOqJOIks,493
+openai/types/realtime/response_function_call_arguments_delta_event.py,sha256=8mQkxsj6MEUMtVutdqQG3ERqL4u1qNY55WKSXMol-0s,792
+openai/types/realtime/response_function_call_arguments_done_event.py,sha256=iIDsECFP-jj_fkcDGa1ZHstjkBVYxdbFeNvZV3_z0sk,792
+openai/types/realtime/response_mcp_call_arguments_delta.py,sha256=wXMighxToTIFK2ElkOrIYKvxqN9i-tZDR3iUdTFvRFc,831
+openai/types/realtime/response_mcp_call_arguments_done.py,sha256=3Hlq2bJW31yvvPOin3IOf1XSRMLMwPoZL5Kn9uTm1-o,708
+openai/types/realtime/response_mcp_call_completed.py,sha256=OlfSjKJmHn77tdwgt5wVGbGXL8HHQWhYQzpFf8QuOWg,563
+openai/types/realtime/response_mcp_call_failed.py,sha256=m3AZktEUCOzRp6UKCLPhLDhgYYkaWmO3hUTwTRERBmA,551
+openai/types/realtime/response_mcp_call_in_progress.py,sha256=PjME9TvWMBBR5gnEgrOf8zbuliR3eYW1h48RDsRgPfA,569
+openai/types/realtime/response_output_item_added_event.py,sha256=B_H6V9ijObo-JOUFIEH1JqMdKhhSuY24K5I9rly1j6c,721
+openai/types/realtime/response_output_item_done_event.py,sha256=64b5NILItLOAi9IxkYEhfAkmJSzoYjDLo4WJaL-zdOY,717
+openai/types/realtime/response_text_delta_event.py,sha256=Al1GXaZ55DQbrEyou48U8IfP2525e9G7277YfFM9VSU,734
+openai/types/realtime/response_text_done_event.py,sha256=bhm59wW9hARgdl55rzzEvx33Xajy5gQ9Fr11RUWOeno,737
+openai/types/realtime/session_created_event.py,sha256=nPM98I--WtKuzs3Srofj6kptYbRYt9x5LBMxxL7j9mQ,770
+openai/types/realtime/session_update_event.py,sha256=TmxL9PYD7GD_MAhRoGsor7mhAp2PZl4JaWl_APLteew,1088
+openai/types/realtime/session_update_event_param.py,sha256=7uKlFHpoKcvYMCeAJ2cwCWe-dDPW6XeMV-zNgRvtX1E,1160
+openai/types/realtime/session_updated_event.py,sha256=a5zFzk9iBCpB6TOjb_x_KjTdOhIPOSlW3cpx3nGNiKk,770
+openai/types/responses/__init__.py,sha256=yNb-l2XC4L8jRJu9GLt3KJCJ6kgHj8GDZP6wC1nex6s,14969
 openai/types/responses/computer_tool.py,sha256=bigJ0RyhP6jKtAB7YM-oP2sPtL1isCnZufTue80u9vg,607
 openai/types/responses/computer_tool_param.py,sha256=7SJn4rXdQeAt-DiMiXfdPI6Q_X6S7Wfxrc1Am8nPZeg,693
+openai/types/responses/custom_tool.py,sha256=WcsLiBUJbnMhjFF3hAFCP8SsCzzcbJh4BhC3NiVIl0c,736
+openai/types/responses/custom_tool_param.py,sha256=cAbh_D2pQa0SPEFrrRVPXXoQCndExjjqKrwRaBghWZk,748
 openai/types/responses/easy_input_message.py,sha256=4rPo04A1WVaCxLpPn3e_gJNgdNuAKlH9k6ijLK3-Bdc,817
 openai/types/responses/easy_input_message_param.py,sha256=8kM4AkSoiUOspuDTQPfdLjkgydQ9yHmo-FCfjdthtgU,873
 openai/types/responses/file_search_tool.py,sha256=WquLED7txr7E_6-YebznUuEwNDnMRbXW8fKEQdqro80,1369
-openai/types/responses/file_search_tool_param.py,sha256=efmnWaFeNsB9EdOY5eJJ0DcTjPSASdue_m_AFODvEg4,1382
+openai/types/responses/file_search_tool_param.py,sha256=pPJBWiEY_JwT0mon35qpD55yjgNTGHrP34YFxNgvNAw,1423
 openai/types/responses/function_tool.py,sha256=gpcLwRIXSp92jVJcIXBUnsSH_FzJrlH-jLIo-IbE1IY,796
 openai/types/responses/function_tool_param.py,sha256=ZDGBcqx-T24wgum2YHr3kBzk-P8lH-lCkuAHxyzKxGI,861
-openai/types/responses/input_item_list_params.py,sha256=tslD-H9JZRZBH8yI2A8XmPilzr418KbYu0A-VsQJjqQ,1044
-openai/types/responses/parsed_response.py,sha256=cglKKalmeKmmH_tgfOPD78BlX6NqCCQpaN4oSvbTCWQ,3221
-openai/types/responses/response.py,sha256=2GB0L_qmbZMRQntUajmTLlx3LgqRuGccHCFDq64Usoo,9005
+openai/types/responses/input_item_list_params.py,sha256=wazm2tELpbpBWdAQrXGBq88Bm5RsxWXmlVJAV3f_k-I,964
+openai/types/responses/parsed_response.py,sha256=1rKsrhTtF8LhoRt_SHtBtQcRbztxAvPgZvTqGB9AMsY,3315
+openai/types/responses/response.py,sha256=1Mmz5nUciJcc3oxmBLbPFOfwsfMqO0VBRaJqzSfbzVg,11613
 openai/types/responses/response_audio_delta_event.py,sha256=mXPosLnDn72HLG-Lk3EdyOw7isLm3HgpqQoYkG6XrJY,515
 openai/types/responses/response_audio_done_event.py,sha256=26KUM9PJlWIQi80FKo5TSD9lKJh7JnPHnUCD5cqIcrg,414
 openai/types/responses/response_audio_transcript_delta_event.py,sha256=Q3nSbPpT5Ij3iIvpweMF9KCct20B8MWJWOFV5pVqC8k,533
 openai/types/responses/response_audio_transcript_done_event.py,sha256=92_yKmcs8ILjaA6NeoZR1wuzUS0VXLzCfMNcdRji6-o,457
-openai/types/responses/response_code_interpreter_call_code_delta_event.py,sha256=8nAJuGm1wfa3fqnvom5qqcaYp8Pzq3CP_mAoRt30LH0,688
-openai/types/responses/response_code_interpreter_call_code_done_event.py,sha256=jPyQXCk_ge8iGHDS1xqIrWwBZZd-VF4_Bctd8JXKVbI,682
-openai/types/responses/response_code_interpreter_call_completed_event.py,sha256=HEEIWRZqXHkGVgVb2V1EZ-ueyT_9xr6X9BYvvlBT4J4,780
-openai/types/responses/response_code_interpreter_call_in_progress_event.py,sha256=tUPTFke2CNa-Gok5dahF3IBbZLHF8bS8Ll8Fr6cq19I,786
-openai/types/responses/response_code_interpreter_call_interpreting_event.py,sha256=dVaBMmG7uPJWKWQBEiDeiqVrGXxmRLd9KQZT-Acrdfw,792
-openai/types/responses/response_code_interpreter_tool_call.py,sha256=iI0OCJio03GkZYx4IGWSjPkWYnYqECJR3wvNavffd7o,1560
-openai/types/responses/response_code_interpreter_tool_call_param.py,sha256=QK_z_lNbwzyJDbDnVKHNeF6Ni1y8GdtqkjdPNKeYXxM,1647
+openai/types/responses/response_code_interpreter_call_code_delta_event.py,sha256=mPveF26pvu_3esV1tMUnqfsT_NnZ1HWeqNM4F38NqUU,840
+openai/types/responses/response_code_interpreter_call_code_done_event.py,sha256=M5bmLyCJX8YFJv4GPtPBQZuXvt-ObQE9fztWnMli9rU,806
+openai/types/responses/response_code_interpreter_call_completed_event.py,sha256=STgdlJ5gQFLJeDwJGTGgvKKaJ_Ihz3qMNWWVjC9Wu4E,759
+openai/types/responses/response_code_interpreter_call_in_progress_event.py,sha256=4G7za-MHwtjkSILQeV_oQ6LEIzK35ak5HE3oi1pYFzA,767
+openai/types/responses/response_code_interpreter_call_interpreting_event.py,sha256=n8gNOqoJf47KE1T7kYE7q9bCeFnIUeODuFHmlGZcYkE,774
+openai/types/responses/response_code_interpreter_tool_call.py,sha256=ZOpjzLGxAoRad-MI4lrllxyVy4vBGnsMFj4jsNbej60,1646
+openai/types/responses/response_code_interpreter_tool_call_param.py,sha256=DOaAAxw2crU1ID9qrpJKvNEw1tAH8mW5kyUx3pSCnEA,1719
 openai/types/responses/response_completed_event.py,sha256=lpsi8GcuDN1Jk624y6TsUjpxRO39-Pt_QeuVtU8g-QA,517
 openai/types/responses/response_computer_tool_call.py,sha256=DZpxSuTbYHt4XDW50wpWm167hgHxZhBCnGbHN8SgUjQ,4644
 openai/types/responses/response_computer_tool_call_output_item.py,sha256=BYBAJUKqSsAbUpe099JeaWCmTsk4yt-9_RnRroWV2N0,1493
 openai/types/responses/response_computer_tool_call_output_screenshot.py,sha256=HVkJ_VJx1L9-sdIVzfdlk1EkrA3QSGJU24rcwqfvGzo,662
 openai/types/responses/response_computer_tool_call_output_screenshot_param.py,sha256=YJ3_l0_Z_sAbhIVMnBeCriUn1Izql404_YEQHLbt2Xg,656
-openai/types/responses/response_computer_tool_call_param.py,sha256=p25y4yTFM8BrxIaGleGSqlRsndoPiR2Dbp5eGjHvf_s,5047
+openai/types/responses/response_computer_tool_call_param.py,sha256=stigaaGzVgCkjN8vCCdOgpcpxYUhm-PYJNjJyElOR6c,5089
 openai/types/responses/response_content_part_added_event.py,sha256=58yTea-npQtrAhzj5_hBU6DcLA6B8Fv-sjtNFWKtkH8,1089
 openai/types/responses/response_content_part_done_event.py,sha256=ruZJUMhcTKY0nU3dPpXs6psLyAyyDj5kAQBm21mTO9Y,1081
-openai/types/responses/response_create_params.py,sha256=xKJox4nu2msKVAj3ZnZJ92hq0EYmQB46Pp6RQFItwqs,9759
+openai/types/responses/response_conversation_param.py,sha256=sEhOVnULPS7_ZFHZ81YkLcF9yzlWd4OxWTuOvDdOcgE,340
+openai/types/responses/response_create_params.py,sha256=H5Jd316jyaPK2DZcsw1xTi9dsMKGxiHbBp6yhSOLukM,13378
 openai/types/responses/response_created_event.py,sha256=YfL3CDI_3OJ18RqU898KtZyrf0Z9x8PdKJF2DSXgZrc,502
+openai/types/responses/response_custom_tool_call.py,sha256=3OFPImUjDkZPRnyf1anPoUD_UedOotTAF3wAeVs-BUM,730
+openai/types/responses/response_custom_tool_call_input_delta_event.py,sha256=AuKmvk_LEcZGNS3G8MwfAlGgizrPD5T-WwPV5XcwH7s,695
+openai/types/responses/response_custom_tool_call_input_done_event.py,sha256=6sVGqvbECYHfrb1pqbg0zPSO6aFu4BfG5fwI-EkCHOA,681
+openai/types/responses/response_custom_tool_call_output.py,sha256=XAwmrZXrzPCUvL7ngGBEe8SG89tmwUm6HSTk2dcl5dM,712
+openai/types/responses/response_custom_tool_call_output_param.py,sha256=vwB0jeJgbSsbPr77TYMUlhmyhR2didIiAXWDGdUmzPY,743
+openai/types/responses/response_custom_tool_call_param.py,sha256=bNJuc1YiF8SToRWjP0GiVgmttQieNPW0G5cfuKpvRhQ,771
 openai/types/responses/response_error.py,sha256=k6GX4vV8zgqJaW6Z15ij0N0Yammcgbxv3NyMxZeJsdQ,915
 openai/types/responses/response_error_event.py,sha256=695pQwl1Z2Ig7-NaicKxmOnhBDQKAcM44OiYCwl3bRc,576
 openai/types/responses/response_failed_event.py,sha256=Y0g4NnAuY3ESLzrkJ6VUqQ2CuQYBQ3gCK5ioqj4r9Rg,492
@@ -1211,7 +1663,7 @@ openai/types/responses/response_file_search_call_completed_event.py,sha256=6gpE8
 openai/types/responses/response_file_search_call_in_progress_event.py,sha256=wM-A66CcIlOiZL-78U76IjlrQo2DWEuR6Ce-vlRlNLQ,677
 openai/types/responses/response_file_search_call_searching_event.py,sha256=wdDdm9zEPEFx6dNZx1omfN4Qlchf92vXh6s6AojYWM8,671
 openai/types/responses/response_file_search_tool_call.py,sha256=DE3NhTc7hR5ZcTfHHV7FddimfuMIu5bjLIWJPRe0_9E,1664
-openai/types/responses/response_file_search_tool_call_param.py,sha256=-6iQ0SeUcjdY-F2CLmdtUmHhOOw0pOWCsNICob-Ynoo,1695
+openai/types/responses/response_file_search_tool_call_param.py,sha256=uNt3RQNJtRIhuyJ6iEadR_1KQ_urwzszo8hdCbuof30,1737
 openai/types/responses/response_format_text_config.py,sha256=Z1uv9YytZAXaMtD_faYD6SL9Q8kOjSvRQXFkSZc0_hY,647
 openai/types/responses/response_format_text_config_param.py,sha256=T6cMHds5NYojK9fZMMldWYBypWwVmywIIbkRm5e4pMc,625
 openai/types/responses/response_format_text_json_schema_config.py,sha256=Bg7fRMlXuBz95kDZnee3cTNavvZNbPganIL4QI-rPLg,1414
@@ -1222,128 +1674,143 @@ openai/types/responses/response_function_tool_call.py,sha256=SNaR7XXA6x5hFWMVjB2
 openai/types/responses/response_function_tool_call_item.py,sha256=Xbkpq2_-OQ70p-yA---inPz6YaRU8x1R4E6eTiWN7Zs,340
 openai/types/responses/response_function_tool_call_output_item.py,sha256=NlYlCJW1hEn61heh9TMdrYHRVpOYXHucOH6IXVA6588,886
 openai/types/responses/response_function_tool_call_param.py,sha256=k153-Qo1k-VPZidjuBPp2VcB6RGYGEQjGbZO2_RJ6ZY,941
-openai/types/responses/response_function_web_search.py,sha256=QbHSkY2Y_LBig2nei11sg0JSsyY01qHSLEoHPJyISWo,545
-openai/types/responses/response_function_web_search_param.py,sha256=nHe2ldVxO6zRD-GDhLTQtPE5vytW5QXd9d9XHr634lQ,621
+openai/types/responses/response_function_web_search.py,sha256=72x-qMcenYqDZfLRuFa3wA19jsXZn6UDnB0uRRtNi18,1794
+openai/types/responses/response_function_web_search_param.py,sha256=d_STmLPRRJki_Q533r2RlW_g3FktylWVxCvG3UI2Z5M,1937
 openai/types/responses/response_image_gen_call_completed_event.py,sha256=sOYW6800BE6U2JnP-mEU3HjubGd-KkiPwZ7jisDT_7Y,671
 openai/types/responses/response_image_gen_call_generating_event.py,sha256=1mjodLwyfkMBzcgQQhTix_EzQFNAWKnL6aycczObXJI,706
 openai/types/responses/response_image_gen_call_in_progress_event.py,sha256=DxvV9tMMGGcu5lTgIuHTL7Kbt3bO40NKg6Qd8kATvkQ,708
 openai/types/responses/response_image_gen_call_partial_image_event.py,sha256=xN3hw_RbEiD9ZoSZCf3TJZcL3JUIWCVzd5cha20s_7I,971
 openai/types/responses/response_in_progress_event.py,sha256=uvYzRXq4v6LuXY8fNyGbzbTt4tySoSskzz_hUFWc-64,518
-openai/types/responses/response_includable.py,sha256=nlkwv4jc7NQME_DYJVygBfZI2Z59QmUlcrve0KgIBII,400
+openai/types/responses/response_includable.py,sha256=A4cf5h8hd2QheP1IKGrvCThGy0mzyZ73MeiJ-yhTNU4,436
 openai/types/responses/response_incomplete_event.py,sha256=0EP3BJzI2E6VXcpEvaPenBKHGocEZbFjToSMMktUo7U,516
-openai/types/responses/response_input_content.py,sha256=MaZ-MNnZvhM2stSUKdhofXrdM9BzFjSJQal7UDVAQaI,542
-openai/types/responses/response_input_content_param.py,sha256=1q_4oG8Q0DAGnQlS-OBNZxMD7k69jfra7AnXkkqfyr4,537
-openai/types/responses/response_input_file.py,sha256=pr_t75zb0LomPFsCW9-8-GPCiCiY4Cajtit6MIpYAZ8,626
-openai/types/responses/response_input_file_param.py,sha256=iALK66fSKnUqQM3SF9A_vI5ZeuS05T7XVKYMSvFm2lc,641
+openai/types/responses/response_input_audio.py,sha256=OUNcmcb1VfKnxNIuDRunZNGp564UHOHUreiWhmQzOUE,574
+openai/types/responses/response_input_audio_param.py,sha256=-B87XBc8ndNEgOkm7U5ZI380fEmkDcAPa9fIzIPb7q4,673
+openai/types/responses/response_input_content.py,sha256=Xh7fU7qlvBR_-RjNSK0Nny8vqAFLAtKxrOJu8IwaPpE,620
+openai/types/responses/response_input_content_param.py,sha256=nnfvXgu3yeQdEMxwRkJQ69sCQvYpQOxqd5TJNx0xlbo,632
+openai/types/responses/response_input_file.py,sha256=Sp8QjnKF3XgUbPXRRpOhJAnlpbyVdAFM8AY-9Xa3JZo,717
+openai/types/responses/response_input_file_param.py,sha256=1v_0w7IsMTeasMI97k5RtWF2XsqJGEgoV7Urzm7_Rio,715
 openai/types/responses/response_input_image.py,sha256=zHA7iFssu0aFcivwzyurAJgGpFdmzxq1BooVp5magsI,778
 openai/types/responses/response_input_image_param.py,sha256=5qhS_nF1GH3buGga8HSz9Ds2gVqQ8OqhfhkvssciIHE,830
-openai/types/responses/response_input_item_param.py,sha256=8tUhXUceoqhYGzTmZZsnZHuOdxN1uCb7l-PSIpck8_8,9370
+openai/types/responses/response_input_item.py,sha256=o-uhywXY8scND8qVqVGKzd-MKbllzyRsjU5Wug2V6Ps,9101
+openai/types/responses/response_input_item_param.py,sha256=CSzrMeHdhOxx-XOJJyMp_P15JXy7q7dJ0L6lTCsFk5g,9642
 openai/types/responses/response_input_message_content_list.py,sha256=LEaQ_x6dRt3w5Sl7R-Ewu89KlLyGFhMf31OHAHPD3U8,329
-openai/types/responses/response_input_message_content_list_param.py,sha256=cbbqvs4PcK8CRsNCQqoA4w6stJCRNOQSiJozwC18urs,666
+openai/types/responses/response_input_message_content_list_param.py,sha256=cCy7w0Qwk8aiID12bCM6JidTSlPqNsVeDXho3DUX2n4,761
 openai/types/responses/response_input_message_item.py,sha256=_zXthGtO0zstLvIHg9XesNAme6yNa8JOejkBYLwXm70,1029
-openai/types/responses/response_input_param.py,sha256=KA6n4A3F_vcEWwELiUNUCbuUnrw91agObX7tuO4cX0Q,9458
+openai/types/responses/response_input_param.py,sha256=3t232yWf5z_gK9QejJujbt64JncZGlVJugH90Kl5CEc,9736
 openai/types/responses/response_input_text.py,sha256=L7ikIc1qFUSjB9FLeKiy6uwa2y-TkN1bMMgq7PpGOuE,375
 openai/types/responses/response_input_text_param.py,sha256=N9k0QajI4grRD44GKOz4qG4nrU_au1kVZWmwX3o0koU,441
 openai/types/responses/response_item.py,sha256=TciYLydeQfdkGXqlD_DXd4BG3b5Z1sbT6ydgJ7AIIAc,5918
 openai/types/responses/response_item_list.py,sha256=uGGJlbBtuaNadG9PjebjngvtKdXTcI7MIvF05m7qtjc,665
-openai/types/responses/response_mcp_call_arguments_delta_event.py,sha256=oz-FqtII3ZfwKOxGWcWzQiCcNPT_R3dWS8wU5cPNLYs,742
-openai/types/responses/response_mcp_call_arguments_done_event.py,sha256=Gdr6ofOts6UFbw9ob84kUm9ZDGf7KgFHuOz7gxBAOGg,730
-openai/types/responses/response_mcp_call_completed_event.py,sha256=vTaN9eks6TnaX2_SffIrefvtiHYfPSfbK7wXxaiF-IQ,445
-openai/types/responses/response_mcp_call_failed_event.py,sha256=1KULLG9M03vBDMSpRCs8m5DmA-Mhc3ghmDVCekten3g,433
+openai/types/responses/response_mcp_call_arguments_delta_event.py,sha256=dq4_Z156rwK6F9_97sgEOZJHNNPxt6ZfGHX8b_MSWS8,778
+openai/types/responses/response_mcp_call_arguments_done_event.py,sha256=16ETbPuAreyAapg7rKMLWtSOlu6-mxfrkJUfVKiV9dM,752
+openai/types/responses/response_mcp_call_completed_event.py,sha256=ylzTH1FOI2Ha8PABzWOF_ais1_GgMsBmUklaTkR18bU,600
+openai/types/responses/response_mcp_call_failed_event.py,sha256=BmPnCvz72x-lgUK6x8Svmxo1y4ep0FJWYh5ROgYyuCU,582
 openai/types/responses/response_mcp_call_in_progress_event.py,sha256=Em1Xni2Ah6m7pF4wsvI_7Q0UMIlHsd75uF0r2Z6RI14,638
-openai/types/responses/response_mcp_list_tools_completed_event.py,sha256=lP31g-jwrmy1D-vMoiAPnv5iuClidUDL30-65yZ8Nb8,467
-openai/types/responses/response_mcp_list_tools_failed_event.py,sha256=Oh8DdfO5J_yohg-k5jOl9MWFOxwaqtCnld4MMYf8E5M,455
-openai/types/responses/response_mcp_list_tools_in_progress_event.py,sha256=pu4ncfaKR6-Qlu9ksjb1vkfxoxzTrn5bEaBqQ1ukVEM,473
-openai/types/responses/response_output_item.py,sha256=M_T0HOjh0POw0R92nMsMHblv4G5GJYays_FxmOHqaKs,4575
+openai/types/responses/response_mcp_list_tools_completed_event.py,sha256=3tLqKFzakR7H9_gPdYBzyLlKmIOrjtWuULex2069EY0,637
+openai/types/responses/response_mcp_list_tools_failed_event.py,sha256=NhjpRJ5jTrsc7qhQYL9aKTdL6FT6LClZB03G25WySQM,604
+openai/types/responses/response_mcp_list_tools_in_progress_event.py,sha256=_mfZNKGLIVvEmvmfBie4Q5QMUmzAiSyjdHQdORfcqWY,646
+openai/types/responses/response_output_item.py,sha256=rZk_B9avi1xxzktkaSXqEduSjHz3VHSKIMIPuvMYbUQ,4669
 openai/types/responses/response_output_item_added_event.py,sha256=ct7JDhk7EzyD7oDFVFx1X8T2hblAuDQea3GPXY61Fzw,644
 openai/types/responses/response_output_item_done_event.py,sha256=adnds7wknAbha4-USAUosKuQTMFwA58pZC842VUrJO0,652
 openai/types/responses/response_output_message.py,sha256=FXVWYe6pptTXvCxwadX602dL4xNjl1GKugTOrlFCBuU,1104
 openai/types/responses/response_output_message_param.py,sha256=VfnkR1ClDhUq3uoGsrp-HmmYoDmkY6X3wNcdXC7NHjU,1148
-openai/types/responses/response_output_refusal.py,sha256=9Ni2xOahKhnGVTcYtIUPKQQS5X3Yf7mb_QhkYqmzECA,387
-openai/types/responses/response_output_refusal_param.py,sha256=HGCDx6nfm5l9ZnCCH6sWbPiuB20evywnsbmQsd_05b0,453
-openai/types/responses/response_output_text.py,sha256=NS-Bl1bhQ7Hlxlg7f9sDeVDdnc8EkHVpEj6ceJQmmHo,2678
-openai/types/responses/response_output_text_annotation_added_event.py,sha256=3PbnaN9s58uDznF-ga6W5peMlYlEteqIA7XTQBVuum0,963
-openai/types/responses/response_output_text_param.py,sha256=kQKZfFqeoxB5BEgoORHO3KRPYHSxucalTv77VNABw9c,2961
+openai/types/responses/response_output_refusal.py,sha256=oraX9ZXcD4B7w8t9jcbZPACp-8puytJX_1SSQfTAy_M,388
+openai/types/responses/response_output_refusal_param.py,sha256=kCxtRvVJ6PF75Svmd3JUXyV_W-m9VqV-SpjSe6VUt3Y,454
+openai/types/responses/response_output_text.py,sha256=dZwIefV0zZmQJZ-7jfbgQwu6BJRHuFlG3V_AjxNRy3s,2810
+openai/types/responses/response_output_text_annotation_added_event.py,sha256=xGlSoFd2n9hjLeVKCQPh-yBtI2uS-d3ckJBHHmEoXg4,963
+openai/types/responses/response_output_text_param.py,sha256=H9Hq_D5Unp1Y1m4QDblzpcJiZ-5yDuhCtQSvIYSVddY,3113
+openai/types/responses/response_prompt.py,sha256=hIkV3qs1eSvczvxif_w-QSAIRuUjNc-Iukl447udRQ4,936
+openai/types/responses/response_prompt_param.py,sha256=SC4_UYJudF-inMfJ-PBNRGPOO0gNE9IbQ3ZO0loqzVY,1027
 openai/types/responses/response_queued_event.py,sha256=EDgtn58yhHg9784KjOwIK5_qRxZOnRdX25gKNMCt958,508
-openai/types/responses/response_reasoning_delta_event.py,sha256=-spXoAqeEofsw0xM10Ulo9UQYMXX8xCVFcfJWKQUGHY,801
-openai/types/responses/response_reasoning_done_event.py,sha256=vWbyyahV8D2lyia07Cb4qivk1NF9KUZt479MnotjkIw,774
-openai/types/responses/response_reasoning_item.py,sha256=Yu53k8F4FJlH_YtkWi2w7ACY5A4BRGLiI0T6OMAZZkc,1177
-openai/types/responses/response_reasoning_item_param.py,sha256=I9YCrF8qX612JupQYb3K-nX39P-dwgRhJnJbw1_F7hM,1270
-openai/types/responses/response_reasoning_summary_delta_event.py,sha256=bIXoC-0e1QzrhbCkJm7S0CF2QqENjNCl5jmIikoq6dc,855
-openai/types/responses/response_reasoning_summary_done_event.py,sha256=rxj1Z-YhjC_3HSWecFooGsC9_1jzXQwpsHrMZ6nzRrE,814
+openai/types/responses/response_reasoning_item.py,sha256=bPpkc35rJGE83wv4MiUTCHBC1FC86WHQMESna27PDPA,1421
+openai/types/responses/response_reasoning_item_param.py,sha256=k-muDy6hROsWHQXXvBd16_Vf9RRJUoknuIAx2HTX3BU,1534
 openai/types/responses/response_reasoning_summary_part_added_event.py,sha256=wFecLMHuG4cmznOQvr9lD31qg9ebU8E6T7IVXxTR3EM,1006
 openai/types/responses/response_reasoning_summary_part_done_event.py,sha256=VhU-pOK6fGfCsarOUZ5PD-GTHIvKspOuiWqG709_KMM,997
 openai/types/responses/response_reasoning_summary_text_delta_event.py,sha256=GtOuch2QaTXItNJR9hk0Y9TD5s_INjc22a9-e52KfBM,846
 openai/types/responses/response_reasoning_summary_text_done_event.py,sha256=_fPOh7N6naMEHcRv42nUlb9vKC9lI8BJ0ll20T1ejzg,833
+openai/types/responses/response_reasoning_text_delta_event.py,sha256=Bv6wVhRCIve81iyl8287xssRVbg1SRZA8__GCx3Lrec,841
+openai/types/responses/response_reasoning_text_done_event.py,sha256=4F30ObYxJKBjjoXbz5Vsij4PVWo_5M3FjPlMTT8Q29Q,788
 openai/types/responses/response_refusal_delta_event.py,sha256=ss7m9NX5doTFE6g79k3iBK_z5gXstGFeM2Z2gcO-cPo,770
 openai/types/responses/response_refusal_done_event.py,sha256=0iI5jIbuDuHAPnzSK0zWVf8RdjiXTt1HoYEVy4ngIKI,775
-openai/types/responses/response_retrieve_params.py,sha256=3JxduurHl61uWGONfDgfKhVOo6ppYxfIS6PTBjKZZJY,1834
+openai/types/responses/response_retrieve_params.py,sha256=Y_4UacCQ7xUYXc7_QTCJt-zLzIuv-PWocNQ1k0RnPsw,2372
 openai/types/responses/response_status.py,sha256=289NTnFcyk0195A2E15KDILXNLpHbfo6q4tcvezYWgs,278
-openai/types/responses/response_stream_event.py,sha256=HfOM5SY4bG3ifDSfMN4XMy6HP299WYZSbEki-sepTnc,6868
-openai/types/responses/response_text_config.py,sha256=rHEatq9T7_rt7D2Zlt9AUjFOQNBg_NTGfoec-7hx444,999
-openai/types/responses/response_text_config_param.py,sha256=kJ2FWZdHPQO9uXFXtZ7wYtj_RdzkPea8SF3OpTLfXDs,1036
-openai/types/responses/response_text_delta_event.py,sha256=bigKDi02n-trLZgBW8Dq6KMaMSvZ9sHecP-wRt8eKkk,769
-openai/types/responses/response_text_done_event.py,sha256=S2zoaEmYylSkfzj0NGGVjUXluC6MXi-It4JqZ-nL9bY,775
+openai/types/responses/response_stream_event.py,sha256=uPEbNTxXOaiEFRVt_PbdeecyfS9rgjaYU7m15NIvSbo,6916
+openai/types/responses/response_text_config.py,sha256=dM28UJfEjLSKBcRHNmBQJjkZSVdZ-vDFccPTVmXYs00,1352
+openai/types/responses/response_text_config_param.py,sha256=348GrnnGUF8fGEfRSW-Vw1wFoqTqQw7FfcgIvc1usCg,1381
+openai/types/responses/response_text_delta_event.py,sha256=e96nx3l-1Q3r9jCGyGgiH-siauP5Ka4LJ8THgUrkEXk,1374
+openai/types/responses/response_text_done_event.py,sha256=PDENYq1-kdZD19eps5qY3-Ih96obk75iUSVO-XUmkig,1380
 openai/types/responses/response_usage.py,sha256=DFA8WjqKGl7iGCmZl2G18y48xT82UTZ_NCKm0MAuRDY,945
 openai/types/responses/response_web_search_call_completed_event.py,sha256=gWv2xgDeGbvN0oqm96uuecGBy1SkbF_yNA56h5hMlOE,698
 openai/types/responses/response_web_search_call_in_progress_event.py,sha256=XxOSK7EI1d0WDkfG5jgU_LIXz72CGixqp4uYW88-dY8,704
 openai/types/responses/response_web_search_call_searching_event.py,sha256=sYr9K30DjDeD_h5Jj41OwoTrvUkF--dCQGnQuEnggcw,698
-openai/types/responses/tool.py,sha256=6QpXUyCi2m0hO3eieZd3HUirlm3BW9T0oMexjiiY91I,5364
+openai/types/responses/tool.py,sha256=HC_PWExosVKGGJ6DKCcBYbU9ipPIRBu_fZfZe2G4pg4,8438
+openai/types/responses/tool_choice_allowed.py,sha256=I0bB6Gq7aIswr3mWH3TN6aOgtun01Kaopa72AhZJG9I,1023
+openai/types/responses/tool_choice_allowed_param.py,sha256=PMokbtPLR48_b_ZNe0AMyZx-C-OrcwPsbeX31DpoIwE,1107
+openai/types/responses/tool_choice_custom.py,sha256=xi7cPj8VJn4qYXXSkZwFoV_WdYbyGwEVTDIcdHL9AQo,382
+openai/types/responses/tool_choice_custom_param.py,sha256=0ZHVrSkRkVFuCC27k6TQKy2hBoCDt6NB2f8fVnLNrXM,448
 openai/types/responses/tool_choice_function.py,sha256=X51PqYW8HMrJcxSkaTCF-uDG_KetD_6WqU1TgmCPR-k,384
 openai/types/responses/tool_choice_function_param.py,sha256=UzIJgiqJV7fj0nRDWyzwxpwJmZd0czZVciq4ffvfl_4,450
+openai/types/responses/tool_choice_mcp.py,sha256=iq6CwniC-hOQ9TmH4D4Wo6hT5V0J_4XbZ1TTtf0xEf8,481
+openai/types/responses/tool_choice_mcp_param.py,sha256=E4VcW1YhjYJgYaSw74NuluyM9WylELUZIs7-s4u-N1A,540
 openai/types/responses/tool_choice_options.py,sha256=gJHrNT72mRECrN7hQKRHAOA-OS0JJo51YnXvUcMfqMQ,237
-openai/types/responses/tool_choice_types.py,sha256=BZc2_G4B5s9eE6zHDj2YBnkSxEBLr-a7cD9OGXlR8Bc,767
-openai/types/responses/tool_choice_types_param.py,sha256=SD_zM018xHA4V-1XlEw3XzQ6T-nwwQYV7CKNtB-_qrw,869
-openai/types/responses/tool_param.py,sha256=qS-L1hfXNMOu2OXMZqAYHykblLrSgd6-pPk1Yh7MbJQ,5427
-openai/types/responses/web_search_tool.py,sha256=fxH0MSyeuXljQrWMb5FQeVM0dEiVdfgKEZK95ysbrJA,1455
-openai/types/responses/web_search_tool_param.py,sha256=Y99uTiH6B2TDaJeda9bttq7M6Ysx-Po7OZCr6wrC4q0,1482
-openai/types/shared/__init__.py,sha256=vWsaaMGhtO2wF8GSbICC2fDYk0To-olrzMUcfRiYKPU,991
-openai/types/shared/all_models.py,sha256=JQiXx-rIXkNLmkcs7vL8zlp3urPUJNa70gE9-i55eOA,467
-openai/types/shared/chat_model.py,sha256=ke-pqEgbyePPcpFQDwzAljK6RhFvzZwAFtAbe6U_iu4,1575
+openai/types/responses/tool_choice_types.py,sha256=-3FM-g4h0122Aq2CxEqiNt2A4hjYWPrJJ9MKh_hEROs,740
+openai/types/responses/tool_choice_types_param.py,sha256=_EqjVdOTy8bjKho3ZGdwYAgc11PaXp804jkBvj9dCz4,838
+openai/types/responses/tool_param.py,sha256=D5QkwR6KszzpocepbXPg8Ygc3QPhaLP_G8Ge24jxRWk,8392
+openai/types/responses/web_search_preview_tool.py,sha256=jIoIdmR4tzsIjT2a_5II0tHCnJsea4HTirBR2u00hFk,1469
+openai/types/responses/web_search_preview_tool_param.py,sha256=W64kS2h1cm2lY9ODnp_YoLojRyjei9SZq2UU7X2AJ48,1496
+openai/types/responses/web_search_tool.py,sha256=WuPSLv-W8j8LQvUyHA7S6gGtJrQmGP_t0QCrbh6qPYI,1821
+openai/types/responses/web_search_tool_param.py,sha256=6iMdaKKYaO7bTUzSfmfw3owAjiQGh55qgjr8E1geCPc,1862
+openai/types/shared/__init__.py,sha256=EVk-X1P3R7YWmlYmrbpMrjAeZEfVfudF-Tw7fbOC90o,1267
+openai/types/shared/all_models.py,sha256=iwrAzh3I17lQZl0AvG7vpAlGLvEYCZyOtvChufZv8eg,611
+openai/types/shared/chat_model.py,sha256=6VpDw8bZPrezzjN8UfBwKpIWokakgU-12rdLzQulLHo,1731
 openai/types/shared/comparison_filter.py,sha256=Y77SD30trdlW0E8BUIMHrugp2N_4I78JJabD2Px6edU,766
 openai/types/shared/compound_filter.py,sha256=QhKPeKKdtWvMDDO85YLKUGgdxBQfrYiFimjadAM31Bs,581
+openai/types/shared/custom_tool_input_format.py,sha256=cO7pX1O0k8J6FgERYUqNjafjjYiwS7GCmIw3E_xSiVQ,773
 openai/types/shared/error_object.py,sha256=G7SGPZ9Qw3gewTKbi3fK69eM6L2Ur0C2D57N8iEapJA,305
-openai/types/shared/function_definition.py,sha256=8a5uHoIKrkrwTgfwTyE9ly4PgsZ3iLA_yRUAjubTb7Y,1447
+openai/types/shared/function_definition.py,sha256=2F07J5Q7r2Iwg74dC5rarhwWTnt579Y5LUrNc8OdqSc,1475
 openai/types/shared/function_parameters.py,sha256=Dkc_pm98zCKyouQmYrl934cK8ZWX7heY_IIyunW8x7c,236
 openai/types/shared/metadata.py,sha256=DC0SFof2EeVvFK0EsmQH8W5b_HnpI_bdp47s51E5LKw,213
-openai/types/shared/reasoning.py,sha256=e9daklz1mYX8L3kksgwmRffLs-Rm8h0rA7_2UvTlRG8,1251
-openai/types/shared/reasoning_effort.py,sha256=g_H_wr52XEosQt8OqeKB5v5KbqIV5z5LaoRTxxfKC-c,268
+openai/types/shared/reasoning.py,sha256=FvPkybiYMTz2wqeTAAm0f1nWqUlvTXT1IEnCXzwU95Q,1241
+openai/types/shared/reasoning_effort.py,sha256=oK9lKsN8e2SZ8jV49MZ7PBxbnCP1MxGUQDLYMxlGQYE,279
 openai/types/shared/response_format_json_object.py,sha256=E1KGMUZnaj8fLnQXQC8_m9rMp8F6vIqeR9T1RmFNvE4,352
 openai/types/shared/response_format_json_schema.py,sha256=SsiLtgrudK4Dvxi2Kx0qUFiBQt26y5uGw_33te7L0Gg,1568
 openai/types/shared/response_format_text.py,sha256=p_JASD-xQ4ZveWnAtSoB8a19kVYc9vOZeg6WRMYHKDE,326
-openai/types/shared/responses_model.py,sha256=M0Se0iUQBncS3iF7LAGA0r7tRx1Pc14kCgpmdGFtm14,477
-openai/types/shared_params/__init__.py,sha256=0NOlmiuWaKkKF6oO8RFcnMfhA0tZOc7A4a94iF_BEg0,891
-openai/types/shared_params/chat_model.py,sha256=2OhZt8X-QLqFhkdedm54iWrZ_kn7joExNmlmJMP8cf4,1611
+openai/types/shared/response_format_text_grammar.py,sha256=PvmYxTEH_2r2nJsacTs6_Yw88ED1VbBuQJy_jZVbZwo,418
+openai/types/shared/response_format_text_python.py,sha256=Rfkd4jhzndD0Nw5H6LLnR4Y3MySyTz331MwoxcBL-Ek,342
+openai/types/shared/responses_model.py,sha256=JRAPcWBTgTFtsejFYHdN_MUJ77wk3TkE9Ju6ExkEjiM,621
+openai/types/shared_params/__init__.py,sha256=Jtx94DUXqIaXTb7Sgsx3MPoB9nViBlYEy0DlQ3VcOJU,976
+openai/types/shared_params/chat_model.py,sha256=S0JO3lMtaZ7CG8ZvjYcRls-CF5qLL7AUUDuj1peeKDE,1767
 openai/types/shared_params/comparison_filter.py,sha256=ayLPPfnlufcZnpgmWXZ-iuwpacUk5L7_hITuDyegFiQ,832
 openai/types/shared_params/compound_filter.py,sha256=dJrqaoOVY8QBEZPCjjD3hhf4qwcJLJ26jgK4N85bEFc,646
-openai/types/shared_params/function_definition.py,sha256=ciMXqn1tFXnp1tg9weJW0uvtyvMLrnph3WXMg4IG1Vk,1482
+openai/types/shared_params/custom_tool_input_format.py,sha256=ifDywFgUir2J2CPm1vyNcGnwl6nJFQsMFF1-qOvAdJA,769
+openai/types/shared_params/function_definition.py,sha256=6JjuRmXIofTv76GCC4XFssqgZw-iKbBazjWqKerfq6Q,1510
 openai/types/shared_params/function_parameters.py,sha256=UvxKz_3b9b5ECwWr8RFrIH511htbU2JZsp9Z9BMkF-o,272
 openai/types/shared_params/metadata.py,sha256=YCb9eFyy17EuLwtVHjUBUjW2FU8SbWp4NV-aEr_it54,249
-openai/types/shared_params/reasoning.py,sha256=PO6oxPasppSk0Y9BfCDcKgetv-0siotGajfM4Kkt98w,1265
-openai/types/shared_params/reasoning_effort.py,sha256=cS4fD2p16byhxLByCiptb-sZgl-4PSAPlfRvMpGDUo4,304
+openai/types/shared_params/reasoning.py,sha256=iHGUp7rPlMczbNWCJe4Jaz0IMBpRBGaxUfU8qkbbZoA,1255
+openai/types/shared_params/reasoning_effort.py,sha256=d_oflloFU0aeSyJrEZKwpwi0kZNUsg8rEZ4XUU-5eoE,315
 openai/types/shared_params/response_format_json_object.py,sha256=aEdVMoEkiEVE_YX6pfj5VqRVqfRIPju5hU-lqNubhVE,398
 openai/types/shared_params/response_format_json_schema.py,sha256=iCr7oU2jaHmVAi60mG90uksfv1QQjtvrVT9Vd3paE0k,1529
 openai/types/shared_params/response_format_text.py,sha256=N3-JNmbAjreYMj8KBkYb5kZhbblR9ds_6vwYLzUAWDA,372
-openai/types/shared_params/responses_model.py,sha256=SBgN7dLsP-oA7umyIyGnma1Ode5AR6GPoaq51WcjyOg,521
+openai/types/shared_params/responses_model.py,sha256=Q_LwiGeR1Cb2Uikzq6MJDOJOLaM8s1rBVOT9EvryquU,665
 openai/types/static_file_chunking_strategy.py,sha256=JmAzT2-9eaG9ZTH8X0jS1IVCOE3Jgi1PzE11oMST3Fc,595
 openai/types/static_file_chunking_strategy_object.py,sha256=MTwQ1olGZHoC26xxCKw0U0RvWORIJLgWzNWRQ1V0KmA,424
 openai/types/static_file_chunking_strategy_object_param.py,sha256=OwAOs1PT2ygBm4RpzHVVsr-93-Uqjg_IcCoNhtEPT7I,508
 openai/types/static_file_chunking_strategy_param.py,sha256=kCMmgyOxO0XIF2wjCWjUXtyn9S6q_7mNmyUCauqrjsg,692
 openai/types/upload.py,sha256=lFrEOsbVJwQ6jzzhn307AvBVjyF85lYHdig5ZvQQypE,1207
-openai/types/upload_complete_params.py,sha256=7On-iVAlA9p_nksLSFPBPR4QbB0xEtAW-skyh7S9gR0,504
-openai/types/upload_create_params.py,sha256=ZiZr1yC6g2VqL7KEnw7lhE4kZvU-F3DfTAc2TPk-XBo,889
+openai/types/upload_complete_params.py,sha256=PW5mCxJt7eg7F5sttX5LCE43m9FX8oZs3P5i9HvjRoU,527
+openai/types/upload_create_params.py,sha256=n9BNQ7GasHGCQf7poS5NKSEQM8eUCzb6rRBVFqylmlw,1507
 openai/types/uploads/__init__.py,sha256=fDsmd3L0nIWbFldbViOLvcQavsFA4SL3jsXDfAueAck,242
 openai/types/uploads/part_create_params.py,sha256=pBByUzngaj70ov1knoSo_gpeBjaWP9D5EdiHwiG4G7U,362
 openai/types/uploads/upload_part.py,sha256=U9953cr9lJJLWEfhTiwHphRzLKARq3gWAWqrjxbhTR4,590
 openai/types/vector_store.py,sha256=hS30tSgL_s1BC04nIHfZL95-uD60t5Oe44JUQnVD8T8,2470
-openai/types/vector_store_create_params.py,sha256=F8uSdmchzrYLc80Xq4B12sqZQXKuSIHMhsgVXu6fn1I,1724
+openai/types/vector_store_create_params.py,sha256=mmOkVJk2qH2SeUos0p1keKCFYp7xUTWe00ielN0LCpE,1764
 openai/types/vector_store_deleted.py,sha256=BbtnlZ0Z5f4ncDyHLKrEfmY6Uuc0xOg3WBxvMoR8Wxk,307
 openai/types/vector_store_list_params.py,sha256=KeSeQaEdqO2EiPEVtq1Nun-uRRdkfwW0P8aHeCmL5zA,1226
-openai/types/vector_store_search_params.py,sha256=XEtL0rVNf0q3Hw6BGGUDWzVobsp421HHLBgo8E4_FXY,1128 +openai/types/vector_store_search_params.py,sha256=EnYfNFP4dgovZeLLPeGofA3TCJatJDYt4aoppMOto9g,1262 openai/types/vector_store_search_response.py,sha256=qlhdAjqLPZg_JQmsqQCzAgT2Pxc2C-vGZmh64kR8y-M,1156 openai/types/vector_store_update_params.py,sha256=RJm0qkqLOsHjhPIiOWPNwkrEIqHjDukyZT52mle4gWc,1240 openai/types/vector_stores/__init__.py,sha256=F_DyW6EqxOJTBPKE5LUSzgTibcZM6axMo-irysr52ro,818 -openai/types/vector_stores/file_batch_create_params.py,sha256=2Rfno13Ue0arNpndNSXxw3j1LOK7MwVwB2q7HFCGJMo,1261 +openai/types/vector_stores/file_batch_create_params.py,sha256=f931A3sW8V30WGXQYCU561EqG3XRxShgE_hSpu-hFrM,1302 openai/types/vector_stores/file_batch_list_files_params.py,sha256=FPpQvCQI2skyLB8YCuwdCj7RbO9ba1UjaHAtvrWxAbs,1451 openai/types/vector_stores/file_content_response.py,sha256=uAFvFDE_NVRzg0xm1fLJ2zEd62qzq8rPYko7xpDjbaU,367 openai/types/vector_stores/file_create_params.py,sha256=nTHWG0OMqqLRjWFH2qbif89fpCJQCzGGdXDjCqPbq1Y,1229 @@ -1352,5 +1819,22 @@ openai/types/vector_stores/file_update_params.py,sha256=NGah01luDW_W3psfsYa3Shls openai/types/vector_stores/vector_store_file.py,sha256=mfmXBL4EqHuaoamRnZ2TS1oX3k1okTREU2vLOrbVglw,2247 openai/types/vector_stores/vector_store_file_batch.py,sha256=MnRehH5Mc0VOhSCZtniMDz8eH72syy2RScmECR_BEhE,1456 openai/types/vector_stores/vector_store_file_deleted.py,sha256=sOds3FSmDBFhe25zoSAz2vHsmG2bo4s2PASgB_M6UU0,321 +openai/types/webhooks/__init__.py,sha256=T8XC8KrJNXiNUPevxpO4PJi__C-HZgd0TMg7D2bRPh4,1828 +openai/types/webhooks/batch_cancelled_webhook_event.py,sha256=9eadXH42hNN8ZEnkvT1xP4-tXJSSU1EnFo0407UphUU,770 +openai/types/webhooks/batch_completed_webhook_event.py,sha256=HTcSImBaYwlnm8wQdvjPaWzyFIS-KBSSA_E2WkQ1uqg,770 +openai/types/webhooks/batch_expired_webhook_event.py,sha256=fbrvrZrbQZNf_aPBm08HSD99NFaAHVjv4nQg3pNmh9w,756 +openai/types/webhooks/batch_failed_webhook_event.py,sha256=WRxFObJMtp7zPJTl_pa4ppVhKSxHwNMvQdqyR0CqdV8,751 +openai/types/webhooks/eval_run_canceled_webhook_event.py,sha256=hLoN9c6C5QDPJEOLpOInSiGRgqsrtZmwE3NIOjiowtM,757 +openai/types/webhooks/eval_run_failed_webhook_event.py,sha256=rMoiy66aVGgyA2Fxu3ypg1Q1moIj0yDyMsL4ZVJAe6s,743 +openai/types/webhooks/eval_run_succeeded_webhook_event.py,sha256=GFRFtx7JxtUGeWEoQRpbeE3oPoOhPhW1BskJOxuaFI8,758 +openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py,sha256=kFx4imcbFxTD4L4G6h6kSINfX7yLpo4GQDAuYBGd9wM,802 +openai/types/webhooks/fine_tuning_job_failed_webhook_event.py,sha256=YjfTRr2mvpiJB4IZkzcFNNLwnhrUKVKkLP7RpPgHTnA,783 +openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py,sha256=wxUg8-llqFJ6K--LI3JHXgTJ1IY2vCD7rO1eq8RWoYo,798 +openai/types/webhooks/realtime_call_incoming_webhook_event.py,sha256=E7mD7ZO6_1v_SAn60-8pDzR5q2WRM0cFygkJ1I-pUpo,1019 +openai/types/webhooks/response_cancelled_webhook_event.py,sha256=60u91Tcsy_qNaPDqQM_tqWQHXVoSB0-rodF3Llkzzmk,776 +openai/types/webhooks/response_completed_webhook_event.py,sha256=OGSfVNA6Vgugplf4LxXhSkk-ScVvElekoQeksT93z_Q,776 +openai/types/webhooks/response_failed_webhook_event.py,sha256=SWMK_kc1o8WKeQPZudQx7VwU25oAHf_yLR6fKdXKd2E,757 +openai/types/webhooks/response_incomplete_webhook_event.py,sha256=O0LrpnzzxClQf0vQOwF6s_5EAUxM4TdTfEd8uc84iLs,782 +openai/types/webhooks/unwrap_webhook_event.py,sha256=KrfVL0-NsOuWHtRGiJfGMYwI8blUr09vUqUVJdZNpDQ,2039 openai/types/websocket_connection_options.py,sha256=4cAWpv1KKp_9pvnez7pGYzO3s8zh1WvX2xpBhpe-96k,1840 openai/version.py,sha256=cjbXKO8Ut3aiv4YlQnugff7AdC48MpSndcx96q88Yb8,62 
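The RECORD additions above pull in a full set of typed webhook event modules (`batch.*`, `eval.run.*`, `fine_tuning.job.*`, `realtime.call.incoming`, `response.*`) plus the `unwrap_webhook_event` union that backs signature-verified parsing. A minimal consumption sketch, assuming a Flask app and the upstream `openai` import path (the vendored copy lives under `portkey_ai._vendor.openai`); the route and handler names are illustrative, not part of this diff:

```python
from flask import Flask, request
from openai import OpenAI

app = Flask(__name__)
client = OpenAI()  # signature verification reads OPENAI_WEBHOOK_SECRET


@app.route("/webhook", methods=["POST"])
def webhook():
    # unwrap() verifies the signature against the raw request body, then
    # parses it into one of the typed events vendored above
    event = client.webhooks.unwrap(request.data, request.headers)

    if event.type == "response.completed":
        print("response finished:", event.data.id)
    elif event.type == "fine_tuning.job.failed":
        print("fine-tune failed:", event.data.id)

    return "", 200
```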
diff --git a/portkey_ai/_vendor/openai-1.86.0.dist-info/REQUESTED b/portkey_ai/_vendor/openai-1.107.2.dist-info/REQUESTED similarity index 100% rename from portkey_ai/_vendor/openai-1.86.0.dist-info/REQUESTED rename to portkey_ai/_vendor/openai-1.107.2.dist-info/REQUESTED diff --git a/portkey_ai/_vendor/openai-1.86.0.dist-info/WHEEL b/portkey_ai/_vendor/openai-1.107.2.dist-info/WHEEL similarity index 100% rename from portkey_ai/_vendor/openai-1.86.0.dist-info/WHEEL rename to portkey_ai/_vendor/openai-1.107.2.dist-info/WHEEL diff --git a/portkey_ai/_vendor/openai-1.86.0.dist-info/entry_points.txt b/portkey_ai/_vendor/openai-1.107.2.dist-info/entry_points.txt similarity index 100% rename from portkey_ai/_vendor/openai-1.86.0.dist-info/entry_points.txt rename to portkey_ai/_vendor/openai-1.107.2.dist-info/entry_points.txt diff --git a/portkey_ai/_vendor/openai-1.86.0.dist-info/licenses/LICENSE b/portkey_ai/_vendor/openai-1.107.2.dist-info/licenses/LICENSE similarity index 100% rename from portkey_ai/_vendor/openai-1.86.0.dist-info/licenses/LICENSE rename to portkey_ai/_vendor/openai-1.107.2.dist-info/licenses/LICENSE diff --git a/portkey_ai/_vendor/openai/__init__.py b/portkey_ai/_vendor/openai/__init__.py index 92beeb5d..a03b49e0 100644 --- a/portkey_ai/_vendor/openai/__init__.py +++ b/portkey_ai/_vendor/openai/__init__.py @@ -30,9 +30,10 @@ LengthFinishReasonError, UnprocessableEntityError, APIResponseValidationError, + InvalidWebhookSignatureError, ContentFilterFinishReasonError, ) -from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient +from ._base_client import DefaultHttpxClient, DefaultAioHttpClient, DefaultAsyncHttpxClient from ._utils._logs import setup_logging as _setup_logging from ._legacy_response import HttpxBinaryResponseContent as HttpxBinaryResponseContent @@ -62,6 +63,7 @@ "InternalServerError", "LengthFinishReasonError", "ContentFilterFinishReasonError", + "InvalidWebhookSignatureError", "Timeout", "RequestOptions", "Client", @@ -77,6 +79,7 @@ "DEFAULT_CONNECTION_LIMITS", "DefaultHttpxClient", "DefaultAsyncHttpxClient", + "DefaultAioHttpClient", ] if not _t.TYPE_CHECKING: @@ -120,6 +123,8 @@ project: str | None = None +webhook_secret: str | None = None + base_url: str | _httpx.URL | None = None timeout: float | Timeout | None = DEFAULT_TIMEOUT @@ -182,6 +187,17 @@ def project(self, value: str | None) -> None: # type: ignore project = value + @property # type: ignore + @override + def webhook_secret(self) -> str | None: + return webhook_secret + + @webhook_secret.setter # type: ignore + def webhook_secret(self, value: str | None) -> None: # type: ignore + global webhook_secret + + webhook_secret = value + @property @override def base_url(self) -> _httpx.URL: @@ -334,6 +350,7 @@ def _load_client() -> OpenAI: # type: ignore[reportUnusedFunction] api_key=api_key, organization=organization, project=project, + webhook_secret=webhook_secret, base_url=base_url, timeout=timeout, max_retries=max_retries, @@ -362,11 +379,14 @@ def _reset_client() -> None: # type: ignore[reportUnusedFunction] models as models, batches as batches, uploads as uploads, + realtime as realtime, + webhooks as webhooks, responses as responses, containers as containers, embeddings as embeddings, completions as completions, fine_tuning as fine_tuning, moderations as moderations, + conversations as conversations, vector_stores as vector_stores, ) diff --git a/portkey_ai/_vendor/openai/_base_client.py b/portkey_ai/_vendor/openai/_base_client.py index 8c1ea8ad..aa1ee13a 100644 --- 
a/portkey_ai/_vendor/openai/_base_client.py +++ b/portkey_ai/_vendor/openai/_base_client.py @@ -59,7 +59,7 @@ ModelBuilderProtocol, ) from ._utils import SensitiveHeadersFilter, is_dict, is_list, asyncify, is_given, lru_cache, is_mapping -from ._compat import PYDANTIC_V2, model_copy, model_dump +from ._compat import PYDANTIC_V1, model_copy, model_dump from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type from ._response import ( APIResponse, @@ -234,7 +234,7 @@ def _set_private_attributes( model: Type[_T], options: FinalRequestOptions, ) -> None: - if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None: + if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None: self.__pydantic_private__ = {} self._model = model @@ -322,7 +322,7 @@ def _set_private_attributes( client: AsyncAPIClient, options: FinalRequestOptions, ) -> None: - if PYDANTIC_V2 and getattr(self, "__pydantic_private__", None) is None: + if (not PYDANTIC_V1) and getattr(self, "__pydantic_private__", None) is None: self.__pydantic_private__ = {} self._model = model @@ -531,6 +531,18 @@ def _build_request( # work around https://github.com/encode/httpx/discussions/2880 kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")} + is_body_allowed = options.method.lower() != "get" + + if is_body_allowed: + if isinstance(json_data, bytes): + kwargs["content"] = json_data + else: + kwargs["json"] = json_data if is_given(json_data) else None + kwargs["files"] = files + else: + headers.pop("Content-Type", None) + kwargs.pop("data", None) + # TODO: report this error to httpx return self._client.build_request( # pyright: ignore[reportUnknownMemberType] headers=headers, @@ -542,8 +554,6 @@ def _build_request( # so that passing a `TypedDict` doesn't cause an error. 
# https://github.com/microsoft/pyright/issues/3526#event-6715453066 params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None, - json=json_data if is_given(json_data) else None, - files=files, **kwargs, ) @@ -1105,7 +1115,14 @@ def _process_response( origin = get_origin(cast_to) or cast_to - if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if ( + inspect.isclass(origin) + and issubclass(origin, BaseAPIResponse) + # we only want to actually return the custom BaseAPIResponse class if we're + # returning the raw response, or if we're not streaming SSE, as if we're streaming + # SSE then `cast_to` doesn't actively reflect the type we need to parse into + and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER))) + ): if not issubclass(origin, APIResponse): raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}") @@ -1316,6 +1333,24 @@ def __init__(self, **kwargs: Any) -> None: super().__init__(**kwargs) +try: + import httpx_aiohttp +except ImportError: + + class _DefaultAioHttpClient(httpx.AsyncClient): + def __init__(self, **_kwargs: Any) -> None: + raise RuntimeError("To use the aiohttp client you must have installed the package with the `aiohttp` extra") +else: + + class _DefaultAioHttpClient(httpx_aiohttp.HttpxAiohttpClient): # type: ignore + def __init__(self, **kwargs: Any) -> None: + kwargs.setdefault("timeout", DEFAULT_TIMEOUT) + kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS) + kwargs.setdefault("follow_redirects", True) + + super().__init__(**kwargs) + + if TYPE_CHECKING: DefaultAsyncHttpxClient = httpx.AsyncClient """An alias to `httpx.AsyncClient` that provides the same defaults that this SDK @@ -1324,8 +1359,12 @@ def __init__(self, **kwargs: Any) -> None: This is useful because overriding the `http_client` with your own instance of `httpx.AsyncClient` will result in httpx's defaults being used, not ours. 
""" + + DefaultAioHttpClient = httpx.AsyncClient + """An alias to `httpx.AsyncClient` that changes the default HTTP transport to `aiohttp`.""" else: DefaultAsyncHttpxClient = _DefaultAsyncHttpxClient + DefaultAioHttpClient = _DefaultAioHttpClient class AsyncHttpxClientWrapper(DefaultAsyncHttpxClient): @@ -1623,7 +1662,14 @@ async def _process_response( origin = get_origin(cast_to) or cast_to - if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if ( + inspect.isclass(origin) + and issubclass(origin, BaseAPIResponse) + # we only want to actually return the custom BaseAPIResponse class if we're + # returning the raw response, or if we're not streaming SSE, as if we're streaming + # SSE then `cast_to` doesn't actively reflect the type we need to parse into + and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER))) + ): if not issubclass(origin, AsyncAPIResponse): raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}") diff --git a/portkey_ai/_vendor/openai/_client.py b/portkey_ai/_vendor/openai/_client.py index 4ed9a2f5..2be32fe1 100644 --- a/portkey_ai/_vendor/openai/_client.py +++ b/portkey_ai/_vendor/openai/_client.py @@ -3,7 +3,7 @@ from __future__ import annotations import os -from typing import TYPE_CHECKING, Any, Union, Mapping +from typing import TYPE_CHECKING, Any, Union, Mapping, Callable, Awaitable from typing_extensions import Self, override import httpx @@ -25,6 +25,7 @@ get_async_library, ) from ._compat import cached_property +from ._models import FinalRequestOptions from ._version import __version__ from ._streaming import Stream as Stream, AsyncStream as AsyncStream from ._exceptions import OpenAIError, APIStatusError @@ -45,18 +46,21 @@ models, batches, uploads, + realtime, responses, containers, embeddings, completions, fine_tuning, moderations, + conversations, vector_stores, ) from .resources.files import Files, AsyncFiles from .resources.images import Images, AsyncImages from .resources.models import Models, AsyncModels from .resources.batches import Batches, AsyncBatches + from .resources.webhooks import Webhooks, AsyncWebhooks from .resources.beta.beta import Beta, AsyncBeta from .resources.chat.chat import Chat, AsyncChat from .resources.embeddings import Embeddings, AsyncEmbeddings @@ -65,9 +69,11 @@ from .resources.evals.evals import Evals, AsyncEvals from .resources.moderations import Moderations, AsyncModerations from .resources.uploads.uploads import Uploads, AsyncUploads + from .resources.realtime.realtime import Realtime, AsyncRealtime from .resources.responses.responses import Responses, AsyncResponses from .resources.containers.containers import Containers, AsyncContainers from .resources.fine_tuning.fine_tuning import FineTuning, AsyncFineTuning + from .resources.conversations.conversations import Conversations, AsyncConversations from .resources.vector_stores.vector_stores import VectorStores, AsyncVectorStores __all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "OpenAI", "AsyncOpenAI", "Client", "AsyncClient"] @@ -78,6 +84,7 @@ class OpenAI(SyncAPIClient): api_key: str organization: str | None project: str | None + webhook_secret: str | None websocket_base_url: str | httpx.URL | None """Base URL for WebSocket connections. 
@@ -90,9 +97,10 @@ class OpenAI(SyncAPIClient): def __init__( self, *, - api_key: str | None = None, + api_key: str | None | Callable[[], str] = None, organization: str | None = None, project: str | None = None, + webhook_secret: str | None = None, base_url: str | httpx.URL | None = None, websocket_base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, @@ -119,6 +127,7 @@ def __init__( - `api_key` from `OPENAI_API_KEY` - `organization` from `OPENAI_ORG_ID` - `project` from `OPENAI_PROJECT_ID` + - `webhook_secret` from `OPENAI_WEBHOOK_SECRET` """ if api_key is None: api_key = os.environ.get("OPENAI_API_KEY") @@ -126,7 +135,12 @@ def __init__( raise OpenAIError( "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable" ) - self.api_key = api_key + if callable(api_key): + self.api_key = "" + self._api_key_provider: Callable[[], str] | None = api_key + else: + self.api_key = api_key + self._api_key_provider = None if organization is None: organization = os.environ.get("OPENAI_ORG_ID") @@ -136,6 +150,10 @@ def __init__( project = os.environ.get("OPENAI_PROJECT_ID") self.project = project + if webhook_secret is None: + webhook_secret = os.environ.get("OPENAI_WEBHOOK_SECRET") + self.webhook_secret = webhook_secret + self.websocket_base_url = websocket_base_url if base_url is None: @@ -216,6 +234,12 @@ def vector_stores(self) -> VectorStores: return VectorStores(self) + @cached_property + def webhooks(self) -> Webhooks: + from .resources.webhooks import Webhooks + + return Webhooks(self) + @cached_property def beta(self) -> Beta: from .resources.beta import Beta @@ -240,6 +264,18 @@ def responses(self) -> Responses: return Responses(self) + @cached_property + def realtime(self) -> Realtime: + from .resources.realtime import Realtime + + return Realtime(self) + + @cached_property + def conversations(self) -> Conversations: + from .resources.conversations import Conversations + + return Conversations(self) + @cached_property def evals(self) -> Evals: from .resources.evals import Evals @@ -265,10 +301,22 @@ def with_streaming_response(self) -> OpenAIWithStreamedResponse: def qs(self) -> Querystring: return Querystring(array_format="brackets") + def _refresh_api_key(self) -> None: + if self._api_key_provider: + self.api_key = self._api_key_provider() + + @override + def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions: + self._refresh_api_key() + return super()._prepare_options(options) + @property @override def auth_headers(self) -> dict[str, str]: api_key = self.api_key + if not api_key: + # if the api key is an empty string, encoding the header will fail + return {} return {"Authorization": f"Bearer {api_key}"} @property @@ -285,9 +333,10 @@ def default_headers(self) -> dict[str, str | Omit]: def copy( self, *, - api_key: str | None = None, + api_key: str | Callable[[], str] | None = None, organization: str | None = None, project: str | None = None, + webhook_secret: str | None = None, websocket_base_url: str | httpx.URL | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, @@ -322,9 +371,10 @@ def copy( http_client = http_client or self._client return self.__class__( - api_key=api_key or self.api_key, + api_key=api_key or self._api_key_provider or self.api_key, organization=organization or self.organization, project=project or self.project, + webhook_secret=webhook_secret or 
self.webhook_secret, websocket_base_url=websocket_base_url or self.websocket_base_url, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, @@ -379,6 +429,7 @@ class AsyncOpenAI(AsyncAPIClient): api_key: str organization: str | None project: str | None + webhook_secret: str | None websocket_base_url: str | httpx.URL | None """Base URL for WebSocket connections. @@ -391,9 +442,10 @@ class AsyncOpenAI(AsyncAPIClient): def __init__( self, *, - api_key: str | None = None, + api_key: str | Callable[[], Awaitable[str]] | None = None, organization: str | None = None, project: str | None = None, + webhook_secret: str | None = None, base_url: str | httpx.URL | None = None, websocket_base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, @@ -420,6 +472,7 @@ def __init__( - `api_key` from `OPENAI_API_KEY` - `organization` from `OPENAI_ORG_ID` - `project` from `OPENAI_PROJECT_ID` + - `webhook_secret` from `OPENAI_WEBHOOK_SECRET` """ if api_key is None: api_key = os.environ.get("OPENAI_API_KEY") @@ -427,7 +480,12 @@ def __init__( raise OpenAIError( "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable" ) - self.api_key = api_key + if callable(api_key): + self.api_key = "" + self._api_key_provider: Callable[[], Awaitable[str]] | None = api_key + else: + self.api_key = api_key + self._api_key_provider = None if organization is None: organization = os.environ.get("OPENAI_ORG_ID") @@ -437,6 +495,10 @@ def __init__( project = os.environ.get("OPENAI_PROJECT_ID") self.project = project + if webhook_secret is None: + webhook_secret = os.environ.get("OPENAI_WEBHOOK_SECRET") + self.webhook_secret = webhook_secret + self.websocket_base_url = websocket_base_url if base_url is None: @@ -517,6 +579,12 @@ def vector_stores(self) -> AsyncVectorStores: return AsyncVectorStores(self) + @cached_property + def webhooks(self) -> AsyncWebhooks: + from .resources.webhooks import AsyncWebhooks + + return AsyncWebhooks(self) + @cached_property def beta(self) -> AsyncBeta: from .resources.beta import AsyncBeta @@ -541,6 +609,18 @@ def responses(self) -> AsyncResponses: return AsyncResponses(self) + @cached_property + def realtime(self) -> AsyncRealtime: + from .resources.realtime import AsyncRealtime + + return AsyncRealtime(self) + + @cached_property + def conversations(self) -> AsyncConversations: + from .resources.conversations import AsyncConversations + + return AsyncConversations(self) + @cached_property def evals(self) -> AsyncEvals: from .resources.evals import AsyncEvals @@ -566,10 +646,22 @@ def with_streaming_response(self) -> AsyncOpenAIWithStreamedResponse: def qs(self) -> Querystring: return Querystring(array_format="brackets") + async def _refresh_api_key(self) -> None: + if self._api_key_provider: + self.api_key = await self._api_key_provider() + + @override + async def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions: + await self._refresh_api_key() + return await super()._prepare_options(options) + @property @override def auth_headers(self) -> dict[str, str]: api_key = self.api_key + if not api_key: + # if the api key is an empty string, encoding the header will fail + return {} return {"Authorization": f"Bearer {api_key}"} @property @@ -586,9 +678,10 @@ def default_headers(self) -> dict[str, str | Omit]: def copy( self, *, - api_key: str | None = None, + api_key: str | Callable[[], Awaitable[str]] | None = 
None, organization: str | None = None, project: str | None = None, + webhook_secret: str | None = None, websocket_base_url: str | httpx.URL | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, @@ -623,9 +716,10 @@ def copy( http_client = http_client or self._client return self.__class__( - api_key=api_key or self.api_key, + api_key=api_key or self._api_key_provider or self.api_key, organization=organization or self.organization, project=project or self.project, + webhook_secret=webhook_secret or self.webhook_secret, websocket_base_url=websocket_base_url or self.websocket_base_url, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, @@ -765,6 +859,18 @@ def responses(self) -> responses.ResponsesWithRawResponse: return ResponsesWithRawResponse(self._client.responses) + @cached_property + def realtime(self) -> realtime.RealtimeWithRawResponse: + from .resources.realtime import RealtimeWithRawResponse + + return RealtimeWithRawResponse(self._client.realtime) + + @cached_property + def conversations(self) -> conversations.ConversationsWithRawResponse: + from .resources.conversations import ConversationsWithRawResponse + + return ConversationsWithRawResponse(self._client.conversations) + @cached_property def evals(self) -> evals.EvalsWithRawResponse: from .resources.evals import EvalsWithRawResponse @@ -868,6 +974,18 @@ def responses(self) -> responses.AsyncResponsesWithRawResponse: return AsyncResponsesWithRawResponse(self._client.responses) + @cached_property + def realtime(self) -> realtime.AsyncRealtimeWithRawResponse: + from .resources.realtime import AsyncRealtimeWithRawResponse + + return AsyncRealtimeWithRawResponse(self._client.realtime) + + @cached_property + def conversations(self) -> conversations.AsyncConversationsWithRawResponse: + from .resources.conversations import AsyncConversationsWithRawResponse + + return AsyncConversationsWithRawResponse(self._client.conversations) + @cached_property def evals(self) -> evals.AsyncEvalsWithRawResponse: from .resources.evals import AsyncEvalsWithRawResponse @@ -971,6 +1089,18 @@ def responses(self) -> responses.ResponsesWithStreamingResponse: return ResponsesWithStreamingResponse(self._client.responses) + @cached_property + def realtime(self) -> realtime.RealtimeWithStreamingResponse: + from .resources.realtime import RealtimeWithStreamingResponse + + return RealtimeWithStreamingResponse(self._client.realtime) + + @cached_property + def conversations(self) -> conversations.ConversationsWithStreamingResponse: + from .resources.conversations import ConversationsWithStreamingResponse + + return ConversationsWithStreamingResponse(self._client.conversations) + @cached_property def evals(self) -> evals.EvalsWithStreamingResponse: from .resources.evals import EvalsWithStreamingResponse @@ -1074,6 +1204,18 @@ def responses(self) -> responses.AsyncResponsesWithStreamingResponse: return AsyncResponsesWithStreamingResponse(self._client.responses) + @cached_property + def realtime(self) -> realtime.AsyncRealtimeWithStreamingResponse: + from .resources.realtime import AsyncRealtimeWithStreamingResponse + + return AsyncRealtimeWithStreamingResponse(self._client.realtime) + + @cached_property + def conversations(self) -> conversations.AsyncConversationsWithStreamingResponse: + from .resources.conversations import AsyncConversationsWithStreamingResponse + + return AsyncConversationsWithStreamingResponse(self._client.conversations) + @cached_property 
def evals(self) -> evals.AsyncEvalsWithStreamingResponse: from .resources.evals import AsyncEvalsWithStreamingResponse diff --git a/portkey_ai/_vendor/openai/_compat.py b/portkey_ai/_vendor/openai/_compat.py index 87fc3707..73a1f3ea 100644 --- a/portkey_ai/_vendor/openai/_compat.py +++ b/portkey_ai/_vendor/openai/_compat.py @@ -12,14 +12,13 @@ _T = TypeVar("_T") _ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel) -# --------------- Pydantic v2 compatibility --------------- +# --------------- Pydantic v2, v3 compatibility --------------- # Pyright incorrectly reports some of our functions as overriding a method when they don't # pyright: reportIncompatibleMethodOverride=false -PYDANTIC_V2 = pydantic.VERSION.startswith("2.") +PYDANTIC_V1 = pydantic.VERSION.startswith("1.") -# v1 re-exports if TYPE_CHECKING: def parse_date(value: date | StrBytesIntFloat) -> date: # noqa: ARG001 @@ -44,90 +43,92 @@ def is_typeddict(type_: type[Any]) -> bool: # noqa: ARG001 ... else: - if PYDANTIC_V2: - from pydantic.v1.typing import ( + # v1 re-exports + if PYDANTIC_V1: + from pydantic.typing import ( get_args as get_args, is_union as is_union, get_origin as get_origin, is_typeddict as is_typeddict, is_literal_type as is_literal_type, ) - from pydantic.v1.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime + from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime else: - from pydantic.typing import ( + from ._utils import ( get_args as get_args, is_union as is_union, get_origin as get_origin, + parse_date as parse_date, is_typeddict as is_typeddict, + parse_datetime as parse_datetime, is_literal_type as is_literal_type, ) - from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime # refactored config if TYPE_CHECKING: from pydantic import ConfigDict as ConfigDict else: - if PYDANTIC_V2: - from pydantic import ConfigDict - else: + if PYDANTIC_V1: # TODO: provide an error message here? 
ConfigDict = None + else: + from pydantic import ConfigDict as ConfigDict # renamed methods / properties def parse_obj(model: type[_ModelT], value: object) -> _ModelT: - if PYDANTIC_V2: - return model.model_validate(value) - else: + if PYDANTIC_V1: return cast(_ModelT, model.parse_obj(value)) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + else: + return model.model_validate(value) def field_is_required(field: FieldInfo) -> bool: - if PYDANTIC_V2: - return field.is_required() - return field.required # type: ignore + if PYDANTIC_V1: + return field.required # type: ignore + return field.is_required() def field_get_default(field: FieldInfo) -> Any: value = field.get_default() - if PYDANTIC_V2: - from pydantic_core import PydanticUndefined - - if value == PydanticUndefined: - return None + if PYDANTIC_V1: return value + from pydantic_core import PydanticUndefined + + if value == PydanticUndefined: + return None return value def field_outer_type(field: FieldInfo) -> Any: - if PYDANTIC_V2: - return field.annotation - return field.outer_type_ # type: ignore + if PYDANTIC_V1: + return field.outer_type_ # type: ignore + return field.annotation def get_model_config(model: type[pydantic.BaseModel]) -> Any: - if PYDANTIC_V2: - return model.model_config - return model.__config__ # type: ignore + if PYDANTIC_V1: + return model.__config__ # type: ignore + return model.model_config def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]: - if PYDANTIC_V2: - return model.model_fields - return model.__fields__ # type: ignore + if PYDANTIC_V1: + return model.__fields__ # type: ignore + return model.model_fields def model_copy(model: _ModelT, *, deep: bool = False) -> _ModelT: - if PYDANTIC_V2: - return model.model_copy(deep=deep) - return model.copy(deep=deep) # type: ignore + if PYDANTIC_V1: + return model.copy(deep=deep) # type: ignore + return model.model_copy(deep=deep) def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str: - if PYDANTIC_V2: - return model.model_dump_json(indent=indent) - return model.json(indent=indent) # type: ignore + if PYDANTIC_V1: + return model.json(indent=indent) # type: ignore + return model.model_dump_json(indent=indent) def model_dump( @@ -139,14 +140,14 @@ def model_dump( warnings: bool = True, mode: Literal["json", "python"] = "python", ) -> dict[str, Any]: - if PYDANTIC_V2 or hasattr(model, "model_dump"): + if (not PYDANTIC_V1) or hasattr(model, "model_dump"): return model.model_dump( mode=mode, exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, # warnings are not supported in Pydantic v1 - warnings=warnings if PYDANTIC_V2 else True, + warnings=True if PYDANTIC_V1 else warnings, ) return cast( "dict[str, Any]", @@ -159,21 +160,21 @@ def model_dump( def model_parse(model: type[_ModelT], data: Any) -> _ModelT: - if PYDANTIC_V2: - return model.model_validate(data) - return model.parse_obj(data) # pyright: ignore[reportDeprecated] + if PYDANTIC_V1: + return model.parse_obj(data) # pyright: ignore[reportDeprecated] + return model.model_validate(data) def model_parse_json(model: type[_ModelT], data: str | bytes) -> _ModelT: - if PYDANTIC_V2: - return model.model_validate_json(data) - return model.parse_raw(data) # pyright: ignore[reportDeprecated] + if PYDANTIC_V1: + return model.parse_raw(data) # pyright: ignore[reportDeprecated] + return model.model_validate_json(data) def model_json_schema(model: type[_ModelT]) -> dict[str, Any]: - if PYDANTIC_V2: - return model.model_json_schema() - 
return model.schema() # pyright: ignore[reportDeprecated] + if PYDANTIC_V1: + return model.schema() # pyright: ignore[reportDeprecated] + return model.model_json_schema() # generic models @@ -182,17 +183,16 @@ def model_json_schema(model: type[_ModelT]) -> dict[str, Any]: class GenericModel(pydantic.BaseModel): ... else: - if PYDANTIC_V2: + if PYDANTIC_V1: + import pydantic.generics + + class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ... + else: # there no longer needs to be a distinction in v2 but # we still have to create our own subclass to avoid # inconsistent MRO ordering errors class GenericModel(pydantic.BaseModel): ... - else: - import pydantic.generics - - class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ... - # cached properties if TYPE_CHECKING: diff --git a/portkey_ai/_vendor/openai/_exceptions.py b/portkey_ai/_vendor/openai/_exceptions.py index e326ed95..09016dfe 100644 --- a/portkey_ai/_vendor/openai/_exceptions.py +++ b/portkey_ai/_vendor/openai/_exceptions.py @@ -24,6 +24,7 @@ "InternalServerError", "LengthFinishReasonError", "ContentFilterFinishReasonError", + "InvalidWebhookSignatureError", ] @@ -154,3 +155,7 @@ def __init__(self) -> None: super().__init__( f"Could not parse response content as the request was rejected by the content filter", ) + + +class InvalidWebhookSignatureError(ValueError): + """Raised when a webhook signature is invalid, meaning the computed signature does not match the expected signature.""" diff --git a/portkey_ai/_vendor/openai/_files.py b/portkey_ai/_vendor/openai/_files.py index 801a0d29..7b23ca08 100644 --- a/portkey_ai/_vendor/openai/_files.py +++ b/portkey_ai/_vendor/openai/_files.py @@ -69,12 +69,12 @@ def _transform_file(file: FileTypes) -> HttpxFileTypes: return file if is_tuple_t(file): - return (file[0], _read_file_content(file[1]), *file[2:]) + return (file[0], read_file_content(file[1]), *file[2:]) raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") -def _read_file_content(file: FileContent) -> HttpxFileContent: +def read_file_content(file: FileContent) -> HttpxFileContent: if isinstance(file, os.PathLike): return pathlib.Path(file).read_bytes() return file @@ -111,12 +111,12 @@ async def _async_transform_file(file: FileTypes) -> HttpxFileTypes: return file if is_tuple_t(file): - return (file[0], await _async_read_file_content(file[1]), *file[2:]) + return (file[0], await async_read_file_content(file[1]), *file[2:]) raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") -async def _async_read_file_content(file: FileContent) -> HttpxFileContent: +async def async_read_file_content(file: FileContent) -> HttpxFileContent: if isinstance(file, os.PathLike): return await anyio.Path(file).read_bytes() diff --git a/portkey_ai/_vendor/openai/_models.py b/portkey_ai/_vendor/openai/_models.py index 065e8da7..8ee8612d 100644 --- a/portkey_ai/_vendor/openai/_models.py +++ b/portkey_ai/_vendor/openai/_models.py @@ -5,6 +5,7 @@ from typing import TYPE_CHECKING, Any, Type, Tuple, Union, Generic, TypeVar, Callable, Optional, cast from datetime import date, datetime from typing_extensions import ( + List, Unpack, Literal, ClassVar, @@ -50,7 +51,7 @@ strip_annotated_type, ) from ._compat import ( - PYDANTIC_V2, + PYDANTIC_V1, ConfigDict, GenericModel as BaseGenericModel, get_args, @@ -83,11 +84,7 @@ class _ConfigProtocol(Protocol): class BaseModel(pydantic.BaseModel): - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict( - 
extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true")) - ) - else: + if PYDANTIC_V1: @property @override @@ -102,6 +99,10 @@ class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] def __repr_args__(self) -> ReprArgs: # we don't want these attributes to be included when something like `rich.print` is used return [arg for arg in super().__repr_args__() if arg[0] not in {"_request_id", "__exclude_fields__"}] + else: + model_config: ClassVar[ConfigDict] = ConfigDict( + extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true")) + ) if TYPE_CHECKING: _request_id: Optional[str] = None @@ -232,28 +233,32 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride] else: fields_values[name] = field_get_default(field) + extra_field_type = _get_extra_fields_type(__cls) + _extra = {} for key, value in values.items(): if key not in model_fields: - if PYDANTIC_V2: - _extra[key] = value - else: + parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value + + if PYDANTIC_V1: _fields_set.add(key) - fields_values[key] = value + fields_values[key] = parsed + else: + _extra[key] = parsed object.__setattr__(m, "__dict__", fields_values) - if PYDANTIC_V2: - # these properties are copied from Pydantic's `model_construct()` method - object.__setattr__(m, "__pydantic_private__", None) - object.__setattr__(m, "__pydantic_extra__", _extra) - object.__setattr__(m, "__pydantic_fields_set__", _fields_set) - else: + if PYDANTIC_V1: # init_private_attributes() does not exist in v2 m._init_private_attributes() # type: ignore # copied from Pydantic v1's `construct()` method object.__setattr__(m, "__fields_set__", _fields_set) + else: + # these properties are copied from Pydantic's `model_construct()` method + object.__setattr__(m, "__pydantic_private__", None) + object.__setattr__(m, "__pydantic_extra__", _extra) + object.__setattr__(m, "__pydantic_fields_set__", _fields_set) return m @@ -263,7 +268,7 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride] # although not in practice model_construct = construct - if not PYDANTIC_V2: + if PYDANTIC_V1: # we define aliases for some of the new pydantic v2 methods so # that we can just document these methods without having to specify # a specific pydantic version as some users may not know which @@ -324,7 +329,7 @@ def model_dump( exclude_none=exclude_none, ) - return cast(dict[str, Any], json_safe(dumped)) if mode == "json" else dumped + return cast("dict[str, Any]", json_safe(dumped)) if mode == "json" else dumped @override def model_dump_json( @@ -383,15 +388,32 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object: if value is None: return field_get_default(field) - if PYDANTIC_V2: - type_ = field.annotation - else: + if PYDANTIC_V1: type_ = cast(type, field.outer_type_) # type: ignore + else: + type_ = field.annotation # type: ignore if type_ is None: raise RuntimeError(f"Unexpected field type is None for {key}") - return construct_type(value=value, type_=type_) + return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None)) + + +def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None: + if PYDANTIC_V1: + # TODO + return None + + schema = cls.__pydantic_core_schema__ + if schema["type"] == "model": + fields = schema["schema"] + if fields["type"] == "model-fields": + extras = fields.get("extras_schema") + if extras and "cls" in extras: + # mypy can't narrow 
the type + return extras["cls"] # type: ignore[no-any-return] + + return None def is_basemodel(type_: type) -> bool: @@ -445,7 +467,7 @@ def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T: return cast(_T, construct_type(value=value, type_=type_)) -def construct_type(*, value: object, type_: object) -> object: +def construct_type(*, value: object, type_: object, metadata: Optional[List[Any]] = None) -> object: """Loose coercion to the expected type with construction of nested values. If the given value does not match the expected type then it is returned as-is. @@ -463,8 +485,10 @@ def construct_type(*, value: object, type_: object) -> object: type_ = type_.__value__ # type: ignore[unreachable] # unwrap `Annotated[T, ...]` -> `T` - if is_annotated_type(type_): - meta: tuple[Any, ...] = get_args(type_)[1:] + if metadata is not None and len(metadata) > 0: + meta: tuple[Any, ...] = tuple(metadata) + elif is_annotated_type(type_): + meta = get_args(type_)[1:] type_ = extract_type_arg(type_, 0) else: meta = tuple() @@ -629,30 +653,30 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, for variant in get_args(union): variant = strip_annotated_type(variant) if is_basemodel_type(variant): - if PYDANTIC_V2: - field = _extract_field_schema_pv2(variant, discriminator_field_name) - if not field: + if PYDANTIC_V1: + field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] + if not field_info: continue # Note: if one variant defines an alias then they all should - discriminator_alias = field.get("serialization_alias") - - field_schema = field["schema"] + discriminator_alias = field_info.alias - if field_schema["type"] == "literal": - for entry in cast("LiteralSchema", field_schema)["expected"]: + if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation): + for entry in get_args(annotation): if isinstance(entry, str): mapping[entry] = variant else: - field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] - if not field_info: + field = _extract_field_schema_pv2(variant, discriminator_field_name) + if not field: continue # Note: if one variant defines an alias then they all should - discriminator_alias = field_info.alias + discriminator_alias = field.get("serialization_alias") - if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation): - for entry in get_args(annotation): + field_schema = field["schema"] + + if field_schema["type"] == "literal": + for entry in cast("LiteralSchema", field_schema)["expected"]: if isinstance(entry, str): mapping[entry] = variant @@ -711,7 +735,7 @@ def add_request_id(obj: BaseModel, request_id: str | None) -> None: # in Pydantic v1, using setattr like we do above causes the attribute # to be included when serializing the model which we don't want in this # case so we need to explicitly exclude it - if not PYDANTIC_V2: + if PYDANTIC_V1: try: exclude_fields = obj.__exclude_fields__ # type: ignore except AttributeError: @@ -730,7 +754,7 @@ class GenericModel(BaseGenericModel, BaseModel): pass -if PYDANTIC_V2: +if not PYDANTIC_V1: from pydantic import TypeAdapter as _TypeAdapter _CachedTypeAdapter = cast("TypeAdapter[object]", lru_cache(maxsize=None)(_TypeAdapter)) @@ -798,12 +822,12 @@ class FinalRequestOptions(pydantic.BaseModel): json_data: Union[Body, None] = None 
extra_json: Union[AnyMapping, None] = None - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True) - else: + if PYDANTIC_V1: class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] arbitrary_types_allowed: bool = True + else: + model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True) def get_max_retries(self, max_retries: int) -> int: if isinstance(self.max_retries, NotGiven): @@ -836,9 +860,9 @@ def construct( # type: ignore key: strip_not_given(value) for key, value in values.items() } - if PYDANTIC_V2: - return super().model_construct(_fields_set, **kwargs) - return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated] + if PYDANTIC_V1: + return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated] + return super().model_construct(_fields_set, **kwargs) if not TYPE_CHECKING: # type checkers incorrectly complain about this assignment diff --git a/portkey_ai/_vendor/openai/_module_client.py b/portkey_ai/_vendor/openai/_module_client.py index fb7c7549..4ecc2842 100644 --- a/portkey_ai/_vendor/openai/_module_client.py +++ b/portkey_ai/_vendor/openai/_module_client.py @@ -10,6 +10,7 @@ from .resources.images import Images from .resources.models import Models from .resources.batches import Batches + from .resources.webhooks import Webhooks from .resources.beta.beta import Beta from .resources.chat.chat import Chat from .resources.embeddings import Embeddings @@ -18,9 +19,11 @@ from .resources.evals.evals import Evals from .resources.moderations import Moderations from .resources.uploads.uploads import Uploads + from .resources.realtime.realtime import Realtime from .resources.responses.responses import Responses from .resources.containers.containers import Containers from .resources.fine_tuning.fine_tuning import FineTuning + from .resources.conversations.conversations import Conversations from .resources.vector_stores.vector_stores import VectorStores from . 
import _load_client @@ -81,6 +84,18 @@ def __load__(self) -> Uploads: return _load_client().uploads +class WebhooksProxy(LazyProxy["Webhooks"]): + @override + def __load__(self) -> Webhooks: + return _load_client().webhooks + + +class RealtimeProxy(LazyProxy["Realtime"]): + @override + def __load__(self) -> Realtime: + return _load_client().realtime + + class ResponsesProxy(LazyProxy["Responses"]): @override def __load__(self) -> Responses: @@ -123,6 +138,12 @@ def __load__(self) -> VectorStores: return _load_client().vector_stores +class ConversationsProxy(LazyProxy["Conversations"]): + @override + def __load__(self) -> Conversations: + return _load_client().conversations + + chat: Chat = ChatProxy().__as_proxied__() beta: Beta = BetaProxy().__as_proxied__() files: Files = FilesProxy().__as_proxied__() @@ -132,6 +153,8 @@ def __load__(self) -> VectorStores: models: Models = ModelsProxy().__as_proxied__() batches: Batches = BatchesProxy().__as_proxied__() uploads: Uploads = UploadsProxy().__as_proxied__() +webhooks: Webhooks = WebhooksProxy().__as_proxied__() +realtime: Realtime = RealtimeProxy().__as_proxied__() responses: Responses = ResponsesProxy().__as_proxied__() embeddings: Embeddings = EmbeddingsProxy().__as_proxied__() containers: Containers = ContainersProxy().__as_proxied__() @@ -139,3 +162,4 @@ def __load__(self) -> VectorStores: moderations: Moderations = ModerationsProxy().__as_proxied__() fine_tuning: FineTuning = FineTuningProxy().__as_proxied__() vector_stores: VectorStores = VectorStoresProxy().__as_proxied__() +conversations: Conversations = ConversationsProxy().__as_proxied__() diff --git a/portkey_ai/_vendor/openai/_streaming.py b/portkey_ai/_vendor/openai/_streaming.py index f5621f92..f586de74 100644 --- a/portkey_ai/_vendor/openai/_streaming.py +++ b/portkey_ai/_vendor/openai/_streaming.py @@ -59,9 +59,11 @@ def __stream__(self) -> Iterator[_T]: if sse.data.startswith("[DONE]"): break - if sse.event is None or sse.event.startswith("response.") or sse.event.startswith("transcript."): + # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data + if sse.event and sse.event.startswith("thread."): data = sse.json() - if is_mapping(data) and data.get("error"): + + if sse.event == "error" and is_mapping(data) and data.get("error"): message = None error = data.get("error") if is_mapping(error): @@ -75,12 +77,10 @@ def __stream__(self) -> Iterator[_T]: body=data["error"], ) - yield process_data(data=data, cast_to=cast_to, response=response) - + yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) else: data = sse.json() - - if sse.event == "error" and is_mapping(data) and data.get("error"): + if is_mapping(data) and data.get("error"): message = None error = data.get("error") if is_mapping(error): @@ -94,7 +94,7 @@ def __stream__(self) -> Iterator[_T]: body=data["error"], ) - yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) + yield process_data(data=data, cast_to=cast_to, response=response) # Ensure the entire stream is consumed for _sse in iterator: @@ -161,9 +161,11 @@ async def __stream__(self) -> AsyncIterator[_T]: if sse.data.startswith("[DONE]"): break - if sse.event is None or sse.event.startswith("response.") or sse.event.startswith("transcript."): + # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data + if sse.event and sse.event.startswith("thread."): data = sse.json() - if 
is_mapping(data) and data.get("error"): + + if sse.event == "error" and is_mapping(data) and data.get("error"): message = None error = data.get("error") if is_mapping(error): @@ -177,12 +179,10 @@ async def __stream__(self) -> AsyncIterator[_T]: body=data["error"], ) - yield process_data(data=data, cast_to=cast_to, response=response) - + yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) else: data = sse.json() - - if sse.event == "error" and is_mapping(data) and data.get("error"): + if is_mapping(data) and data.get("error"): message = None error = data.get("error") if is_mapping(error): @@ -196,7 +196,7 @@ async def __stream__(self) -> AsyncIterator[_T]: body=data["error"], ) - yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) + yield process_data(data=data, cast_to=cast_to, response=response) # Ensure the entire stream is consumed async for _sse in iterator: diff --git a/portkey_ai/_vendor/openai/_types.py b/portkey_ai/_vendor/openai/_types.py index 5dae55f4..0e8ffa12 100644 --- a/portkey_ai/_vendor/openai/_types.py +++ b/portkey_ai/_vendor/openai/_types.py @@ -13,10 +13,21 @@ Mapping, TypeVar, Callable, + Iterator, Optional, Sequence, ) -from typing_extensions import Set, Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable +from typing_extensions import ( + Set, + Literal, + Protocol, + TypeAlias, + TypedDict, + SupportsIndex, + overload, + override, + runtime_checkable, +) import httpx import pydantic @@ -219,3 +230,26 @@ class _GenericAlias(Protocol): class HttpxSendArgs(TypedDict, total=False): auth: httpx.Auth follow_redirects: bool + + +_T_co = TypeVar("_T_co", covariant=True) + + +if TYPE_CHECKING: + # This works because str.__contains__ does not accept object (either in typeshed or at runtime) + # https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285 + class SequenceNotStr(Protocol[_T_co]): + @overload + def __getitem__(self, index: SupportsIndex, /) -> _T_co: ... + @overload + def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ... + def __contains__(self, value: object, /) -> bool: ... + def __len__(self) -> int: ... + def __iter__(self) -> Iterator[_T_co]: ... + def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ... + def count(self, value: Any, /) -> int: ... + def __reversed__(self) -> Iterator[_T_co]: ... 
+else: + # just point this to a normal `Sequence` at runtime to avoid having to special case + # deserializing our custom sequence type + SequenceNotStr = Sequence diff --git a/portkey_ai/_vendor/openai/_utils/__init__.py b/portkey_ai/_vendor/openai/_utils/__init__.py index bd01c088..963c83b6 100644 --- a/portkey_ai/_vendor/openai/_utils/__init__.py +++ b/portkey_ai/_vendor/openai/_utils/__init__.py @@ -11,7 +11,6 @@ lru_cache as lru_cache, is_mapping as is_mapping, is_tuple_t as is_tuple_t, - parse_date as parse_date, is_iterable as is_iterable, is_sequence as is_sequence, coerce_float as coerce_float, @@ -24,7 +23,6 @@ coerce_boolean as coerce_boolean, coerce_integer as coerce_integer, file_from_path as file_from_path, - parse_datetime as parse_datetime, is_azure_client as is_azure_client, strip_not_given as strip_not_given, deepcopy_minimal as deepcopy_minimal, @@ -35,12 +33,20 @@ maybe_coerce_integer as maybe_coerce_integer, is_async_azure_client as is_async_azure_client, ) +from ._compat import ( + get_args as get_args, + is_union as is_union, + get_origin as get_origin, + is_typeddict as is_typeddict, + is_literal_type as is_literal_type, +) from ._typing import ( is_list_type as is_list_type, is_union_type as is_union_type, extract_type_arg as extract_type_arg, is_iterable_type as is_iterable_type, is_required_type as is_required_type, + is_sequence_type as is_sequence_type, is_annotated_type as is_annotated_type, is_type_alias_type as is_type_alias_type, strip_annotated_type as strip_annotated_type, @@ -58,3 +64,4 @@ function_has_argument as function_has_argument, assert_signatures_in_sync as assert_signatures_in_sync, ) +from ._datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime diff --git a/portkey_ai/_vendor/openai/_utils/_compat.py b/portkey_ai/_vendor/openai/_utils/_compat.py new file mode 100644 index 00000000..dd703233 --- /dev/null +++ b/portkey_ai/_vendor/openai/_utils/_compat.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +import sys +import typing_extensions +from typing import Any, Type, Union, Literal, Optional +from datetime import date, datetime +from typing_extensions import get_args as _get_args, get_origin as _get_origin + +from .._types import StrBytesIntFloat +from ._datetime_parse import parse_date as _parse_date, parse_datetime as _parse_datetime + +_LITERAL_TYPES = {Literal, typing_extensions.Literal} + + +def get_args(tp: type[Any]) -> tuple[Any, ...]: + return _get_args(tp) + + +def get_origin(tp: type[Any]) -> type[Any] | None: + return _get_origin(tp) + + +def is_union(tp: Optional[Type[Any]]) -> bool: + if sys.version_info < (3, 10): + return tp is Union # type: ignore[comparison-overlap] + else: + import types + + return tp is Union or tp is types.UnionType + + +def is_typeddict(tp: Type[Any]) -> bool: + return typing_extensions.is_typeddict(tp) + + +def is_literal_type(tp: Type[Any]) -> bool: + return get_origin(tp) in _LITERAL_TYPES + + +def parse_date(value: Union[date, StrBytesIntFloat]) -> date: + return _parse_date(value) + + +def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime: + return _parse_datetime(value) diff --git a/portkey_ai/_vendor/openai/_utils/_datetime_parse.py b/portkey_ai/_vendor/openai/_utils/_datetime_parse.py new file mode 100644 index 00000000..7cb9d9e6 --- /dev/null +++ b/portkey_ai/_vendor/openai/_utils/_datetime_parse.py @@ -0,0 +1,136 @@ +""" +This file contains code from https://github.com/pydantic/pydantic/blob/main/pydantic/v1/datetime_parse.py +without the 
Pydantic v1 specific errors. +""" + +from __future__ import annotations + +import re +from typing import Dict, Union, Optional +from datetime import date, datetime, timezone, timedelta + +from .._types import StrBytesIntFloat + +date_expr = r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})" +time_expr = ( + r"(?P<hour>\d{1,2}):(?P<minute>\d{1,2})" + r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?" + r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$" +) + +date_re = re.compile(f"{date_expr}$") +datetime_re = re.compile(f"{date_expr}[T ]{time_expr}") + + +EPOCH = datetime(1970, 1, 1) +# if greater than this, the number is in ms, if less than or equal it's in seconds +# (in seconds this is 11th October 2603, in ms it's 20th August 1970) +MS_WATERSHED = int(2e10) +# slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9 +MAX_NUMBER = int(3e20) + + +def _get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]: + if isinstance(value, (int, float)): + return value + try: + return float(value) + except ValueError: + return None + except TypeError: + raise TypeError(f"invalid type; expected {native_expected_type}, string, bytes, int or float") from None + + +def _from_unix_seconds(seconds: Union[int, float]) -> datetime: + if seconds > MAX_NUMBER: + return datetime.max + elif seconds < -MAX_NUMBER: + return datetime.min + + while abs(seconds) > MS_WATERSHED: + seconds /= 1000 + dt = EPOCH + timedelta(seconds=seconds) + return dt.replace(tzinfo=timezone.utc) + + +def _parse_timezone(value: Optional[str]) -> Union[None, int, timezone]: + if value == "Z": + return timezone.utc + elif value is not None: + offset_mins = int(value[-2:]) if len(value) > 3 else 0 + offset = 60 * int(value[1:3]) + offset_mins + if value[0] == "-": + offset = -offset + return timezone(timedelta(minutes=offset)) + else: + return None + + +def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime: + """ + Parse a datetime/int/float/string and return a datetime.datetime. + + This function supports time zone offsets. When the input contains one, + the output uses a timezone with a fixed offset from UTC. + + Raise ValueError if the input is well formatted but not a valid datetime. + Raise ValueError if the input isn't well formatted. + """ + if isinstance(value, datetime): + return value + + number = _get_numeric(value, "datetime") + if number is not None: + return _from_unix_seconds(number) + + if isinstance(value, bytes): + value = value.decode() + + assert not isinstance(value, (float, int)) + + match = datetime_re.match(value) + if match is None: + raise ValueError("invalid datetime format") + + kw = match.groupdict() + if kw["microsecond"]: + kw["microsecond"] = kw["microsecond"].ljust(6, "0") + + tzinfo = _parse_timezone(kw.pop("tzinfo")) + kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None} + kw_["tzinfo"] = tzinfo + + return datetime(**kw_) # type: ignore + + +def parse_date(value: Union[date, StrBytesIntFloat]) -> date: + """ + Parse a date/int/float/string and return a datetime.date. + + Raise ValueError if the input is well formatted but not a valid date. + Raise ValueError if the input isn't well formatted.
+ """ + if isinstance(value, date): + if isinstance(value, datetime): + return value.date() + else: + return value + + number = _get_numeric(value, "date") + if number is not None: + return _from_unix_seconds(number).date() + + if isinstance(value, bytes): + value = value.decode() + + assert not isinstance(value, (float, int)) + match = date_re.match(value) + if match is None: + raise ValueError("invalid date format") + + kw = {k: int(v) for k, v in match.groupdict().items()} + + try: + return date(**kw) + except ValueError: + raise ValueError("invalid date format") from None diff --git a/portkey_ai/_vendor/openai/_utils/_transform.py b/portkey_ai/_vendor/openai/_utils/_transform.py index 4fd49a19..bc262ea3 100644 --- a/portkey_ai/_vendor/openai/_utils/_transform.py +++ b/portkey_ai/_vendor/openai/_utils/_transform.py @@ -16,18 +16,20 @@ lru_cache, is_mapping, is_iterable, + is_sequence, ) from .._files import is_base64_file_input +from ._compat import get_origin, is_typeddict from ._typing import ( is_list_type, is_union_type, extract_type_arg, is_iterable_type, is_required_type, + is_sequence_type, is_annotated_type, strip_annotated_type, ) -from .._compat import get_origin, model_dump, is_typeddict _T = TypeVar("_T") @@ -167,6 +169,8 @@ def _transform_recursive( Defaults to the same value as the `annotation` argument. """ + from .._compat import model_dump + if inner_type is None: inner_type = annotation @@ -184,6 +188,8 @@ def _transform_recursive( (is_list_type(stripped_type) and is_list(data)) # Iterable[T] or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) + # Sequence[T] + or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str)) ): # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually # intended as an iterable, so we don't transform it. @@ -329,6 +335,8 @@ async def _async_transform_recursive( Defaults to the same value as the `annotation` argument. """ + from .._compat import model_dump + if inner_type is None: inner_type = annotation @@ -346,6 +354,8 @@ async def _async_transform_recursive( (is_list_type(stripped_type) and is_list(data)) # Iterable[T] or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) + # Sequence[T] + or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str)) ): # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually # intended as an iterable, so we don't transform it. 
diff --git a/portkey_ai/_vendor/openai/_utils/_typing.py b/portkey_ai/_vendor/openai/_utils/_typing.py index 1bac9542..193109f3 100644 --- a/portkey_ai/_vendor/openai/_utils/_typing.py +++ b/portkey_ai/_vendor/openai/_utils/_typing.py @@ -15,7 +15,7 @@ from ._utils import lru_cache from .._types import InheritsGeneric -from .._compat import is_union as _is_union +from ._compat import is_union as _is_union def is_annotated_type(typ: type) -> bool: @@ -26,6 +26,11 @@ def is_list_type(typ: type) -> bool: return (get_origin(typ) or typ) == list +def is_sequence_type(typ: type) -> bool: + origin = get_origin(typ) or typ + return origin == typing_extensions.Sequence or origin == typing.Sequence or origin == _c_abc.Sequence + + def is_iterable_type(typ: type) -> bool: """If the given type is `typing.Iterable[T]`""" origin = get_origin(typ) or typ diff --git a/portkey_ai/_vendor/openai/_utils/_utils.py b/portkey_ai/_vendor/openai/_utils/_utils.py index 1e7d013b..4a23c96c 100644 --- a/portkey_ai/_vendor/openai/_utils/_utils.py +++ b/portkey_ai/_vendor/openai/_utils/_utils.py @@ -23,7 +23,6 @@ import sniffio from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike -from .._compat import parse_date as parse_date, parse_datetime as parse_datetime _T = TypeVar("_T") _TupleT = TypeVar("_TupleT", bound=Tuple[object, ...]) diff --git a/portkey_ai/_vendor/openai/_version.py b/portkey_ai/_vendor/openai/_version.py index c0f313e3..70f99588 100644 --- a/portkey_ai/_vendor/openai/_version.py +++ b/portkey_ai/_vendor/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.86.0" # x-release-please-version +__version__ = "1.107.2" # x-release-please-version diff --git a/portkey_ai/_vendor/openai/cli/_api/_main.py b/portkey_ai/_vendor/openai/cli/_api/_main.py index fe5a5e6f..b04a3e52 100644 --- a/portkey_ai/_vendor/openai/cli/_api/_main.py +++ b/portkey_ai/_vendor/openai/cli/_api/_main.py @@ -2,7 +2,7 @@ from argparse import ArgumentParser -from . import chat, audio, files, image, models, completions +from . import chat, audio, files, image, models, completions, fine_tuning def register_commands(parser: ArgumentParser) -> None: @@ -14,3 +14,4 @@ def register_commands(parser: ArgumentParser) -> None: files.register(subparsers) models.register(subparsers) completions.register(subparsers) + fine_tuning.register(subparsers) diff --git a/portkey_ai/_vendor/openai/cli/_api/fine_tuning/__init__.py b/portkey_ai/_vendor/openai/cli/_api/fine_tuning/__init__.py new file mode 100644 index 00000000..11a2dfcc --- /dev/null +++ b/portkey_ai/_vendor/openai/cli/_api/fine_tuning/__init__.py @@ -0,0 +1,13 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from argparse import ArgumentParser + +from . 
import jobs + +if TYPE_CHECKING: + from argparse import _SubParsersAction + + +def register(subparser: _SubParsersAction[ArgumentParser]) -> None: + jobs.register(subparser) diff --git a/portkey_ai/_vendor/openai/cli/_api/fine_tuning/jobs.py b/portkey_ai/_vendor/openai/cli/_api/fine_tuning/jobs.py new file mode 100644 index 00000000..806fa0f7 --- /dev/null +++ b/portkey_ai/_vendor/openai/cli/_api/fine_tuning/jobs.py @@ -0,0 +1,169 @@ +from __future__ import annotations + +import json +from typing import TYPE_CHECKING +from argparse import ArgumentParser + +from ..._utils import get_client, print_model +from ...._types import NOT_GIVEN, NotGivenOr +from ..._models import BaseModel +from ....pagination import SyncCursorPage +from ....types.fine_tuning import ( + FineTuningJob, + FineTuningJobEvent, +) + +if TYPE_CHECKING: + from argparse import _SubParsersAction + + +def register(subparser: _SubParsersAction[ArgumentParser]) -> None: + sub = subparser.add_parser("fine_tuning.jobs.create") + sub.add_argument( + "-m", + "--model", + help="The model to fine-tune.", + required=True, + ) + sub.add_argument( + "-F", + "--training-file", + help="The training file to fine-tune the model on.", + required=True, + ) + sub.add_argument( + "-H", + "--hyperparameters", + help="JSON string of hyperparameters to use for fine-tuning.", + type=str, + ) + sub.add_argument( + "-s", + "--suffix", + help="A suffix to add to the fine-tuned model name.", + ) + sub.add_argument( + "-V", + "--validation-file", + help="The validation file to use for fine-tuning.", + ) + sub.set_defaults(func=CLIFineTuningJobs.create, args_model=CLIFineTuningJobsCreateArgs) + + sub = subparser.add_parser("fine_tuning.jobs.retrieve") + sub.add_argument( + "-i", + "--id", + help="The ID of the fine-tuning job to retrieve.", + required=True, + ) + sub.set_defaults(func=CLIFineTuningJobs.retrieve, args_model=CLIFineTuningJobsRetrieveArgs) + + sub = subparser.add_parser("fine_tuning.jobs.list") + sub.add_argument( + "-a", + "--after", + help="Identifier for the last job from the previous pagination request. If provided, only jobs created after this job will be returned.", + ) + sub.add_argument( + "-l", + "--limit", + help="Number of fine-tuning jobs to retrieve.", + type=int, + ) + sub.set_defaults(func=CLIFineTuningJobs.list, args_model=CLIFineTuningJobsListArgs) + + sub = subparser.add_parser("fine_tuning.jobs.cancel") + sub.add_argument( + "-i", + "--id", + help="The ID of the fine-tuning job to cancel.", + required=True, + ) + sub.set_defaults(func=CLIFineTuningJobs.cancel, args_model=CLIFineTuningJobsCancelArgs) + + sub = subparser.add_parser("fine_tuning.jobs.list_events") + sub.add_argument( + "-i", + "--id", + help="The ID of the fine-tuning job to list events for.", + required=True, + ) + sub.add_argument( + "-a", + "--after", + help="Identifier for the last event from the previous pagination request. 
If provided, only events created after this event will be returned.", + ) + sub.add_argument( + "-l", + "--limit", + help="Number of fine-tuning job events to retrieve.", + type=int, + ) + sub.set_defaults(func=CLIFineTuningJobs.list_events, args_model=CLIFineTuningJobsListEventsArgs) + + +class CLIFineTuningJobsCreateArgs(BaseModel): + model: str + training_file: str + hyperparameters: NotGivenOr[str] = NOT_GIVEN + suffix: NotGivenOr[str] = NOT_GIVEN + validation_file: NotGivenOr[str] = NOT_GIVEN + + +class CLIFineTuningJobsRetrieveArgs(BaseModel): + id: str + + +class CLIFineTuningJobsListArgs(BaseModel): + after: NotGivenOr[str] = NOT_GIVEN + limit: NotGivenOr[int] = NOT_GIVEN + + +class CLIFineTuningJobsCancelArgs(BaseModel): + id: str + + +class CLIFineTuningJobsListEventsArgs(BaseModel): + id: str + after: NotGivenOr[str] = NOT_GIVEN + limit: NotGivenOr[int] = NOT_GIVEN + + +class CLIFineTuningJobs: + @staticmethod + def create(args: CLIFineTuningJobsCreateArgs) -> None: + hyperparameters = json.loads(str(args.hyperparameters)) if args.hyperparameters is not NOT_GIVEN else NOT_GIVEN + fine_tuning_job: FineTuningJob = get_client().fine_tuning.jobs.create( + model=args.model, + training_file=args.training_file, + hyperparameters=hyperparameters, + suffix=args.suffix, + validation_file=args.validation_file, + ) + print_model(fine_tuning_job) + + @staticmethod + def retrieve(args: CLIFineTuningJobsRetrieveArgs) -> None: + fine_tuning_job: FineTuningJob = get_client().fine_tuning.jobs.retrieve(fine_tuning_job_id=args.id) + print_model(fine_tuning_job) + + @staticmethod + def list(args: CLIFineTuningJobsListArgs) -> None: + fine_tuning_jobs: SyncCursorPage[FineTuningJob] = get_client().fine_tuning.jobs.list( + after=args.after or NOT_GIVEN, limit=args.limit or NOT_GIVEN + ) + print_model(fine_tuning_jobs) + + @staticmethod + def cancel(args: CLIFineTuningJobsCancelArgs) -> None: + fine_tuning_job: FineTuningJob = get_client().fine_tuning.jobs.cancel(fine_tuning_job_id=args.id) + print_model(fine_tuning_job) + + @staticmethod + def list_events(args: CLIFineTuningJobsListEventsArgs) -> None: + fine_tuning_job_events: SyncCursorPage[FineTuningJobEvent] = get_client().fine_tuning.jobs.list_events( + fine_tuning_job_id=args.id, + after=args.after or NOT_GIVEN, + limit=args.limit or NOT_GIVEN, + ) + print_model(fine_tuning_job_events) diff --git a/portkey_ai/_vendor/openai/cli/_cli.py b/portkey_ai/_vendor/openai/cli/_cli.py index aa3cd7d1..e2bd0185 100644 --- a/portkey_ai/_vendor/openai/cli/_cli.py +++ b/portkey_ai/_vendor/openai/cli/_cli.py @@ -16,7 +16,7 @@ from ._api import register_commands from ._utils import can_use_http2 from ._errors import CLIError, display_error -from .._compat import PYDANTIC_V2, ConfigDict, model_parse +from .._compat import PYDANTIC_V1, ConfigDict, model_parse from .._models import BaseModel from .._exceptions import APIError @@ -28,14 +28,14 @@ class Arguments(BaseModel): - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict( - extra="ignore", - ) - else: + if PYDANTIC_V1: class Config(pydantic.BaseConfig): # type: ignore extra: Any = pydantic.Extra.ignore # type: ignore + else: + model_config: ClassVar[ConfigDict] = ConfigDict( + extra="ignore", + ) verbosity: int version: Optional[str] = None diff --git a/portkey_ai/_vendor/openai/cli/_models.py b/portkey_ai/_vendor/openai/cli/_models.py index 5583db26..a8860896 100644 --- a/portkey_ai/_vendor/openai/cli/_models.py +++ b/portkey_ai/_vendor/openai/cli/_models.py @@ -4,14 +4,14 @@ import pydantic from 
.. import _models -from .._compat import PYDANTIC_V2, ConfigDict +from .._compat import PYDANTIC_V1, ConfigDict class BaseModel(_models.BaseModel): - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore", arbitrary_types_allowed=True) - else: + if PYDANTIC_V1: class Config(pydantic.BaseConfig): # type: ignore extra: Any = pydantic.Extra.ignore # type: ignore arbitrary_types_allowed: bool = True + else: + model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore", arbitrary_types_allowed=True) diff --git a/portkey_ai/_vendor/openai/lib/_parsing/_completions.py b/portkey_ai/_vendor/openai/lib/_parsing/_completions.py index c160070b..4b8b78b7 100644 --- a/portkey_ai/_vendor/openai/lib/_parsing/_completions.py +++ b/portkey_ai/_vendor/openai/lib/_parsing/_completions.py @@ -1,6 +1,7 @@ from __future__ import annotations import json +import logging from typing import TYPE_CHECKING, Any, Iterable, cast from typing_extensions import TypeVar, TypeGuard, assert_never @@ -9,7 +10,7 @@ from .._tools import PydanticFunctionTool from ..._types import NOT_GIVEN, NotGiven from ..._utils import is_dict, is_given -from ..._compat import PYDANTIC_V2, model_parse_json +from ..._compat import PYDANTIC_V1, model_parse_json from ..._models import construct_type_unchecked from .._pydantic import is_basemodel_type, to_strict_json_schema, is_dataclass_like_type from ...types.chat import ( @@ -19,14 +20,15 @@ ParsedChatCompletion, ChatCompletionMessage, ParsedFunctionToolCall, - ChatCompletionToolParam, ParsedChatCompletionMessage, + ChatCompletionToolUnionParam, + ChatCompletionFunctionToolParam, completion_create_params, ) from ..._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError from ...types.shared_params import FunctionDefinition from ...types.chat.completion_create_params import ResponseFormat as ResponseFormatParam -from ...types.chat.chat_completion_message_tool_call import Function +from ...types.chat.chat_completion_message_function_tool_call import Function ResponseFormatT = TypeVar( "ResponseFormatT", @@ -35,12 +37,36 @@ ) _default_response_format: None = None +log: logging.Logger = logging.getLogger("openai.lib.parsing") + + +def is_strict_chat_completion_tool_param( + tool: ChatCompletionToolUnionParam, +) -> TypeGuard[ChatCompletionFunctionToolParam]: + """Check if the given tool is a strict ChatCompletionFunctionToolParam.""" + if not tool["type"] == "function": + return False + if tool["function"].get("strict") is not True: + return False + + return True + + +def select_strict_chat_completion_tools( + tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, +) -> Iterable[ChatCompletionFunctionToolParam] | NotGiven: + """Select only the strict ChatCompletionFunctionToolParams from the given tools.""" + if not is_given(tools): + return NOT_GIVEN + + return [t for t in tools if is_strict_chat_completion_tool_param(t)] + def validate_input_tools( - tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, -) -> None: + tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, +) -> Iterable[ChatCompletionFunctionToolParam] | NotGiven: if not is_given(tools): - return + return NOT_GIVEN for tool in tools: if tool["type"] != "function": @@ -54,11 +80,13 @@ def validate_input_tools( f"`{tool['function']['name']}` is not strict. 
Only `strict` function tools can be auto-parsed" ) + return cast(Iterable[ChatCompletionFunctionToolParam], tools) + def parse_chat_completion( *, response_format: type[ResponseFormatT] | completion_create_params.ResponseFormat | NotGiven, - input_tools: Iterable[ChatCompletionToolParam] | NotGiven, + input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven, chat_completion: ChatCompletion | ParsedChatCompletion[object], ) -> ParsedChatCompletion[ResponseFormatT]: if is_given(input_tools): @@ -95,6 +123,14 @@ def parse_chat_completion( type_=ParsedFunctionToolCall, ) ) + elif tool_call.type == "custom": + # warn user that custom tool calls are not callable here + log.warning( + "Custom tool calls are not callable. Ignoring tool call: %s - %s", + tool_call.id, + tool_call.custom.name, + stacklevel=2, + ) elif TYPE_CHECKING: # type: ignore[unreachable] assert_never(tool_call) else: @@ -129,13 +165,15 @@ def parse_chat_completion( ) -def get_input_tool_by_name(*, input_tools: list[ChatCompletionToolParam], name: str) -> ChatCompletionToolParam | None: - return next((t for t in input_tools if t.get("function", {}).get("name") == name), None) +def get_input_tool_by_name( + *, input_tools: list[ChatCompletionToolUnionParam], name: str +) -> ChatCompletionFunctionToolParam | None: + return next((t for t in input_tools if t["type"] == "function" and t.get("function", {}).get("name") == name), None) def parse_function_tool_arguments( - *, input_tools: list[ChatCompletionToolParam], function: Function | ParsedFunction -) -> object: + *, input_tools: list[ChatCompletionToolUnionParam], function: Function | ParsedFunction +) -> object | None: input_tool = get_input_tool_by_name(input_tools=input_tools, name=function.name) if not input_tool: return None @@ -149,7 +187,7 @@ def parse_function_tool_arguments( if not input_fn.get("strict"): return None - return json.loads(function.arguments) + return json.loads(function.arguments) # type: ignore[no-any-return] def maybe_parse_content( @@ -180,7 +218,7 @@ def solve_response_format_t( def has_parseable_input( *, response_format: type | ResponseFormatParam | NotGiven, - input_tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, ) -> bool: if has_rich_response_format(response_format): return True @@ -208,7 +246,10 @@ def is_response_format_param(response_format: object) -> TypeGuard[ResponseForma return is_dict(response_format) -def is_parseable_tool(input_tool: ChatCompletionToolParam) -> bool: +def is_parseable_tool(input_tool: ChatCompletionToolUnionParam) -> bool: + if input_tool["type"] != "function": + return False + input_fn = cast(object, input_tool.get("function")) if isinstance(input_fn, PydanticFunctionTool): return True @@ -221,7 +262,7 @@ def _parse_content(response_format: type[ResponseFormatT], content: str) -> Resp return cast(ResponseFormatT, model_parse_json(response_format, content)) if is_dataclass_like_type(response_format): - if not PYDANTIC_V2: + if PYDANTIC_V1: raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {response_format}") return pydantic.TypeAdapter(response_format).validate_json(content) diff --git a/portkey_ai/_vendor/openai/lib/_parsing/_responses.py b/portkey_ai/_vendor/openai/lib/_parsing/_responses.py index 41be1d37..b6ebde0e 100644 --- a/portkey_ai/_vendor/openai/lib/_parsing/_responses.py +++ b/portkey_ai/_vendor/openai/lib/_parsing/_responses.py @@ -9,7 +9,7 @@ from .._tools import 
ResponsesPydanticFunctionTool from ..._types import NotGiven from ..._utils import is_given -from ..._compat import PYDANTIC_V2, model_parse_json +from ..._compat import PYDANTIC_V1, model_parse_json from ..._models import construct_type_unchecked from .._pydantic import is_basemodel_type, is_dataclass_like_type from ._completions import solve_response_format_t, type_to_response_format_param @@ -110,6 +110,7 @@ def parse_response( or output.type == "local_shell_call" or output.type == "mcp_list_tools" or output.type == "exec" + or output.type == "custom_tool_call" ): output_list.append(output) elif TYPE_CHECKING: # type: ignore @@ -137,7 +138,7 @@ def parse_text(text: str, text_format: type[TextFormatT] | NotGiven) -> TextForm return cast(TextFormatT, model_parse_json(text_format, text)) if is_dataclass_like_type(text_format): - if not PYDANTIC_V2: + if PYDANTIC_V1: raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {text_format}") return pydantic.TypeAdapter(text_format).validate_json(text) diff --git a/portkey_ai/_vendor/openai/lib/_pydantic.py b/portkey_ai/_vendor/openai/lib/_pydantic.py index c2d73e5f..3cfe224c 100644 --- a/portkey_ai/_vendor/openai/lib/_pydantic.py +++ b/portkey_ai/_vendor/openai/lib/_pydantic.py @@ -8,7 +8,7 @@ from .._types import NOT_GIVEN from .._utils import is_dict as _is_dict, is_list -from .._compat import PYDANTIC_V2, model_json_schema +from .._compat import PYDANTIC_V1, model_json_schema _T = TypeVar("_T") @@ -16,7 +16,7 @@ def to_strict_json_schema(model: type[pydantic.BaseModel] | pydantic.TypeAdapter[Any]) -> dict[str, Any]: if inspect.isclass(model) and is_basemodel_type(model): schema = model_json_schema(model) - elif PYDANTIC_V2 and isinstance(model, pydantic.TypeAdapter): + elif (not PYDANTIC_V1) and isinstance(model, pydantic.TypeAdapter): schema = model.json_schema() else: raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {model}") diff --git a/portkey_ai/_vendor/openai/lib/_tools.py b/portkey_ai/_vendor/openai/lib/_tools.py index 415d7500..4070ad63 100644 --- a/portkey_ai/_vendor/openai/lib/_tools.py +++ b/portkey_ai/_vendor/openai/lib/_tools.py @@ -5,7 +5,7 @@ import pydantic from ._pydantic import to_strict_json_schema -from ..types.chat import ChatCompletionToolParam +from ..types.chat import ChatCompletionFunctionToolParam from ..types.shared_params import FunctionDefinition from ..types.responses.function_tool_param import FunctionToolParam as ResponsesFunctionToolParam @@ -42,7 +42,7 @@ def pydantic_function_tool( *, name: str | None = None, # inferred from class name by default description: str | None = None, # inferred from class docstring by default -) -> ChatCompletionToolParam: +) -> ChatCompletionFunctionToolParam: if description is None: # note: we intentionally don't use `.getdoc()` to avoid # including pydantic's docstrings diff --git a/portkey_ai/_vendor/openai/lib/azure.py b/portkey_ai/_vendor/openai/lib/azure.py index 655dd71d..ad647072 100644 --- a/portkey_ai/_vendor/openai/lib/azure.py +++ b/portkey_ai/_vendor/openai/lib/azure.py @@ -94,10 +94,11 @@ def __init__( azure_endpoint: str, azure_deployment: str | None = None, api_version: str | None = None, - api_key: str | None = None, + api_key: str | Callable[[], str] | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AzureADTokenProvider | None = None, organization: str | None = None, + webhook_secret: str | None = None, websocket_base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | 
NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -113,10 +114,11 @@ def __init__( *, azure_deployment: str | None = None, api_version: str | None = None, - api_key: str | None = None, + api_key: str | Callable[[], str] | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AzureADTokenProvider | None = None, organization: str | None = None, + webhook_secret: str | None = None, websocket_base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -132,10 +134,11 @@ def __init__( *, base_url: str, api_version: str | None = None, - api_key: str | None = None, + api_key: str | Callable[[], str] | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AzureADTokenProvider | None = None, organization: str | None = None, + webhook_secret: str | None = None, websocket_base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -151,11 +154,12 @@ def __init__( api_version: str | None = None, azure_endpoint: str | None = None, azure_deployment: str | None = None, - api_key: str | None = None, + api_key: str | Callable[[], str] | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AzureADTokenProvider | None = None, organization: str | None = None, project: str | None = None, + webhook_secret: str | None = None, websocket_base_url: str | httpx.URL | None = None, base_url: str | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, @@ -234,6 +238,7 @@ def __init__( api_key=api_key, organization=organization, project=project, + webhook_secret=webhook_secret, base_url=base_url, timeout=timeout, max_retries=max_retries, @@ -253,9 +258,10 @@ def __init__( def copy( self, *, - api_key: str | None = None, + api_key: str | Callable[[], str] | None = None, organization: str | None = None, project: str | None = None, + webhook_secret: str | None = None, websocket_base_url: str | httpx.URL | None = None, api_version: str | None = None, azure_ad_token: str | None = None, @@ -277,6 +283,7 @@ def copy( api_key=api_key, organization=organization, project=project, + webhook_secret=webhook_secret, websocket_base_url=websocket_base_url, base_url=base_url, timeout=timeout, @@ -338,7 +345,7 @@ def _configure_realtime(self, model: str, extra_query: Query) -> tuple[httpx.URL "api-version": self._api_version, "deployment": self._azure_deployment or model, } - if self.api_key != "": + if self.api_key and self.api_key != "": auth_headers = {"api-key": self.api_key} else: token = self._get_azure_ad_token() @@ -365,11 +372,12 @@ def __init__( azure_endpoint: str, azure_deployment: str | None = None, api_version: str | None = None, - api_key: str | None = None, + api_key: str | Callable[[], Awaitable[str]] | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = None, project: str | None = None, + webhook_secret: str | None = None, websocket_base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -385,11 +393,12 @@ def __init__( *, azure_deployment: str | None = None, api_version: str | None = None, - api_key: str | None = None, + api_key: str | Callable[[], Awaitable[str]] | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = 
None, project: str | None = None, + webhook_secret: str | None = None, websocket_base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -405,11 +414,12 @@ def __init__( *, base_url: str, api_version: str | None = None, - api_key: str | None = None, + api_key: str | Callable[[], Awaitable[str]] | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = None, project: str | None = None, + webhook_secret: str | None = None, websocket_base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -425,11 +435,12 @@ def __init__( azure_endpoint: str | None = None, azure_deployment: str | None = None, api_version: str | None = None, - api_key: str | None = None, + api_key: str | Callable[[], Awaitable[str]] | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = None, project: str | None = None, + webhook_secret: str | None = None, base_url: str | None = None, websocket_base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, @@ -508,6 +519,7 @@ def __init__( api_key=api_key, organization=organization, project=project, + webhook_secret=webhook_secret, base_url=base_url, timeout=timeout, max_retries=max_retries, @@ -527,9 +539,10 @@ def __init__( def copy( self, *, - api_key: str | None = None, + api_key: str | Callable[[], Awaitable[str]] | None = None, organization: str | None = None, project: str | None = None, + webhook_secret: str | None = None, websocket_base_url: str | httpx.URL | None = None, api_version: str | None = None, azure_ad_token: str | None = None, @@ -551,6 +564,7 @@ def copy( api_key=api_key, organization=organization, project=project, + webhook_secret=webhook_secret, websocket_base_url=websocket_base_url, base_url=base_url, timeout=timeout, @@ -614,7 +628,7 @@ async def _configure_realtime(self, model: str, extra_query: Query) -> tuple[htt "api-version": self._api_version, "deployment": self._azure_deployment or model, } - if self.api_key != "": + if self.api_key and self.api_key != "": auth_headers = {"api-key": self.api_key} else: token = await self._get_azure_ad_token() diff --git a/portkey_ai/_vendor/openai/lib/streaming/chat/_completions.py b/portkey_ai/_vendor/openai/lib/streaming/chat/_completions.py index a7b70c32..52a6a550 100644 --- a/portkey_ai/_vendor/openai/lib/streaming/chat/_completions.py +++ b/portkey_ai/_vendor/openai/lib/streaming/chat/_completions.py @@ -37,7 +37,7 @@ parse_function_tool_arguments, ) from ...._streaming import Stream, AsyncStream -from ....types.chat import ChatCompletionChunk, ParsedChatCompletion, ChatCompletionToolParam +from ....types.chat import ChatCompletionChunk, ParsedChatCompletion, ChatCompletionToolUnionParam from ...._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError from ....types.chat.chat_completion import ChoiceLogprobs from ....types.chat.chat_completion_chunk import Choice as ChoiceChunk @@ -58,7 +58,7 @@ def __init__( *, raw_stream: Stream[ChatCompletionChunk], response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, - input_tools: Iterable[ChatCompletionToolParam] | NotGiven, + input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven, ) -> None: self._raw_stream = raw_stream self._response = 
raw_stream.response @@ -128,7 +128,7 @@ class ChatCompletionStreamManager(Generic[ResponseFormatT]): Usage: ```py - with client.beta.chat.completions.stream(...) as stream: + with client.chat.completions.stream(...) as stream: for event in stream: ... ``` @@ -139,7 +139,7 @@ def __init__( api_request: Callable[[], Stream[ChatCompletionChunk]], *, response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, - input_tools: Iterable[ChatCompletionToolParam] | NotGiven, + input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven, ) -> None: self.__stream: ChatCompletionStream[ResponseFormatT] | None = None self.__api_request = api_request @@ -181,7 +181,7 @@ def __init__( *, raw_stream: AsyncStream[ChatCompletionChunk], response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, - input_tools: Iterable[ChatCompletionToolParam] | NotGiven, + input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven, ) -> None: self._raw_stream = raw_stream self._response = raw_stream.response @@ -251,7 +251,7 @@ class AsyncChatCompletionStreamManager(Generic[ResponseFormatT]): Usage: ```py - async with client.beta.chat.completions.stream(...) as stream: + async with client.chat.completions.stream(...) as stream: for event in stream: ... ``` @@ -262,7 +262,7 @@ def __init__( api_request: Awaitable[AsyncStream[ChatCompletionChunk]], *, response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, - input_tools: Iterable[ChatCompletionToolParam] | NotGiven, + input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven, ) -> None: self.__stream: AsyncChatCompletionStream[ResponseFormatT] | None = None self.__api_request = api_request @@ -314,7 +314,7 @@ class ChatCompletionStreamState(Generic[ResponseFormatT]): def __init__( self, *, - input_tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven = NOT_GIVEN, ) -> None: self.__current_completion_snapshot: ParsedChatCompletionSnapshot | None = None @@ -584,7 +584,7 @@ def _build_events( class ChoiceEventState: - def __init__(self, *, input_tools: list[ChatCompletionToolParam]) -> None: + def __init__(self, *, input_tools: list[ChatCompletionToolUnionParam]) -> None: self._input_tools = input_tools self._content_done = False diff --git a/portkey_ai/_vendor/openai/lib/streaming/responses/_events.py b/portkey_ai/_vendor/openai/lib/streaming/responses/_events.py index 6e547815..bdc47b83 100644 --- a/portkey_ai/_vendor/openai/lib/streaming/responses/_events.py +++ b/portkey_ai/_vendor/openai/lib/streaming/responses/_events.py @@ -21,9 +21,7 @@ ResponseRefusalDoneEvent, ResponseRefusalDeltaEvent, ResponseMcpCallFailedEvent, - ResponseReasoningDoneEvent, ResponseOutputItemDoneEvent, - ResponseReasoningDeltaEvent, ResponseContentPartDoneEvent, ResponseOutputItemAddedEvent, ResponseContentPartAddedEvent, @@ -33,19 +31,19 @@ ResponseAudioTranscriptDoneEvent, ResponseAudioTranscriptDeltaEvent, ResponseMcpCallArgumentsDoneEvent, - ResponseReasoningSummaryDoneEvent, ResponseImageGenCallCompletedEvent, ResponseMcpCallArgumentsDeltaEvent, ResponseMcpListToolsCompletedEvent, - ResponseReasoningSummaryDeltaEvent, ResponseImageGenCallGeneratingEvent, ResponseImageGenCallInProgressEvent, ResponseMcpListToolsInProgressEvent, ResponseWebSearchCallCompletedEvent, ResponseWebSearchCallSearchingEvent, + ResponseCustomToolCallInputDoneEvent, ResponseFileSearchCallCompletedEvent, 
ResponseFileSearchCallSearchingEvent, ResponseWebSearchCallInProgressEvent, + ResponseCustomToolCallInputDeltaEvent, ResponseFileSearchCallInProgressEvent, ResponseImageGenCallPartialImageEvent, ResponseReasoningSummaryPartDoneEvent, @@ -61,6 +59,8 @@ ResponseCodeInterpreterCallInProgressEvent, ResponseCodeInterpreterCallInterpretingEvent, ) +from ....types.responses.response_reasoning_text_done_event import ResponseReasoningTextDoneEvent +from ....types.responses.response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent TextFormatT = TypeVar( "TextFormatT", @@ -139,10 +139,10 @@ class ResponseCompletedEvent(RawResponseCompletedEvent, GenericModel, Generic[Te ResponseMcpListToolsInProgressEvent, ResponseOutputTextAnnotationAddedEvent, ResponseQueuedEvent, - ResponseReasoningDeltaEvent, - ResponseReasoningSummaryDeltaEvent, - ResponseReasoningSummaryDoneEvent, - ResponseReasoningDoneEvent, + ResponseReasoningTextDeltaEvent, + ResponseReasoningTextDoneEvent, + ResponseCustomToolCallInputDeltaEvent, + ResponseCustomToolCallInputDoneEvent, ], PropertyInfo(discriminator="type"), ] diff --git a/portkey_ai/_vendor/openai/lib/streaming/responses/_responses.py b/portkey_ai/_vendor/openai/lib/streaming/responses/_responses.py index 2c2fec54..d45664de 100644 --- a/portkey_ai/_vendor/openai/lib/streaming/responses/_responses.py +++ b/portkey_ai/_vendor/openai/lib/streaming/responses/_responses.py @@ -264,6 +264,7 @@ def handle_event(self, event: RawResponseStreamEvent) -> List[ResponseStreamEven item_id=event.item_id, output_index=event.output_index, sequence_number=event.sequence_number, + logprobs=event.logprobs, type="response.output_text.delta", snapshot=content.text, ) @@ -282,6 +283,7 @@ def handle_event(self, event: RawResponseStreamEvent) -> List[ResponseStreamEven item_id=event.item_id, output_index=event.output_index, sequence_number=event.sequence_number, + logprobs=event.logprobs, type="response.output_text.done", text=event.text, parsed=parse_text(event.text, text_format=self._text_format), diff --git a/portkey_ai/_vendor/openai/pagination.py b/portkey_ai/_vendor/openai/pagination.py index a59cced8..4dd3788a 100644 --- a/portkey_ai/_vendor/openai/pagination.py +++ b/portkey_ai/_vendor/openai/pagination.py @@ -5,7 +5,14 @@ from ._base_client import BasePage, PageInfo, BaseSyncPage, BaseAsyncPage -__all__ = ["SyncPage", "AsyncPage", "SyncCursorPage", "AsyncCursorPage"] +__all__ = [ + "SyncPage", + "AsyncPage", + "SyncCursorPage", + "AsyncCursorPage", + "SyncConversationCursorPage", + "AsyncConversationCursorPage", +] _T = TypeVar("_T") @@ -123,3 +130,61 @@ def next_page_info(self) -> Optional[PageInfo]: return None return PageInfo(params={"after": item.id}) + + +class SyncConversationCursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]): + data: List[_T] + has_more: Optional[bool] = None + last_id: Optional[str] = None + + @override + def _get_page_items(self) -> List[_T]: + data = self.data + if not data: + return [] + return data + + @override + def has_next_page(self) -> bool: + has_more = self.has_more + if has_more is not None and has_more is False: + return False + + return super().has_next_page() + + @override + def next_page_info(self) -> Optional[PageInfo]: + last_id = self.last_id + if not last_id: + return None + + return PageInfo(params={"after": last_id}) + + +class AsyncConversationCursorPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]): + data: List[_T] + has_more: Optional[bool] = None + last_id: Optional[str] = None + + @override + def 
_get_page_items(self) -> List[_T]: + data = self.data + if not data: + return [] + return data + + @override + def has_next_page(self) -> bool: + has_more = self.has_more + if has_more is not None and has_more is False: + return False + + return super().has_next_page() + + @override + def next_page_info(self) -> Optional[PageInfo]: + last_id = self.last_id + if not last_id: + return None + + return PageInfo(params={"after": last_id}) diff --git a/portkey_ai/_vendor/openai/resources/audio/speech.py b/portkey_ai/_vendor/openai/resources/audio/speech.py index a195d713..64ce5eec 100644 --- a/portkey_ai/_vendor/openai/resources/audio/speech.py +++ b/portkey_ai/_vendor/openai/resources/audio/speech.py @@ -51,11 +51,12 @@ def create( input: str, model: Union[str, SpeechModel], voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] + str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"] ], instructions: str | NotGiven = NOT_GIVEN, response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, + stream_format: Literal["sse", "audio"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -85,7 +86,10 @@ def create( `wav`, and `pcm`. speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is - the default. Does not work with `gpt-4o-mini-tts`. + the default. + + stream_format: The format to stream the audio in. Supported formats are `sse` and `audio`. + `sse` is not supported for `tts-1` or `tts-1-hd`. extra_headers: Send extra headers @@ -106,6 +110,7 @@ def create( "instructions": instructions, "response_format": response_format, "speed": speed, + "stream_format": stream_format, }, speech_create_params.SpeechCreateParams, ), @@ -142,11 +147,12 @@ async def create( input: str, model: Union[str, SpeechModel], voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] + str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"] ], instructions: str | NotGiven = NOT_GIVEN, response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, + stream_format: Literal["sse", "audio"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -176,7 +182,10 @@ async def create( `wav`, and `pcm`. speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is - the default. Does not work with `gpt-4o-mini-tts`. + the default. + + stream_format: The format to stream the audio in. Supported formats are `sse` and `audio`. + `sse` is not supported for `tts-1` or `tts-1-hd`. 
extra_headers: Send extra headers @@ -197,6 +206,7 @@ async def create( "instructions": instructions, "response_format": response_format, "speed": speed, + "stream_format": stream_format, }, speech_create_params.SpeechCreateParams, ), diff --git a/portkey_ai/_vendor/openai/resources/batches.py b/portkey_ai/_vendor/openai/resources/batches.py index 26ea498b..2340bd2e 100644 --- a/portkey_ai/_vendor/openai/resources/batches.py +++ b/portkey_ai/_vendor/openai/resources/batches.py @@ -49,6 +49,7 @@ def create( endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + output_expires_after: batch_create_params.OutputExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -85,6 +86,9 @@ def create( Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. + output_expires_after: The expiration policy for the output and/or error file that are generated for a + batch. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -101,6 +105,7 @@ def create( "endpoint": endpoint, "input_file_id": input_file_id, "metadata": metadata, + "output_expires_after": output_expires_after, }, batch_create_params.BatchCreateParams, ), @@ -259,6 +264,7 @@ async def create( endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + output_expires_after: batch_create_params.OutputExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -295,6 +301,9 @@ async def create( Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. + output_expires_after: The expiration policy for the output and/or error file that are generated for a + batch. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -311,6 +320,7 @@ async def create( "endpoint": endpoint, "input_file_id": input_file_id, "metadata": metadata, + "output_expires_after": output_expires_after, }, batch_create_params.BatchCreateParams, ), diff --git a/portkey_ai/_vendor/openai/resources/beta/assistants.py b/portkey_ai/_vendor/openai/resources/beta/assistants.py index 9059d936..fe0c99c8 100644 --- a/portkey_ai/_vendor/openai/resources/beta/assistants.py +++ b/portkey_ai/_vendor/openai/resources/beta/assistants.py @@ -96,12 +96,11 @@ def create( name: The name of the assistant. The maximum length is 256 characters. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -220,6 +219,12 @@ def update( model: Union[ str, Literal[ + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", @@ -298,12 +303,11 @@ def update( name: The name of the assistant. The maximum length is 256 characters. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -545,12 +549,11 @@ async def create( name: The name of the assistant. The maximum length is 256 characters. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -669,6 +672,12 @@ async def update( model: Union[ str, Literal[ + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", @@ -747,12 +756,11 @@ async def update( name: The name of the assistant. The maximum length is 256 characters. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/portkey_ai/_vendor/openai/resources/beta/beta.py b/portkey_ai/_vendor/openai/resources/beta/beta.py index 62fc8258..9084c477 100644 --- a/portkey_ai/_vendor/openai/resources/beta/beta.py +++ b/portkey_ai/_vendor/openai/resources/beta/beta.py @@ -3,7 +3,6 @@ from __future__ import annotations from ..._compat import cached_property -from .chat.chat import Chat, AsyncChat from .assistants import ( Assistants, AsyncAssistants, @@ -21,13 +20,10 @@ ThreadsWithStreamingResponse, AsyncThreadsWithStreamingResponse, ) +from ...resources.chat import Chat, AsyncChat from .realtime.realtime import ( Realtime, AsyncRealtime, - RealtimeWithRawResponse, - AsyncRealtimeWithRawResponse, - RealtimeWithStreamingResponse, - AsyncRealtimeWithStreamingResponse, ) __all__ = ["Beta", "AsyncBeta"] @@ -111,10 +107,6 @@ class BetaWithRawResponse: def __init__(self, beta: Beta) -> None: self._beta = beta - @cached_property - def realtime(self) -> RealtimeWithRawResponse: - return RealtimeWithRawResponse(self._beta.realtime) - @cached_property def assistants(self) -> AssistantsWithRawResponse: return AssistantsWithRawResponse(self._beta.assistants) @@ -128,10 +120,6 @@ class AsyncBetaWithRawResponse: def __init__(self, beta: AsyncBeta) -> None: self._beta = beta - @cached_property - def realtime(self) -> AsyncRealtimeWithRawResponse: - return AsyncRealtimeWithRawResponse(self._beta.realtime) - @cached_property def assistants(self) -> AsyncAssistantsWithRawResponse: return AsyncAssistantsWithRawResponse(self._beta.assistants) @@ -145,10 +133,6 @@ class BetaWithStreamingResponse: def __init__(self, beta: Beta) -> None: self._beta = beta - @cached_property - def realtime(self) -> RealtimeWithStreamingResponse: - return RealtimeWithStreamingResponse(self._beta.realtime) - @cached_property def assistants(self) -> AssistantsWithStreamingResponse: return AssistantsWithStreamingResponse(self._beta.assistants) @@ -162,10 +146,6 @@ class AsyncBetaWithStreamingResponse: def __init__(self, beta: AsyncBeta) -> None: self._beta = beta - @cached_property - def realtime(self) -> AsyncRealtimeWithStreamingResponse: - return AsyncRealtimeWithStreamingResponse(self._beta.realtime) - @cached_property def assistants(self) -> AsyncAssistantsWithStreamingResponse: return AsyncAssistantsWithStreamingResponse(self._beta.assistants) diff --git a/portkey_ai/_vendor/openai/resources/beta/chat/__init__.py b/portkey_ai/_vendor/openai/resources/beta/chat/__init__.py deleted file mode 100644 index 072d7867..00000000 --- a/portkey_ai/_vendor/openai/resources/beta/chat/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .chat import Chat, AsyncChat -from .completions import Completions, AsyncCompletions - -__all__ = [ - "Completions", - "AsyncCompletions", - "Chat", - "AsyncChat", -] diff --git a/portkey_ai/_vendor/openai/resources/beta/chat/chat.py b/portkey_ai/_vendor/openai/resources/beta/chat/chat.py deleted file mode 100644 index 6afdcea3..00000000 --- a/portkey_ai/_vendor/openai/resources/beta/chat/chat.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from ...._compat import cached_property -from .completions import Completions, AsyncCompletions -from ...._resource import SyncAPIResource, AsyncAPIResource - -__all__ = ["Chat", "AsyncChat"] - - -class Chat(SyncAPIResource): - @cached_property - def completions(self) -> Completions: - return Completions(self._client) - - -class AsyncChat(AsyncAPIResource): - @cached_property - def completions(self) -> AsyncCompletions: - return AsyncCompletions(self._client) diff --git a/portkey_ai/_vendor/openai/resources/beta/chat/completions.py b/portkey_ai/_vendor/openai/resources/beta/chat/completions.py deleted file mode 100644 index 80e01561..00000000 --- a/portkey_ai/_vendor/openai/resources/beta/chat/completions.py +++ /dev/null @@ -1,634 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, List, Type, Union, Iterable, Optional, cast -from functools import partial -from typing_extensions import Literal - -import httpx - -from .... import _legacy_response -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper -from ...._streaming import Stream -from ....types.chat import completion_create_params -from ...._base_client import make_request_options -from ....lib._parsing import ( - ResponseFormatT, - validate_input_tools as _validate_input_tools, - parse_chat_completion as _parse_chat_completion, - type_to_response_format_param as _type_to_response_format, -) -from ....types.chat_model import ChatModel -from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager -from ....types.shared_params import Metadata, ReasoningEffort -from ....types.chat.chat_completion import ChatCompletion -from ....types.chat.chat_completion_chunk import ChatCompletionChunk -from ....types.chat.parsed_chat_completion import ParsedChatCompletion -from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam -from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam -from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam -from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam -from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam -from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam - -__all__ = ["Completions", "AsyncCompletions"] - - -class Completions(SyncAPIResource): - @cached_property - def with_raw_response(self) -> CompletionsWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return the - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers - """ - return CompletionsWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> CompletionsWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/openai/openai-python#with_streaming_response - """ - return CompletionsWithStreamingResponse(self) - - def parse( - self, - *, - messages: Iterable[ChatCompletionMessageParam], - model: Union[str, ChatModel], - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ParsedChatCompletion[ResponseFormatT]: - """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types - & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class. - - You can pass a pydantic model to this method and it will automatically convert the model - into a JSON schema, send it to the API and parse the response content back into the given model. 
- - This method will also automatically parse `function` tool calls if: - - You use the `openai.pydantic_function_tool()` helper method - - You mark your tool schema with `"strict": True` - - Example usage: - ```py - from pydantic import BaseModel - from openai import OpenAI - - - class Step(BaseModel): - explanation: str - output: str - - - class MathResponse(BaseModel): - steps: List[Step] - final_answer: str - - - client = OpenAI() - completion = client.beta.chat.completions.parse( - model="gpt-4o-2024-08-06", - messages=[ - {"role": "system", "content": "You are a helpful math tutor."}, - {"role": "user", "content": "solve 8x + 31 = 2"}, - ], - response_format=MathResponse, - ) - - message = completion.choices[0].message - if message.parsed: - print(message.parsed.steps) - print("answer: ", message.parsed.final_answer) - ``` - """ - _validate_input_tools(tools) - - extra_headers = { - "X-Stainless-Helper-Method": "beta.chat.completions.parse", - **(extra_headers or {}), - } - - def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]: - return _parse_chat_completion( - response_format=response_format, - chat_completion=raw_completion, - input_tools=tools, - ) - - return self._post( - "/chat/completions", - body=maybe_transform( - { - "messages": messages, - "model": model, - "audio": audio, - "frequency_penalty": frequency_penalty, - "function_call": function_call, - "functions": functions, - "logit_bias": logit_bias, - "logprobs": logprobs, - "max_completion_tokens": max_completion_tokens, - "max_tokens": max_tokens, - "metadata": metadata, - "modalities": modalities, - "n": n, - "parallel_tool_calls": parallel_tool_calls, - "prediction": prediction, - "presence_penalty": presence_penalty, - "reasoning_effort": reasoning_effort, - "response_format": _type_to_response_format(response_format), - "seed": seed, - "service_tier": service_tier, - "stop": stop, - "store": store, - "stream": False, - "stream_options": stream_options, - "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, - "top_logprobs": top_logprobs, - "top_p": top_p, - "user": user, - "web_search_options": web_search_options, - }, - completion_create_params.CompletionCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - post_parser=parser, - ), - # we turn the `ChatCompletion` instance into a `ParsedChatCompletion` - # in the `parser` function above - cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion), - stream=False, - ) - - def stream( - self, - *, - messages: Iterable[ChatCompletionMessageParam], - model: Union[str, ChatModel], - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | 
NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChatCompletionStreamManager[ResponseFormatT]: - """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API - and automatic accumulation of each delta. - - This also supports all of the parsing utilities that `.parse()` does. - - Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response: - - ```py - with client.beta.chat.completions.stream( - model="gpt-4o-2024-08-06", - messages=[...], - ) as stream: - for event in stream: - if event.type == "content.delta": - print(event.delta, flush=True, end="") - ``` - - When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)` is an iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events). - - When the context manager exits, the response will be closed, however the `stream` instance is still available outside - the context manager. 
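The docstring above notes that the `stream` helper remains usable after its context manager exits. A minimal sketch of that behaviour, written against the non-beta `client.chat.completions.stream()` location this release migrates the helper to, and assuming the `get_final_completion()` accumulator from the library's streaming helpers:

```py
from openai import OpenAI

client = OpenAI()

with client.chat.completions.stream(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "Say this is a test"}],
) as stream:
    for event in stream:
        if event.type == "content.delta":
            print(event.delta, flush=True, end="")

# The response is closed on exit, but the stream object stays available,
# so the fully accumulated completion can still be read here.
completion = stream.get_final_completion()
print(completion.choices[0].message.content)
```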
- """ - extra_headers = { - "X-Stainless-Helper-Method": "beta.chat.completions.stream", - **(extra_headers or {}), - } - - api_request: partial[Stream[ChatCompletionChunk]] = partial( - self._client.chat.completions.create, - messages=messages, - model=model, - audio=audio, - stream=True, - response_format=_type_to_response_format(response_format), - frequency_penalty=frequency_penalty, - function_call=function_call, - functions=functions, - logit_bias=logit_bias, - logprobs=logprobs, - max_completion_tokens=max_completion_tokens, - max_tokens=max_tokens, - metadata=metadata, - modalities=modalities, - n=n, - parallel_tool_calls=parallel_tool_calls, - prediction=prediction, - presence_penalty=presence_penalty, - reasoning_effort=reasoning_effort, - seed=seed, - service_tier=service_tier, - store=store, - stop=stop, - stream_options=stream_options, - temperature=temperature, - tool_choice=tool_choice, - tools=tools, - top_logprobs=top_logprobs, - top_p=top_p, - user=user, - web_search_options=web_search_options, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - return ChatCompletionStreamManager( - api_request, - response_format=response_format, - input_tools=tools, - ) - - -class AsyncCompletions(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncCompletionsWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return the - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers - """ - return AsyncCompletionsWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/openai/openai-python#with_streaming_response - """ - return AsyncCompletionsWithStreamingResponse(self) - - async def parse( - self, - *, - messages: Iterable[ChatCompletionMessageParam], - model: Union[str, ChatModel], - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ParsedChatCompletion[ResponseFormatT]: - """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types - & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class. - - You can pass a pydantic model to this method and it will automatically convert the model - into a JSON schema, send it to the API and parse the response content back into the given model. 
- - This method will also automatically parse `function` tool calls if: - - You use the `openai.pydantic_function_tool()` helper method - - You mark your tool schema with `"strict": True` - - Example usage: - ```py - from pydantic import BaseModel - from openai import AsyncOpenAI - - - class Step(BaseModel): - explanation: str - output: str - - - class MathResponse(BaseModel): - steps: List[Step] - final_answer: str - - - client = AsyncOpenAI() - completion = await client.beta.chat.completions.parse( - model="gpt-4o-2024-08-06", - messages=[ - {"role": "system", "content": "You are a helpful math tutor."}, - {"role": "user", "content": "solve 8x + 31 = 2"}, - ], - response_format=MathResponse, - ) - - message = completion.choices[0].message - if message.parsed: - print(message.parsed.steps) - print("answer: ", message.parsed.final_answer) - ``` - """ - _validate_input_tools(tools) - - extra_headers = { - "X-Stainless-Helper-Method": "beta.chat.completions.parse", - **(extra_headers or {}), - } - - def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]: - return _parse_chat_completion( - response_format=response_format, - chat_completion=raw_completion, - input_tools=tools, - ) - - return await self._post( - "/chat/completions", - body=await async_maybe_transform( - { - "messages": messages, - "model": model, - "audio": audio, - "frequency_penalty": frequency_penalty, - "function_call": function_call, - "functions": functions, - "logit_bias": logit_bias, - "logprobs": logprobs, - "max_completion_tokens": max_completion_tokens, - "max_tokens": max_tokens, - "metadata": metadata, - "modalities": modalities, - "n": n, - "parallel_tool_calls": parallel_tool_calls, - "prediction": prediction, - "presence_penalty": presence_penalty, - "reasoning_effort": reasoning_effort, - "response_format": _type_to_response_format(response_format), - "seed": seed, - "service_tier": service_tier, - "store": store, - "stop": stop, - "stream": False, - "stream_options": stream_options, - "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, - "top_logprobs": top_logprobs, - "top_p": top_p, - "user": user, - "web_search_options": web_search_options, - }, - completion_create_params.CompletionCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - post_parser=parser, - ), - # we turn the `ChatCompletion` instance into a `ParsedChatCompletion` - # in the `parser` function above - cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion), - stream=False, - ) - - def stream( - self, - *, - messages: Iterable[ChatCompletionMessageParam], - model: Union[str, ChatModel], - audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, - functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, - modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - 
parallel_tool_calls: bool | NotGiven = NOT_GIVEN, - prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - store: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncChatCompletionStreamManager[ResponseFormatT]: - """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API - and automatic accumulation of each delta. - - This also supports all of the parsing utilities that `.parse()` does. - - Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response: - - ```py - async with client.beta.chat.completions.stream( - model="gpt-4o-2024-08-06", - messages=[...], - ) as stream: - async for event in stream: - if event.type == "content.delta": - print(event.delta, flush=True, end="") - ``` - - When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)` is an async iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events). - - When the context manager exits, the response will be closed, however the `stream` instance is still available outside - the context manager. 
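The async helper mirrors the sync one; a sketch under the same assumptions (non-beta location, `get_final_completion()` awaited on the async stream):

```py
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    async with client.chat.completions.stream(
        model="gpt-4o-2024-08-06",
        messages=[{"role": "user", "content": "Say this is a test"}],
    ) as stream:
        async for event in stream:
            if event.type == "content.delta":
                print(event.delta, flush=True, end="")

    # As in the sync case, the stream outlives the context manager.
    completion = await stream.get_final_completion()
    print(completion.choices[0].message.content)


asyncio.run(main())
```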
- """ - _validate_input_tools(tools) - - extra_headers = { - "X-Stainless-Helper-Method": "beta.chat.completions.stream", - **(extra_headers or {}), - } - - api_request = self._client.chat.completions.create( - messages=messages, - model=model, - audio=audio, - stream=True, - response_format=_type_to_response_format(response_format), - frequency_penalty=frequency_penalty, - function_call=function_call, - functions=functions, - logit_bias=logit_bias, - logprobs=logprobs, - max_completion_tokens=max_completion_tokens, - max_tokens=max_tokens, - metadata=metadata, - modalities=modalities, - n=n, - parallel_tool_calls=parallel_tool_calls, - prediction=prediction, - presence_penalty=presence_penalty, - reasoning_effort=reasoning_effort, - seed=seed, - service_tier=service_tier, - stop=stop, - store=store, - stream_options=stream_options, - temperature=temperature, - tool_choice=tool_choice, - tools=tools, - top_logprobs=top_logprobs, - top_p=top_p, - user=user, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - web_search_options=web_search_options, - ) - return AsyncChatCompletionStreamManager( - api_request, - response_format=response_format, - input_tools=tools, - ) - - -class CompletionsWithRawResponse: - def __init__(self, completions: Completions) -> None: - self._completions = completions - - self.parse = _legacy_response.to_raw_response_wrapper( - completions.parse, - ) - - -class AsyncCompletionsWithRawResponse: - def __init__(self, completions: AsyncCompletions) -> None: - self._completions = completions - - self.parse = _legacy_response.async_to_raw_response_wrapper( - completions.parse, - ) - - -class CompletionsWithStreamingResponse: - def __init__(self, completions: Completions) -> None: - self._completions = completions - - self.parse = to_streamed_response_wrapper( - completions.parse, - ) - - -class AsyncCompletionsWithStreamingResponse: - def __init__(self, completions: AsyncCompletions) -> None: - self._completions = completions - - self.parse = async_to_streamed_response_wrapper( - completions.parse, - ) diff --git a/portkey_ai/_vendor/openai/resources/beta/realtime/realtime.py b/portkey_ai/_vendor/openai/resources/beta/realtime/realtime.py index 8e1b558c..4fa35963 100644 --- a/portkey_ai/_vendor/openai/resources/beta/realtime/realtime.py +++ b/portkey_ai/_vendor/openai/resources/beta/realtime/realtime.py @@ -358,6 +358,7 @@ async def __aenter__(self) -> AsyncRealtimeConnection: raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc extra_query = self.__extra_query + await self.__client._refresh_api_key() auth_headers = self.__client.auth_headers if is_async_azure_client(self.__client): url, auth_headers = await self.__client._configure_realtime(self.__model, extra_query) @@ -540,6 +541,7 @@ def __enter__(self) -> RealtimeConnection: raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc extra_query = self.__extra_query + self.__client._refresh_api_key() auth_headers = self.__client.auth_headers if is_azure_client(self.__client): url, auth_headers = self.__client._configure_realtime(self.__model, extra_query) @@ -652,8 +654,8 @@ def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | Not """Send this event to cancel an in-progress response. The server will respond - with a `response.cancelled` event or an error if there is no response to - cancel. + with a `response.done` event with a status of `response.status=cancelled`. 
If + there is no response to cancel, the server will respond with an error. """ self._connection.send( cast( @@ -904,8 +906,8 @@ async def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str """Send this event to cancel an in-progress response. The server will respond - with a `response.cancelled` event or an error if there is no response to - cancel. + with a `response.done` event with a status of `response.status=cancelled`. If + there is no response to cancel, the server will respond with an error. """ await self._connection.send( cast( diff --git a/portkey_ai/_vendor/openai/resources/beta/realtime/sessions.py b/portkey_ai/_vendor/openai/resources/beta/realtime/sessions.py index 77f1ec90..eaddb384 100644 --- a/portkey_ai/_vendor/openai/resources/beta/realtime/sessions.py +++ b/portkey_ai/_vendor/openai/resources/beta/realtime/sessions.py @@ -66,9 +66,7 @@ def create( tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN, tracing: session_create_params.Tracing | NotGiven = NOT_GIVEN, turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN, - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -154,7 +152,7 @@ def create( set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. Semantic VAD - is more advanced and uses a turn detection model (in conjuction with VAD) to + is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and wait longer @@ -163,8 +161,7 @@ def create( voice: The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are - `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, - `shimmer`, and `verse`. + `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. extra_headers: Send extra headers @@ -251,9 +248,7 @@ async def create( tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN, tracing: session_create_params.Tracing | NotGiven = NOT_GIVEN, turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN, - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -339,7 +334,7 @@ async def create( set to `null` to turn off, in which case the client must manually trigger model response. 
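Per the corrected docstrings above, a cancellation now surfaces as a `response.done` event whose status is `cancelled`, rather than a dedicated `response.cancelled` event. A hedged sketch of observing this, assuming the connection-level `response.create()` / `response.cancel()` helpers shown in this file:

```py
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    async with client.realtime.connect(model="gpt-realtime") as connection:
        await connection.session.update(session={"modalities": ["text"]})
        await connection.response.create()
        await connection.response.cancel()  # cancel the in-progress response

        async for event in connection:
            if event.type == "response.done":
                print(event.response.status)  # expected: "cancelled"
                break


asyncio.run(main())
```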
Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. Semantic VAD - is more advanced and uses a turn detection model (in conjuction with VAD) to + is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and wait longer @@ -348,8 +343,7 @@ async def create( voice: The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are - `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, - `shimmer`, and `verse`. + `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. extra_headers: Send extra headers diff --git a/portkey_ai/_vendor/openai/resources/beta/realtime/transcription_sessions.py b/portkey_ai/_vendor/openai/resources/beta/realtime/transcription_sessions.py index 5f97b3c8..54fe7d5a 100644 --- a/portkey_ai/_vendor/openai/resources/beta/realtime/transcription_sessions.py +++ b/portkey_ai/_vendor/openai/resources/beta/realtime/transcription_sessions.py @@ -96,7 +96,7 @@ def create( set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. Semantic VAD - is more advanced and uses a turn detection model (in conjuction with VAD) to + is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and wait longer @@ -209,7 +209,7 @@ async def create( set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. Semantic VAD - is more advanced and uses a turn detection model (in conjuction with VAD) to + is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. 
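A sketch of the session options these docstrings describe, i.e. the trimmed voice list and `semantic_vad` turn detection; the model name and the shape of the `turn_detection` object follow the parameter docs above and should be treated as illustrative:

```py
from openai import OpenAI

client = OpenAI()

session = client.beta.realtime.sessions.create(
    model="gpt-4o-realtime-preview",
    voice="sage",  # `fable`, `onyx` and `nova` are no longer listed
    turn_detection={"type": "semantic_vad"},  # VAD plus a turn-detection model
)
print(session.client_secret.value)  # ephemeral key for client-side use
```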
For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and wait longer diff --git a/portkey_ai/_vendor/openai/resources/beta/threads/messages.py b/portkey_ai/_vendor/openai/resources/beta/threads/messages.py index 943d2e7f..8903ff03 100644 --- a/portkey_ai/_vendor/openai/resources/beta/threads/messages.py +++ b/portkey_ai/_vendor/openai/resources/beta/threads/messages.py @@ -600,27 +600,27 @@ def __init__(self, messages: Messages) -> None: self.create = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - messages.create # pyright: ignore[reportDeprecated], + messages.create, # pyright: ignore[reportDeprecated], ) ) self.retrieve = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - messages.retrieve # pyright: ignore[reportDeprecated], + messages.retrieve, # pyright: ignore[reportDeprecated], ) ) self.update = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - messages.update # pyright: ignore[reportDeprecated], + messages.update, # pyright: ignore[reportDeprecated], ) ) self.list = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - messages.list # pyright: ignore[reportDeprecated], + messages.list, # pyright: ignore[reportDeprecated], ) ) self.delete = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - messages.delete # pyright: ignore[reportDeprecated], + messages.delete, # pyright: ignore[reportDeprecated], ) ) @@ -631,27 +631,27 @@ def __init__(self, messages: AsyncMessages) -> None: self.create = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - messages.create # pyright: ignore[reportDeprecated], + messages.create, # pyright: ignore[reportDeprecated], ) ) self.retrieve = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - messages.retrieve # pyright: ignore[reportDeprecated], + messages.retrieve, # pyright: ignore[reportDeprecated], ) ) self.update = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - messages.update # pyright: ignore[reportDeprecated], + messages.update, # pyright: ignore[reportDeprecated], ) ) self.list = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - messages.list # pyright: ignore[reportDeprecated], + messages.list, # pyright: ignore[reportDeprecated], ) ) self.delete = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - messages.delete # pyright: ignore[reportDeprecated], + messages.delete, # pyright: ignore[reportDeprecated], ) ) @@ -662,27 +662,27 @@ def __init__(self, messages: Messages) -> None: self.create = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - messages.create # pyright: ignore[reportDeprecated], + messages.create, # pyright: ignore[reportDeprecated], ) ) self.retrieve = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - messages.retrieve # pyright: ignore[reportDeprecated], + messages.retrieve, # pyright: ignore[reportDeprecated], ) ) self.update = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - messages.update # pyright: ignore[reportDeprecated], + messages.update, # pyright: ignore[reportDeprecated], ) ) self.list = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - messages.list # pyright: ignore[reportDeprecated], + messages.list, # pyright: ignore[reportDeprecated], ) ) 
self.delete = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - messages.delete # pyright: ignore[reportDeprecated], + messages.delete, # pyright: ignore[reportDeprecated], ) ) @@ -693,26 +693,26 @@ def __init__(self, messages: AsyncMessages) -> None: self.create = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - messages.create # pyright: ignore[reportDeprecated], + messages.create, # pyright: ignore[reportDeprecated], ) ) self.retrieve = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - messages.retrieve # pyright: ignore[reportDeprecated], + messages.retrieve, # pyright: ignore[reportDeprecated], ) ) self.update = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - messages.update # pyright: ignore[reportDeprecated], + messages.update, # pyright: ignore[reportDeprecated], ) ) self.list = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - messages.list # pyright: ignore[reportDeprecated], + messages.list, # pyright: ignore[reportDeprecated], ) ) self.delete = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - messages.delete # pyright: ignore[reportDeprecated], + messages.delete, # pyright: ignore[reportDeprecated], ) ) diff --git a/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py b/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py index 3d9ae975..e97d519a 100644 --- a/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py +++ b/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py @@ -167,12 +167,11 @@ def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -221,7 +220,7 @@ def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -322,12 +321,11 @@ def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. 
response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -372,7 +370,7 @@ def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -473,12 +471,11 @@ def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -523,7 +520,7 @@ def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1600,12 +1597,11 @@ async def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -1654,7 +1650,7 @@ async def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1755,12 +1751,11 @@ async def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -1805,7 +1800,7 @@ async def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1906,12 +1901,11 @@ async def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -1956,7 +1950,7 @@ async def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -2932,32 +2926,32 @@ def __init__(self, runs: Runs) -> None: self.create = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - runs.create # pyright: ignore[reportDeprecated], + runs.create, # pyright: ignore[reportDeprecated], ) ) self.retrieve = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - runs.retrieve # pyright: ignore[reportDeprecated], + runs.retrieve, # pyright: ignore[reportDeprecated], ) ) self.update = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - runs.update # pyright: ignore[reportDeprecated], + runs.update, # pyright: ignore[reportDeprecated], ) ) self.list = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - runs.list # pyright: ignore[reportDeprecated], + runs.list, # pyright: ignore[reportDeprecated], ) ) self.cancel = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - runs.cancel # pyright: ignore[reportDeprecated], + runs.cancel, # pyright: ignore[reportDeprecated], ) ) self.submit_tool_outputs = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - runs.submit_tool_outputs # pyright: ignore[reportDeprecated], + runs.submit_tool_outputs, # pyright: ignore[reportDeprecated], ) ) @@ -2972,32 +2966,32 @@ def __init__(self, runs: AsyncRuns) -> None: self.create = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - runs.create # pyright: ignore[reportDeprecated], + runs.create, # pyright: ignore[reportDeprecated], ) ) self.retrieve = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - runs.retrieve # pyright: ignore[reportDeprecated], + runs.retrieve, # 
pyright: ignore[reportDeprecated], ) ) self.update = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - runs.update # pyright: ignore[reportDeprecated], + runs.update, # pyright: ignore[reportDeprecated], ) ) self.list = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - runs.list # pyright: ignore[reportDeprecated], + runs.list, # pyright: ignore[reportDeprecated], ) ) self.cancel = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - runs.cancel # pyright: ignore[reportDeprecated], + runs.cancel, # pyright: ignore[reportDeprecated], ) ) self.submit_tool_outputs = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - runs.submit_tool_outputs # pyright: ignore[reportDeprecated], + runs.submit_tool_outputs, # pyright: ignore[reportDeprecated], ) ) @@ -3012,32 +3006,32 @@ def __init__(self, runs: Runs) -> None: self.create = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - runs.create # pyright: ignore[reportDeprecated], + runs.create, # pyright: ignore[reportDeprecated], ) ) self.retrieve = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - runs.retrieve # pyright: ignore[reportDeprecated], + runs.retrieve, # pyright: ignore[reportDeprecated], ) ) self.update = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - runs.update # pyright: ignore[reportDeprecated], + runs.update, # pyright: ignore[reportDeprecated], ) ) self.list = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - runs.list # pyright: ignore[reportDeprecated], + runs.list, # pyright: ignore[reportDeprecated], ) ) self.cancel = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - runs.cancel # pyright: ignore[reportDeprecated], + runs.cancel, # pyright: ignore[reportDeprecated], ) ) self.submit_tool_outputs = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - runs.submit_tool_outputs # pyright: ignore[reportDeprecated], + runs.submit_tool_outputs, # pyright: ignore[reportDeprecated], ) ) @@ -3052,32 +3046,32 @@ def __init__(self, runs: AsyncRuns) -> None: self.create = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - runs.create # pyright: ignore[reportDeprecated], + runs.create, # pyright: ignore[reportDeprecated], ) ) self.retrieve = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - runs.retrieve # pyright: ignore[reportDeprecated], + runs.retrieve, # pyright: ignore[reportDeprecated], ) ) self.update = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - runs.update # pyright: ignore[reportDeprecated], + runs.update, # pyright: ignore[reportDeprecated], ) ) self.list = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - runs.list # pyright: ignore[reportDeprecated], + runs.list, # pyright: ignore[reportDeprecated], ) ) self.cancel = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - runs.cancel # pyright: ignore[reportDeprecated], + runs.cancel, # pyright: ignore[reportDeprecated], ) ) self.submit_tool_outputs = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - runs.submit_tool_outputs # pyright: ignore[reportDeprecated], + runs.submit_tool_outputs, # pyright: ignore[reportDeprecated], ) ) diff --git a/portkey_ai/_vendor/openai/resources/beta/threads/runs/steps.py 
b/portkey_ai/_vendor/openai/resources/beta/threads/runs/steps.py index eebb2003..8e34210b 100644 --- a/portkey_ai/_vendor/openai/resources/beta/threads/runs/steps.py +++ b/portkey_ai/_vendor/openai/resources/beta/threads/runs/steps.py @@ -341,12 +341,12 @@ def __init__(self, steps: Steps) -> None: self.retrieve = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - steps.retrieve # pyright: ignore[reportDeprecated], + steps.retrieve, # pyright: ignore[reportDeprecated], ) ) self.list = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - steps.list # pyright: ignore[reportDeprecated], + steps.list, # pyright: ignore[reportDeprecated], ) ) @@ -357,12 +357,12 @@ def __init__(self, steps: AsyncSteps) -> None: self.retrieve = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - steps.retrieve # pyright: ignore[reportDeprecated], + steps.retrieve, # pyright: ignore[reportDeprecated], ) ) self.list = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - steps.list # pyright: ignore[reportDeprecated], + steps.list, # pyright: ignore[reportDeprecated], ) ) @@ -373,12 +373,12 @@ def __init__(self, steps: Steps) -> None: self.retrieve = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - steps.retrieve # pyright: ignore[reportDeprecated], + steps.retrieve, # pyright: ignore[reportDeprecated], ) ) self.list = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - steps.list # pyright: ignore[reportDeprecated], + steps.list, # pyright: ignore[reportDeprecated], ) ) @@ -389,11 +389,11 @@ def __init__(self, steps: AsyncSteps) -> None: self.retrieve = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - steps.retrieve # pyright: ignore[reportDeprecated], + steps.retrieve, # pyright: ignore[reportDeprecated], ) ) self.list = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - steps.list # pyright: ignore[reportDeprecated], + steps.list, # pyright: ignore[reportDeprecated], ) ) diff --git a/portkey_ai/_vendor/openai/resources/beta/threads/threads.py b/portkey_ai/_vendor/openai/resources/beta/threads/threads.py index ff2a4115..7121851c 100644 --- a/portkey_ai/_vendor/openai/resources/beta/threads/threads.py +++ b/portkey_ai/_vendor/openai/resources/beta/threads/threads.py @@ -393,7 +393,7 @@ def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -527,7 +527,7 @@ def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -661,7 +661,7 @@ def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1251,7 +1251,7 @@ async def create_and_run( We generally recommend altering this or temperature but not both. 
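To ground the updated parameter docs in this file and in runs.py above (`minimal` joins the accepted `reasoning_effort` values; `truncation_strategy` bounds the initial context window of a run), a sketch against `threads.runs.create` with hypothetical IDs:

```py
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.runs.create(
    thread_id="thread_abc123",  # hypothetical thread ID
    assistant_id="asst_abc123",  # hypothetical assistant ID
    reasoning_effort="minimal",  # newly documented alongside low/medium/high
    truncation_strategy={"type": "last_messages", "last_messages": 10},
)
print(run.status)
```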
truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1385,7 +1385,7 @@ async def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1519,7 +1519,7 @@ async def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1785,27 +1785,27 @@ def __init__(self, threads: Threads) -> None: self.create = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - threads.create # pyright: ignore[reportDeprecated], + threads.create, # pyright: ignore[reportDeprecated], ) ) self.retrieve = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - threads.retrieve # pyright: ignore[reportDeprecated], + threads.retrieve, # pyright: ignore[reportDeprecated], ) ) self.update = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - threads.update # pyright: ignore[reportDeprecated], + threads.update, # pyright: ignore[reportDeprecated], ) ) self.delete = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - threads.delete # pyright: ignore[reportDeprecated], + threads.delete, # pyright: ignore[reportDeprecated], ) ) self.create_and_run = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - threads.create_and_run # pyright: ignore[reportDeprecated], + threads.create_and_run, # pyright: ignore[reportDeprecated], ) ) @@ -1824,27 +1824,27 @@ def __init__(self, threads: AsyncThreads) -> None: self.create = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - threads.create # pyright: ignore[reportDeprecated], + threads.create, # pyright: ignore[reportDeprecated], ) ) self.retrieve = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - threads.retrieve # pyright: ignore[reportDeprecated], + threads.retrieve, # pyright: ignore[reportDeprecated], ) ) self.update = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - threads.update # pyright: ignore[reportDeprecated], + threads.update, # pyright: ignore[reportDeprecated], ) ) self.delete = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - threads.delete # pyright: ignore[reportDeprecated], + threads.delete, # pyright: ignore[reportDeprecated], ) ) self.create_and_run = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - threads.create_and_run # pyright: ignore[reportDeprecated], + threads.create_and_run, # pyright: ignore[reportDeprecated], ) ) @@ -1863,27 +1863,27 @@ def __init__(self, threads: Threads) -> None: self.create = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - threads.create # pyright: ignore[reportDeprecated], + threads.create, # pyright: ignore[reportDeprecated], ) ) self.retrieve = ( # pyright: ignore[reportDeprecated] 
to_streamed_response_wrapper( - threads.retrieve # pyright: ignore[reportDeprecated], + threads.retrieve, # pyright: ignore[reportDeprecated], ) ) self.update = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - threads.update # pyright: ignore[reportDeprecated], + threads.update, # pyright: ignore[reportDeprecated], ) ) self.delete = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - threads.delete # pyright: ignore[reportDeprecated], + threads.delete, # pyright: ignore[reportDeprecated], ) ) self.create_and_run = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - threads.create_and_run # pyright: ignore[reportDeprecated], + threads.create_and_run, # pyright: ignore[reportDeprecated], ) ) @@ -1902,27 +1902,27 @@ def __init__(self, threads: AsyncThreads) -> None: self.create = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - threads.create # pyright: ignore[reportDeprecated], + threads.create, # pyright: ignore[reportDeprecated], ) ) self.retrieve = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - threads.retrieve # pyright: ignore[reportDeprecated], + threads.retrieve, # pyright: ignore[reportDeprecated], ) ) self.update = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - threads.update # pyright: ignore[reportDeprecated], + threads.update, # pyright: ignore[reportDeprecated], ) ) self.delete = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - threads.delete # pyright: ignore[reportDeprecated], + threads.delete, # pyright: ignore[reportDeprecated], ) ) self.create_and_run = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - threads.create_and_run # pyright: ignore[reportDeprecated], + threads.create_and_run, # pyright: ignore[reportDeprecated], ) ) diff --git a/portkey_ai/_vendor/openai/resources/chat/completions/completions.py b/portkey_ai/_vendor/openai/resources/chat/completions/completions.py index a2a664ac..168cf04d 100644 --- a/portkey_ai/_vendor/openai/resources/chat/completions/completions.py +++ b/portkey_ai/_vendor/openai/resources/chat/completions/completions.py @@ -3,7 +3,8 @@ from __future__ import annotations import inspect -from typing import Dict, List, Union, Iterable, Optional +from typing import Dict, List, Type, Union, Iterable, Optional, cast +from functools import partial from typing_extensions import Literal, overload import httpx @@ -18,7 +19,7 @@ MessagesWithStreamingResponse, AsyncMessagesWithStreamingResponse, ) -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ...._utils import required_args, maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -32,15 +33,23 @@ completion_update_params, ) from ...._base_client import AsyncPaginator, make_request_options +from ....lib._parsing import ( + ResponseFormatT, + validate_input_tools as _validate_input_tools, + parse_chat_completion as _parse_chat_completion, + type_to_response_format_param as _type_to_response_format, +) +from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager from ....types.shared.chat_model import ChatModel from ....types.chat.chat_completion import ChatCompletion from ....types.shared_params.metadata import Metadata from ....types.shared.reasoning_effort import ReasoningEffort from 
....types.chat.chat_completion_chunk import ChatCompletionChunk +from ....types.chat.parsed_chat_completion import ParsedChatCompletion from ....types.chat.chat_completion_deleted import ChatCompletionDeleted -from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam +from ....types.chat.chat_completion_tool_union_param import ChatCompletionToolUnionParam from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam @@ -72,6 +81,159 @@ def with_streaming_response(self) -> CompletionsWithStreamingResponse: """ return CompletionsWithStreamingResponse(self) + def parse( + self, + *, + messages: Iterable[ChatCompletionMessageParam], + model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, + response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, + functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ParsedChatCompletion[ResponseFormatT]: + """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types + & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class. + + You can pass a pydantic model to this method and it will automatically convert the model + into a JSON schema, send it to the API and parse the response content back into the given model. + + This method will also automatically parse `function` tool calls if: + - You use the `openai.pydantic_function_tool()` helper method + - You mark your tool schema with `"strict": True` + + Example usage: + ```py + from pydantic import BaseModel + from openai import OpenAI + + + class Step(BaseModel): + explanation: str + output: str + + + class MathResponse(BaseModel): + steps: List[Step] + final_answer: str + + + client = OpenAI() + completion = client.chat.completions.parse( + model="gpt-4o-2024-08-06", + messages=[ + {"role": "system", "content": "You are a helpful math tutor."}, + {"role": "user", "content": "solve 8x + 31 = 2"}, + ], + response_format=MathResponse, + ) + + message = completion.choices[0].message + if message.parsed: + print(message.parsed.steps) + print("answer: ", message.parsed.final_answer) + ``` + """ + chat_completion_tools = _validate_input_tools(tools) + + extra_headers = { + "X-Stainless-Helper-Method": "chat.completions.parse", + **(extra_headers or {}), + } + + def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]: + return _parse_chat_completion( + response_format=response_format, + chat_completion=raw_completion, + input_tools=chat_completion_tools, + ) + + return self._post( + "/chat/completions", + body=maybe_transform( + { + "messages": messages, + "model": model, + "audio": audio, + "frequency_penalty": frequency_penalty, + "function_call": function_call, + "functions": functions, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, + "max_tokens": max_tokens, + "metadata": metadata, + "modalities": modalities, + "n": n, + "parallel_tool_calls": parallel_tool_calls, + "prediction": prediction, + "presence_penalty": presence_penalty, + "prompt_cache_key": prompt_cache_key, + "reasoning_effort": reasoning_effort, + "response_format": _type_to_response_format(response_format), + "safety_identifier": safety_identifier, + "seed": seed, + "service_tier": service_tier, + "stop": stop, + "store": store, + "stream": False, + "stream_options": stream_options, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_logprobs": top_logprobs, + "top_p": top_p, + "user": user, + "verbosity": verbosity, + "web_search_options": web_search_options, + }, + completion_create_params.CompletionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + post_parser=parser, + ), + # we turn the `ChatCompletion` instance into a `ParsedChatCompletion` + # in the `parser` function above + cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion), + stream=False, + ) + @overload def create( self, @@ -92,20 +254,23 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: 
Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -232,12 +397,15 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o-series models only** + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: An object specifying the format that the model must output. @@ -250,29 +418,34 @@ def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. 
Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - service_tier: Specifies the latency tier to use for processing the request. This parameter is - relevant for customers subscribed to the scale tier service: - - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarantee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. - - If set to 'flex', the request will be processed with the Flex Processing - service tier. - [Learn more](https://platform.openai.com/docs/guides/flex-processing). + service_tier: Specifies the processing type used for serving the request. + + - If set to 'auto', then the request will be processed with the service tier + configured in the Project settings. Unless otherwise configured, the Project + will use 'default'. + - If set to 'default', then the request will be processed with the standard + pricing and performance for the selected model. + - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. + When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. stop: Not supported with latest reasoning models `o3` and `o4-mini`. @@ -283,6 +456,8 @@ def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. + stream: If set to true, the model response data will be streamed to the client as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). @@ -309,9 +484,9 @@ def create( `none` is the default when no tools are present. `auto` is the default if tools are present. - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. Use this to provide a list of functions the model may generate JSON inputs - for. A max of 128 functions are supported. + tools: A list of tools the model may call. You can provide either + [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + or [function tools](https://platform.openai.com/docs/guides/function-calling). top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -323,9 +498,15 @@ def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. 
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the @@ -362,19 +543,22 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -510,12 +694,15 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o-series models only** + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. 
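To make the updated `reasoning_effort` documentation concrete, here is a minimal sketch; the model name is an assumption, and which of `minimal`/`low`/`medium`/`high` a given model accepts varies by model family:

```py
from openai import OpenAI

client = OpenAI()

# Sketch only: "o4-mini" is an assumed reasoning-capable model; lower effort
# trades reasoning depth for latency and fewer reasoning tokens.
completion = client.chat.completions.create(
    model="o4-mini",
    messages=[{"role": "user", "content": "Summarize Hamlet in one sentence."}],
    reasoning_effort="minimal",
)
print(completion.choices[0].message.content)
```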
response_format: An object specifying the format that the model must output. @@ -528,29 +715,34 @@ def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - service_tier: Specifies the latency tier to use for processing the request. This parameter is - relevant for customers subscribed to the scale tier service: - - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarantee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. - - If set to 'flex', the request will be processed with the Flex Processing - service tier. - [Learn more](https://platform.openai.com/docs/guides/flex-processing). + service_tier: Specifies the processing type used for serving the request. + + - If set to 'auto', then the request will be processed with the service tier + configured in the Project settings. Unless otherwise configured, the Project + will use 'default'. + - If set to 'default', then the request will be processed with the standard + pricing and performance for the selected model. + - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. + When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. stop: Not supported with latest reasoning models `o3` and `o4-mini`. @@ -561,6 +753,8 @@ def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. + stream_options: Options for streaming response. Only set this when you set `stream: true`. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will @@ -578,9 +772,9 @@ def create( `none` is the default when no tools are present. `auto` is the default if tools are present. - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. 
Use this to provide a list of functions the model may generate JSON inputs - for. A max of 128 functions are supported. + tools: A list of tools the model may call. You can provide either + [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + or [function tools](https://platform.openai.com/docs/guides/function-calling). top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -592,9 +786,15 @@ def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the @@ -631,19 +831,22 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -779,12 +982,15 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
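The `user` deprecation described above splits one field into two narrower ones; a hedged migration sketch follows (the SHA-256 hashing scheme is our choice, not prescribed by the docs):

```py
import hashlib

from openai import OpenAI

client = OpenAI()

# Hash the end-user identity so no identifying information is sent upstream.
end_user = hashlib.sha256(b"alice@example.com").hexdigest()

completion = client.chat.completions.create(
    model="gpt-4o",  # assumed model name
    messages=[{"role": "user", "content": "Hello!"}],
    safety_identifier=end_user,  # abuse detection (replaces `user`)
    prompt_cache_key=end_user,   # cache-hit bucketing (replaces `user`)
)
```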
- reasoning_effort: **o-series models only** + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: An object specifying the format that the model must output. @@ -797,29 +1003,34 @@ def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - service_tier: Specifies the latency tier to use for processing the request. This parameter is - relevant for customers subscribed to the scale tier service: - - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarantee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. - - If set to 'flex', the request will be processed with the Flex Processing - service tier. - [Learn more](https://platform.openai.com/docs/guides/flex-processing). + service_tier: Specifies the processing type used for serving the request. + + - If set to 'auto', then the request will be processed with the service tier + configured in the Project settings. Unless otherwise configured, the Project + will use 'default'. + - If set to 'default', then the request will be processed with the standard + pricing and performance for the selected model. + - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. + When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. 
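A minimal sketch of the reworked `service_tier` behavior; note that the tier echoed in the response reflects the processing mode actually used, which may differ from what was requested:

```py
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",  # assumed model; tier availability depends on the account
    messages=[{"role": "user", "content": "ping"}],
    service_tier="flex",  # or "auto" / "default" / "priority"
)

# May report a different tier than the one requested.
print(completion.service_tier)
```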
stop: Not supported with latest reasoning models `o3` and `o4-mini`. @@ -830,6 +1041,8 @@ def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. + stream_options: Options for streaming response. Only set this when you set `stream: true`. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will @@ -847,9 +1060,9 @@ def create( `none` is the default when no tools are present. `auto` is the default if tools are present. - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. Use this to provide a list of functions the model may generate JSON inputs - for. A max of 128 functions are supported. + tools: A list of tools the model may call. You can provide either + [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + or [function tools](https://platform.openai.com/docs/guides/function-calling). top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -861,9 +1074,15 @@ def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. web_search_options: This tool searches the web for relevant results to use in a response. 
Learn more about the @@ -899,20 +1118,23 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
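Since this overload accepts `stream` as a literal union, here is a short sketch of the raw streaming path (distinct from the `.stream()` helper added further down; model name assumed):

```py
from openai import OpenAI

client = OpenAI()

stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say this is a test"}],
    stream=True,
)
for chunk in stream:
    # Chunks may omit choices (e.g. a final usage chunk), so guard the access.
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
```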
@@ -942,8 +1164,10 @@ def create( "parallel_tool_calls": parallel_tool_calls, "prediction": prediction, "presence_penalty": presence_penalty, + "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": response_format, + "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, "stop": stop, @@ -956,6 +1180,7 @@ def create( "top_logprobs": top_logprobs, "top_p": top_p, "user": user, + "verbosity": verbosity, "web_search_options": web_search_options, }, completion_create_params.CompletionCreateParamsStreaming @@ -1150,6 +1375,123 @@ def delete( cast_to=ChatCompletionDeleted, ) + def stream( + self, + *, + messages: Iterable[ChatCompletionMessageParam], + model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, + response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, + functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChatCompletionStreamManager[ResponseFormatT]: + """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API + and automatic accumulation of each delta. + + This also supports all of the parsing utilities that `.parse()` does. 
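Since `.stream()` supports the same parsing utilities as `.parse()`, combining streaming with structured output might look like the following sketch (the context-manager requirement is explained just below; `get_final_completion()` is the accumulation helper from the linked events docs):

```py
from pydantic import BaseModel

from openai import OpenAI


class Answer(BaseModel):
    value: int


client = OpenAI()

with client.chat.completions.stream(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "What is 2 + 2?"}],
    response_format=Answer,
) as stream:
    for event in stream:
        if event.type == "content.delta":
            print(event.delta, end="", flush=True)

# The accumulated completion is parsed just like `.parse()` output.
final = stream.get_final_completion()
print(final.choices[0].message.parsed)
```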
+ + Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response: + + ```py + with client.chat.completions.stream( + model="gpt-4o-2024-08-06", + messages=[...], + ) as stream: + for event in stream: + if event.type == "content.delta": + print(event.delta, flush=True, end="") + ``` + + When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)` is an iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events). + + When the context manager exits, the response will be closed, however the `stream` instance is still available outside + the context manager. + """ + extra_headers = { + "X-Stainless-Helper-Method": "chat.completions.stream", + **(extra_headers or {}), + } + + api_request: partial[Stream[ChatCompletionChunk]] = partial( + self.create, + messages=messages, + model=model, + audio=audio, + stream=True, + response_format=_type_to_response_format(response_format), + frequency_penalty=frequency_penalty, + function_call=function_call, + functions=functions, + logit_bias=logit_bias, + logprobs=logprobs, + max_completion_tokens=max_completion_tokens, + max_tokens=max_tokens, + metadata=metadata, + modalities=modalities, + n=n, + parallel_tool_calls=parallel_tool_calls, + prediction=prediction, + presence_penalty=presence_penalty, + prompt_cache_key=prompt_cache_key, + reasoning_effort=reasoning_effort, + safety_identifier=safety_identifier, + seed=seed, + service_tier=service_tier, + store=store, + stop=stop, + stream_options=stream_options, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_logprobs=top_logprobs, + top_p=top_p, + user=user, + verbosity=verbosity, + web_search_options=web_search_options, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + return ChatCompletionStreamManager( + api_request, + response_format=response_format, + input_tools=tools, + ) + class AsyncCompletions(AsyncAPIResource): @cached_property @@ -1175,6 +1517,159 @@ def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse: """ return AsyncCompletionsWithStreamingResponse(self) + async def parse( + self, + *, + messages: Iterable[ChatCompletionMessageParam], + model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, + response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, + functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + 
safety_identifier: str | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ParsedChatCompletion[ResponseFormatT]: + """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types + & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class. + + You can pass a pydantic model to this method and it will automatically convert the model + into a JSON schema, send it to the API and parse the response content back into the given model. 
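Alongside response-format parsing, tool calls can come back parsed as well; a hedged sketch using the `openai.pydantic_function_tool()` helper referenced just below (`GetWeather` is a hypothetical tool):

```py
import asyncio

import openai
from openai import AsyncOpenAI
from pydantic import BaseModel


class GetWeather(BaseModel):
    city: str


async def main() -> None:
    client = AsyncOpenAI()
    completion = await client.chat.completions.parse(
        model="gpt-4o-2024-08-06",
        messages=[{"role": "user", "content": "What is the weather in Paris?"}],
        tools=[openai.pydantic_function_tool(GetWeather)],
    )
    message = completion.choices[0].message
    if message.tool_calls:
        # Strict function tools are parsed back into the pydantic model.
        print(message.tool_calls[0].function.parsed_arguments)


asyncio.run(main())
```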
+ + This method will also automatically parse `function` tool calls if: + - You use the `openai.pydantic_function_tool()` helper method + - You mark your tool schema with `"strict": True` + + Example usage: + ```py + from pydantic import BaseModel + from openai import AsyncOpenAI + + + class Step(BaseModel): + explanation: str + output: str + + + class MathResponse(BaseModel): + steps: List[Step] + final_answer: str + + + client = AsyncOpenAI() + completion = await client.chat.completions.parse( + model="gpt-4o-2024-08-06", + messages=[ + {"role": "system", "content": "You are a helpful math tutor."}, + {"role": "user", "content": "solve 8x + 31 = 2"}, + ], + response_format=MathResponse, + ) + + message = completion.choices[0].message + if message.parsed: + print(message.parsed.steps) + print("answer: ", message.parsed.final_answer) + ``` + """ + _validate_input_tools(tools) + + extra_headers = { + "X-Stainless-Helper-Method": "chat.completions.parse", + **(extra_headers or {}), + } + + def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]: + return _parse_chat_completion( + response_format=response_format, + chat_completion=raw_completion, + input_tools=tools, + ) + + return await self._post( + "/chat/completions", + body=await async_maybe_transform( + { + "messages": messages, + "model": model, + "audio": audio, + "frequency_penalty": frequency_penalty, + "function_call": function_call, + "functions": functions, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, + "max_tokens": max_tokens, + "metadata": metadata, + "modalities": modalities, + "n": n, + "parallel_tool_calls": parallel_tool_calls, + "prediction": prediction, + "presence_penalty": presence_penalty, + "prompt_cache_key": prompt_cache_key, + "reasoning_effort": reasoning_effort, + "response_format": _type_to_response_format(response_format), + "safety_identifier": safety_identifier, + "seed": seed, + "service_tier": service_tier, + "store": store, + "stop": stop, + "stream": False, + "stream_options": stream_options, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_logprobs": top_logprobs, + "top_p": top_p, + "user": user, + "verbosity": verbosity, + "web_search_options": web_search_options, + }, + completion_create_params.CompletionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + post_parser=parser, + ), + # we turn the `ChatCompletion` instance into a `ParsedChatCompletion` + # in the `parser` function above + cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion), + stream=False, + ) + @overload async def create( self, @@ -1195,20 +1690,23 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = 
NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -1335,12 +1833,15 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o-series models only** + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: An object specifying the format that the model must output. @@ -1353,29 +1854,34 @@ async def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - service_tier: Specifies the latency tier to use for processing the request. This parameter is - relevant for customers subscribed to the scale tier service: - - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarantee. 
- - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. - - If set to 'flex', the request will be processed with the Flex Processing - service tier. - [Learn more](https://platform.openai.com/docs/guides/flex-processing). + service_tier: Specifies the processing type used for serving the request. + + - If set to 'auto', then the request will be processed with the service tier + configured in the Project settings. Unless otherwise configured, the Project + will use 'default'. + - If set to 'default', then the request will be processed with the standard + pricing and performance for the selected model. + - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. + When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. stop: Not supported with latest reasoning models `o3` and `o4-mini`. @@ -1386,6 +1892,8 @@ async def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. + stream: If set to true, the model response data will be streamed to the client as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). @@ -1412,9 +1920,9 @@ async def create( `none` is the default when no tools are present. `auto` is the default if tools are present. - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. Use this to provide a list of functions the model may generate JSON inputs - for. A max of 128 functions are supported. + tools: A list of tools the model may call. You can provide either + [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + or [function tools](https://platform.openai.com/docs/guides/function-calling). top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -1426,9 +1934,15 @@ async def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + + verbosity: Constrains the verbosity of the model's response. 
Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the @@ -1465,19 +1979,22 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -1613,12 +2130,15 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o-series models only** + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: An object specifying the format that the model must output. @@ -1631,29 +2151,34 @@ async def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. 
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - service_tier: Specifies the latency tier to use for processing the request. This parameter is - relevant for customers subscribed to the scale tier service: - - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarantee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. - - If set to 'flex', the request will be processed with the Flex Processing - service tier. - [Learn more](https://platform.openai.com/docs/guides/flex-processing). + service_tier: Specifies the processing type used for serving the request. + + - If set to 'auto', then the request will be processed with the service tier + configured in the Project settings. Unless otherwise configured, the Project + will use 'default'. + - If set to 'default', then the request will be processed with the standard + pricing and performance for the selected model. + - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. + When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. stop: Not supported with latest reasoning models `o3` and `o4-mini`. @@ -1664,6 +2189,8 @@ async def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. + stream_options: Options for streaming response. Only set this when you set `stream: true`. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will @@ -1681,9 +2208,9 @@ async def create( `none` is the default when no tools are present. `auto` is the default if tools are present. - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. Use this to provide a list of functions the model may generate JSON inputs - for. A max of 128 functions are supported. + tools: A list of tools the model may call. You can provide either + [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + or [function tools](https://platform.openai.com/docs/guides/function-calling). top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. 
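To ground the `top_logprobs` description above, a small sketch that requests per-token alternatives (model name assumed):

```py
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "The capital of France is"}],
    logprobs=True,
    top_logprobs=3,  # 0-20 alternatives per token position
)

logprobs = completion.choices[0].logprobs
if logprobs and logprobs.content:
    for tok in logprobs.content:
        print(tok.token, [alt.token for alt in tok.top_logprobs])
```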
@@ -1695,9 +2222,15 @@ async def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the @@ -1734,19 +2267,22 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -1882,12 +2418,15 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - reasoning_effort: **o-series models only** + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. 
Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: An object specifying the format that the model must output. @@ -1900,29 +2439,34 @@ async def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. - service_tier: Specifies the latency tier to use for processing the request. This parameter is - relevant for customers subscribed to the scale tier service: - - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarantee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. - - If set to 'flex', the request will be processed with the Flex Processing - service tier. - [Learn more](https://platform.openai.com/docs/guides/flex-processing). + service_tier: Specifies the processing type used for serving the request. + + - If set to 'auto', then the request will be processed with the service tier + configured in the Project settings. Unless otherwise configured, the Project + will use 'default'. + - If set to 'default', then the request will be processed with the standard + pricing and performance for the selected model. + - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. + When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. stop: Not supported with latest reasoning models `o3` and `o4-mini`. @@ -1933,6 +2477,8 @@ async def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. + stream_options: Options for streaming response. Only set this when you set `stream: true`. temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will @@ -1950,9 +2496,9 @@ async def create( `none` is the default when no tools are present. `auto` is the default if tools are present. - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. Use this to provide a list of functions the model may generate JSON inputs - for. A max of 128 functions are supported. + tools: A list of tools the model may call. You can provide either + [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + or [function tools](https://platform.openai.com/docs/guides/function-calling). top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -1964,9 +2510,15 @@ async def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. web_search_options: This tool searches the web for relevant results to use in a response. 
Learn more about the @@ -2002,20 +2554,23 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, - tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
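The hunks above thread four additions through each `create` overload: `prompt_cache_key`, `safety_identifier`, the expanded `service_tier` literals, and `verbosity`. A minimal caller-side sketch of combining them, assuming a standard client; the model name and parameter values are illustrative, not prescriptive:

```python
# Sketch only: exercises the parameters added in the hunks above.
import hashlib

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Hash the end-user identifier so no raw PII is transmitted, per the
# safety_identifier guidance in the docstring diff.
safety_id = hashlib.sha256(b"user@example.com").hexdigest()

completion = client.chat.completions.create(
    model="gpt-5",  # illustrative model name; verbosity targets newer models
    messages=[{"role": "user", "content": "Summarize prompt caching in one line."}],
    prompt_cache_key="docs-summary-v1",  # buckets similar requests for cache hits
    safety_identifier=safety_id,         # takes over the abuse-detection role of `user`
    service_tier="priority",             # new literal alongside "flex" and "scale"
    verbosity="low",                     # new knob: "low" | "medium" | "high"
)
print(completion.choices[0].message.content)
```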
@@ -2045,8 +2600,10 @@ async def create( "parallel_tool_calls": parallel_tool_calls, "prediction": prediction, "presence_penalty": presence_penalty, + "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": response_format, + "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, "stop": stop, @@ -2059,6 +2616,7 @@ async def create( "top_logprobs": top_logprobs, "top_p": top_p, "user": user, + "verbosity": verbosity, "web_search_options": web_search_options, }, completion_create_params.CompletionCreateParamsStreaming @@ -2253,11 +2811,132 @@ async def delete( cast_to=ChatCompletionDeleted, ) + def stream( + self, + *, + messages: Iterable[ChatCompletionMessageParam], + model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, + response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, + functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, + reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + seed: Optional[int] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, + tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, + web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncChatCompletionStreamManager[ResponseFormatT]: + """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API + and automatic accumulation of each delta. + + This also supports all of the parsing utilities that `.parse()` does. 
+ + Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response: + + ```py + async with client.chat.completions.stream( + model="gpt-4o-2024-08-06", + messages=[...], + ) as stream: + async for event in stream: + if event.type == "content.delta": + print(event.delta, flush=True, end="") + ``` + + When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)` is an async iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events). + + When the context manager exits, the response will be closed, however the `stream` instance is still available outside + the context manager. + """ + _validate_input_tools(tools) + + extra_headers = { + "X-Stainless-Helper-Method": "chat.completions.stream", + **(extra_headers or {}), + } + + api_request = self.create( + messages=messages, + model=model, + audio=audio, + stream=True, + response_format=_type_to_response_format(response_format), + frequency_penalty=frequency_penalty, + function_call=function_call, + functions=functions, + logit_bias=logit_bias, + logprobs=logprobs, + max_completion_tokens=max_completion_tokens, + max_tokens=max_tokens, + metadata=metadata, + modalities=modalities, + n=n, + parallel_tool_calls=parallel_tool_calls, + prediction=prediction, + presence_penalty=presence_penalty, + prompt_cache_key=prompt_cache_key, + reasoning_effort=reasoning_effort, + safety_identifier=safety_identifier, + seed=seed, + service_tier=service_tier, + stop=stop, + store=store, + stream_options=stream_options, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_logprobs=top_logprobs, + top_p=top_p, + user=user, + verbosity=verbosity, + web_search_options=web_search_options, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + return AsyncChatCompletionStreamManager( + api_request, + response_format=response_format, + input_tools=tools, + ) + class CompletionsWithRawResponse: def __init__(self, completions: Completions) -> None: self._completions = completions + self.parse = _legacy_response.to_raw_response_wrapper( + completions.parse, + ) self.create = _legacy_response.to_raw_response_wrapper( completions.create, ) @@ -2283,6 +2962,9 @@ class AsyncCompletionsWithRawResponse: def __init__(self, completions: AsyncCompletions) -> None: self._completions = completions + self.parse = _legacy_response.async_to_raw_response_wrapper( + completions.parse, + ) self.create = _legacy_response.async_to_raw_response_wrapper( completions.create, ) @@ -2308,6 +2990,9 @@ class CompletionsWithStreamingResponse: def __init__(self, completions: Completions) -> None: self._completions = completions + self.parse = to_streamed_response_wrapper( + completions.parse, + ) self.create = to_streamed_response_wrapper( completions.create, ) @@ -2333,6 +3018,9 @@ class AsyncCompletionsWithStreamingResponse: def __init__(self, completions: AsyncCompletions) -> None: self._completions = completions + self.parse = async_to_streamed_response_wrapper( + completions.parse, + ) self.create = async_to_streamed_response_wrapper( completions.create, ) @@ -2357,5 +3045,5 @@ def messages(self) -> AsyncMessagesWithStreamingResponse: def validate_response_format(response_format: object) -> None: if inspect.isclass(response_format) and issubclass(response_format, 
pydantic.BaseModel): raise TypeError( - "You tried to pass a `BaseModel` class to `chat.completions.create()`; You must use `beta.chat.completions.parse()` instead" + "You tried to pass a `BaseModel` class to `chat.completions.create()`; You must use `chat.completions.parse()` instead" ) diff --git a/portkey_ai/_vendor/openai/resources/completions.py b/portkey_ai/_vendor/openai/resources/completions.py index 43b923b9..97a84575 100644 --- a/portkey_ai/_vendor/openai/resources/completions.py +++ b/portkey_ai/_vendor/openai/resources/completions.py @@ -2,14 +2,14 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional +from typing import Dict, Union, Iterable, Optional from typing_extensions import Literal, overload import httpx from .. import _legacy_response from ..types import completion_create_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from .._utils import required_args, maybe_transform, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource @@ -49,7 +49,7 @@ def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], + prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -59,7 +59,7 @@ def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, @@ -204,7 +204,7 @@ def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], + prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], stream: Literal[True], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, @@ -215,7 +215,7 @@ def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -359,7 +359,7 @@ def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], + prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], stream: bool, best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, @@ -370,7 +370,7 @@ def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = 
NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -514,7 +514,7 @@ def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], + prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -524,7 +524,7 @@ def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, @@ -599,7 +599,7 @@ async def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], + prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -609,7 +609,7 @@ async def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, @@ -754,7 +754,7 @@ async def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], + prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], stream: Literal[True], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, @@ -765,7 +765,7 @@ async def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -909,7 +909,7 @@ async def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], + prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], 
stream: bool, best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, @@ -920,7 +920,7 @@ async def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1064,7 +1064,7 @@ async def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], - prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], + prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, @@ -1074,7 +1074,7 @@ async def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], SequenceNotStr[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, diff --git a/portkey_ai/_vendor/openai/resources/containers/containers.py b/portkey_ai/_vendor/openai/resources/containers/containers.py index 71e5e6b0..30e9eff1 100644 --- a/portkey_ai/_vendor/openai/resources/containers/containers.py +++ b/portkey_ai/_vendor/openai/resources/containers/containers.py @@ -2,14 +2,13 @@ from __future__ import annotations -from typing import List from typing_extensions import Literal import httpx from ... import _legacy_response from ...types import container_list_params, container_create_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -60,7 +59,7 @@ def create( *, name: str, expires_after: container_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, - file_ids: List[str] | NotGiven = NOT_GIVEN, + file_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -256,7 +255,7 @@ async def create( *, name: str, expires_after: container_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, - file_ids: List[str] | NotGiven = NOT_GIVEN, + file_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, diff --git a/portkey_ai/_vendor/openai/resources/conversations/__init__.py b/portkey_ai/_vendor/openai/resources/conversations/__init__.py new file mode 100644 index 00000000..c6c4fd6e --- /dev/null +++ b/portkey_ai/_vendor/openai/resources/conversations/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .items import ( + Items, + AsyncItems, + ItemsWithRawResponse, + AsyncItemsWithRawResponse, + ItemsWithStreamingResponse, + AsyncItemsWithStreamingResponse, +) +from .conversations import ( + Conversations, + AsyncConversations, + ConversationsWithRawResponse, + AsyncConversationsWithRawResponse, + ConversationsWithStreamingResponse, + AsyncConversationsWithStreamingResponse, +) + +__all__ = [ + "Items", + "AsyncItems", + "ItemsWithRawResponse", + "AsyncItemsWithRawResponse", + "ItemsWithStreamingResponse", + "AsyncItemsWithStreamingResponse", + "Conversations", + "AsyncConversations", + "ConversationsWithRawResponse", + "AsyncConversationsWithRawResponse", + "ConversationsWithStreamingResponse", + "AsyncConversationsWithStreamingResponse", +] diff --git a/portkey_ai/_vendor/openai/resources/conversations/conversations.py b/portkey_ai/_vendor/openai/resources/conversations/conversations.py new file mode 100644 index 00000000..802620e6 --- /dev/null +++ b/portkey_ai/_vendor/openai/resources/conversations/conversations.py @@ -0,0 +1,474 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Iterable, Optional + +import httpx + +from ... import _legacy_response +from .items import ( + Items, + AsyncItems, + ItemsWithRawResponse, + AsyncItemsWithRawResponse, + ItemsWithStreamingResponse, + AsyncItemsWithStreamingResponse, +) +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ..._base_client import make_request_options +from ...types.conversations import conversation_create_params, conversation_update_params +from ...types.shared_params.metadata import Metadata +from ...types.conversations.conversation import Conversation +from ...types.responses.response_input_item_param import ResponseInputItemParam +from ...types.conversations.conversation_deleted_resource import ConversationDeletedResource + +__all__ = ["Conversations", "AsyncConversations"] + + +class Conversations(SyncAPIResource): + @cached_property + def items(self) -> Items: + return Items(self._client) + + @cached_property + def with_raw_response(self) -> ConversationsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return ConversationsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ConversationsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return ConversationsWithStreamingResponse(self) + + def create( + self, + *, + items: Optional[Iterable[ResponseInputItemParam]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Create a conversation. + + Args: + items: Initial items to include in the conversation context. You may add up to 20 items + at a time. + + metadata: Set of 16 key-value pairs that can be attached to an object. Useful for storing + additional information about the object in a structured format. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/conversations", + body=maybe_transform( + { + "items": items, + "metadata": metadata, + }, + conversation_create_params.ConversationCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + def retrieve( + self, + conversation_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Get a conversation with the given ID. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return self._get( + f"/conversations/{conversation_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + def update( + self, + conversation_id: str, + *, + metadata: Dict[str, str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Update a conversation's metadata with the given ID. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. 
This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return self._post( + f"/conversations/{conversation_id}", + body=maybe_transform({"metadata": metadata}, conversation_update_params.ConversationUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + def delete( + self, + conversation_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConversationDeletedResource: + """ + Delete a conversation with the given ID. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return self._delete( + f"/conversations/{conversation_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ConversationDeletedResource, + ) + + +class AsyncConversations(AsyncAPIResource): + @cached_property + def items(self) -> AsyncItems: + return AsyncItems(self._client) + + @cached_property + def with_raw_response(self) -> AsyncConversationsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncConversationsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncConversationsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncConversationsWithStreamingResponse(self) + + async def create( + self, + *, + items: Optional[Iterable[ResponseInputItemParam]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Create a conversation. + + Args: + items: Initial items to include in the conversation context. You may add up to 20 items + at a time. + + metadata: Set of 16 key-value pairs that can be attached to an object. Useful for storing + additional information about the object in a structured format. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/conversations", + body=await async_maybe_transform( + { + "items": items, + "metadata": metadata, + }, + conversation_create_params.ConversationCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + async def retrieve( + self, + conversation_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Get a conversation with the given ID. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return await self._get( + f"/conversations/{conversation_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + async def update( + self, + conversation_id: str, + *, + metadata: Dict[str, str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Update a conversation's metadata with the given ID. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return await self._post( + f"/conversations/{conversation_id}", + body=await async_maybe_transform( + {"metadata": metadata}, conversation_update_params.ConversationUpdateParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + async def delete( + self, + conversation_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConversationDeletedResource: + """ + Delete a conversation with the given ID. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return await self._delete( + f"/conversations/{conversation_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ConversationDeletedResource, + ) + + +class ConversationsWithRawResponse: + def __init__(self, conversations: Conversations) -> None: + self._conversations = conversations + + self.create = _legacy_response.to_raw_response_wrapper( + conversations.create, + ) + self.retrieve = _legacy_response.to_raw_response_wrapper( + conversations.retrieve, + ) + self.update = _legacy_response.to_raw_response_wrapper( + conversations.update, + ) + self.delete = _legacy_response.to_raw_response_wrapper( + conversations.delete, + ) + + @cached_property + def items(self) -> ItemsWithRawResponse: + return ItemsWithRawResponse(self._conversations.items) + + +class AsyncConversationsWithRawResponse: + def __init__(self, conversations: AsyncConversations) -> None: + self._conversations = conversations + + self.create = _legacy_response.async_to_raw_response_wrapper( + conversations.create, + ) + self.retrieve = _legacy_response.async_to_raw_response_wrapper( + conversations.retrieve, + ) + self.update = _legacy_response.async_to_raw_response_wrapper( + conversations.update, + ) + self.delete = _legacy_response.async_to_raw_response_wrapper( + conversations.delete, + ) + + @cached_property + def items(self) -> AsyncItemsWithRawResponse: + return AsyncItemsWithRawResponse(self._conversations.items) + + +class ConversationsWithStreamingResponse: + def __init__(self, conversations: Conversations) -> None: + self._conversations = conversations + + self.create = to_streamed_response_wrapper( + conversations.create, + ) + self.retrieve = to_streamed_response_wrapper( + conversations.retrieve, + ) + self.update = to_streamed_response_wrapper( + conversations.update, + ) + 
self.delete = to_streamed_response_wrapper( + conversations.delete, + ) + + @cached_property + def items(self) -> ItemsWithStreamingResponse: + return ItemsWithStreamingResponse(self._conversations.items) + + +class AsyncConversationsWithStreamingResponse: + def __init__(self, conversations: AsyncConversations) -> None: + self._conversations = conversations + + self.create = async_to_streamed_response_wrapper( + conversations.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + conversations.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + conversations.update, + ) + self.delete = async_to_streamed_response_wrapper( + conversations.delete, + ) + + @cached_property + def items(self) -> AsyncItemsWithStreamingResponse: + return AsyncItemsWithStreamingResponse(self._conversations.items) diff --git a/portkey_ai/_vendor/openai/resources/conversations/items.py b/portkey_ai/_vendor/openai/resources/conversations/items.py new file mode 100644 index 00000000..01811f95 --- /dev/null +++ b/portkey_ai/_vendor/openai/resources/conversations/items.py @@ -0,0 +1,557 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Any, List, Iterable, cast +from typing_extensions import Literal + +import httpx + +from ... import _legacy_response +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ...pagination import SyncConversationCursorPage, AsyncConversationCursorPage +from ..._base_client import AsyncPaginator, make_request_options +from ...types.conversations import item_list_params, item_create_params, item_retrieve_params +from ...types.conversations.conversation import Conversation +from ...types.responses.response_includable import ResponseIncludable +from ...types.conversations.conversation_item import ConversationItem +from ...types.responses.response_input_item_param import ResponseInputItemParam +from ...types.conversations.conversation_item_list import ConversationItemList + +__all__ = ["Items", "AsyncItems"] + + +class Items(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ItemsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return ItemsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ItemsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return ItemsWithStreamingResponse(self) + + def create( + self, + conversation_id: str, + *, + items: Iterable[ResponseInputItemParam], + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConversationItemList: + """ + Create items in a conversation with the given ID. + + Args: + items: The items to add to the conversation. You may add up to 20 items at a time. + + include: Additional fields to include in the response. See the `include` parameter for + [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + for more information. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return self._post( + f"/conversations/{conversation_id}/items", + body=maybe_transform({"items": items}, item_create_params.ItemCreateParams), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"include": include}, item_create_params.ItemCreateParams), + ), + cast_to=ConversationItemList, + ) + + def retrieve( + self, + item_id: str, + *, + conversation_id: str, + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConversationItem: + """ + Get a single item from a conversation with the given IDs. + + Args: + include: Additional fields to include in the response. See the `include` parameter for + [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + for more information. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + if not item_id: + raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}") + return cast( + ConversationItem, + self._get( + f"/conversations/{conversation_id}/items/{item_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams), + ), + cast_to=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system + ), + ) + + def list( + self, + conversation_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncConversationCursorPage[ConversationItem]: + """ + List all items for a conversation with the given ID. + + Args: + after: An item ID to list items after, used in pagination. + + include: Specify additional output data to include in the model response. Currently + supported values are: + + - `web_search_call.action.sources`: Include the sources of the web search tool + call. + - `code_interpreter_call.outputs`: Includes the outputs of python code execution + in code interpreter tool call items. + - `computer_call_output.output.image_url`: Include image urls from the computer + call output. + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `message.output_text.logprobs`: Include logprobs with assistant messages. + - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + tokens in reasoning item outputs. This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like when + the `store` parameter is set to `false`, or when an organization is enrolled + in the zero data retention program). + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: The order to return the input items in. Default is `desc`. + + - `asc`: Return the input items in ascending order. + - `desc`: Return the input items in descending order. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return self._get_api_list( + f"/conversations/{conversation_id}/items", + page=SyncConversationCursorPage[ConversationItem], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "include": include, + "limit": limit, + "order": order, + }, + item_list_params.ItemListParams, + ), + ), + model=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system + ) + + def delete( + self, + item_id: str, + *, + conversation_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Delete an item from a conversation with the given IDs. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + if not item_id: + raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}") + return self._delete( + f"/conversations/{conversation_id}/items/{item_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + +class AsyncItems(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncItemsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncItemsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncItemsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncItemsWithStreamingResponse(self) + + async def create( + self, + conversation_id: str, + *, + items: Iterable[ResponseInputItemParam], + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConversationItemList: + """ + Create items in a conversation with the given ID. + + Args: + items: The items to add to the conversation. You may add up to 20 items at a time. + + include: Additional fields to include in the response. See the `include` parameter for + [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + for more information. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return await self._post( + f"/conversations/{conversation_id}/items", + body=await async_maybe_transform({"items": items}, item_create_params.ItemCreateParams), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"include": include}, item_create_params.ItemCreateParams), + ), + cast_to=ConversationItemList, + ) + + async def retrieve( + self, + item_id: str, + *, + conversation_id: str, + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConversationItem: + """ + Get a single item from a conversation with the given IDs. + + Args: + include: Additional fields to include in the response. See the `include` parameter for + [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + for more information. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + if not item_id: + raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}") + return cast( + ConversationItem, + await self._get( + f"/conversations/{conversation_id}/items/{item_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams), + ), + cast_to=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system + ), + ) + + def list( + self, + conversation_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncPaginator[ConversationItem, AsyncConversationCursorPage[ConversationItem]]: + """ + List all items for a conversation with the given ID. + + Args: + after: An item ID to list items after, used in pagination. + + include: Specify additional output data to include in the model response. Currently + supported values are: + + - `web_search_call.action.sources`: Include the sources of the web search tool + call. + - `code_interpreter_call.outputs`: Includes the outputs of python code execution + in code interpreter tool call items. + - `computer_call_output.output.image_url`: Include image urls from the computer + call output. + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `message.output_text.logprobs`: Include logprobs with assistant messages. + - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + tokens in reasoning item outputs. This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like when + the `store` parameter is set to `false`, or when an organization is enrolled + in the zero data retention program). + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: The order to return the input items in. Default is `desc`. + + - `asc`: Return the input items in ascending order. + - `desc`: Return the input items in descending order. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return self._get_api_list( + f"/conversations/{conversation_id}/items", + page=AsyncConversationCursorPage[ConversationItem], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "include": include, + "limit": limit, + "order": order, + }, + item_list_params.ItemListParams, + ), + ), + model=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system + ) + + async def delete( + self, + item_id: str, + *, + conversation_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Delete an item from a conversation with the given IDs. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + if not item_id: + raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}") + return await self._delete( + f"/conversations/{conversation_id}/items/{item_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + +class ItemsWithRawResponse: + def __init__(self, items: Items) -> None: + self._items = items + + self.create = _legacy_response.to_raw_response_wrapper( + items.create, + ) + self.retrieve = _legacy_response.to_raw_response_wrapper( + items.retrieve, + ) + self.list = _legacy_response.to_raw_response_wrapper( + items.list, + ) + self.delete = _legacy_response.to_raw_response_wrapper( + items.delete, + ) + + +class AsyncItemsWithRawResponse: + def __init__(self, items: AsyncItems) -> None: + self._items = items + + self.create = _legacy_response.async_to_raw_response_wrapper( + items.create, + ) + self.retrieve = _legacy_response.async_to_raw_response_wrapper( + items.retrieve, + ) + self.list = _legacy_response.async_to_raw_response_wrapper( + items.list, + ) + self.delete = _legacy_response.async_to_raw_response_wrapper( + items.delete, + ) + + +class ItemsWithStreamingResponse: + def __init__(self, items: Items) -> None: + self._items = items + + self.create = to_streamed_response_wrapper( + items.create, + ) + self.retrieve = to_streamed_response_wrapper( + items.retrieve, + ) + self.list = to_streamed_response_wrapper( + items.list, + ) + self.delete = to_streamed_response_wrapper( + items.delete, + ) + + +class AsyncItemsWithStreamingResponse: + def __init__(self, items: AsyncItems) -> None: + self._items = 
items + + self.create = async_to_streamed_response_wrapper( + items.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + items.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + items.list, + ) + self.delete = async_to_streamed_response_wrapper( + items.delete, + ) diff --git a/portkey_ai/_vendor/openai/resources/embeddings.py b/portkey_ai/_vendor/openai/resources/embeddings.py index 553dacc2..a8cf1798 100644 --- a/portkey_ai/_vendor/openai/resources/embeddings.py +++ b/portkey_ai/_vendor/openai/resources/embeddings.py @@ -4,14 +4,14 @@ import array import base64 -from typing import List, Union, Iterable, cast +from typing import Union, Iterable, cast from typing_extensions import Literal import httpx from .. import _legacy_response from ..types import embedding_create_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from .._utils import is_given, maybe_transform from .._compat import cached_property from .._extras import numpy as np, has_numpy @@ -47,7 +47,7 @@ def with_streaming_response(self) -> EmbeddingsWithStreamingResponse: def create( self, *, - input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]], + input: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]]], model: Union[str, EmbeddingModel], dimensions: int | NotGiven = NOT_GIVEN, encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, @@ -112,6 +112,9 @@ def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse: # don't modify the response object if a user explicitly asked for a format return obj + if not obj.data: + raise ValueError("No embedding data received") + for embedding in obj.data: data = cast(object, embedding.embedding) if not isinstance(data, str): @@ -163,7 +166,7 @@ def with_streaming_response(self) -> AsyncEmbeddingsWithStreamingResponse: async def create( self, *, - input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]], + input: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]]], model: Union[str, EmbeddingModel], dimensions: int | NotGiven = NOT_GIVEN, encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, @@ -228,6 +231,9 @@ def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse: # don't modify the response object if a user explicitly asked for a format return obj + if not obj.data: + raise ValueError("No embedding data received") + for embedding in obj.data: data = cast(object, embedding.embedding) if not isinstance(data, str): diff --git a/portkey_ai/_vendor/openai/resources/files.py b/portkey_ai/_vendor/openai/resources/files.py index 179af870..963c3c0a 100644 --- a/portkey_ai/_vendor/openai/resources/files.py +++ b/portkey_ai/_vendor/openai/resources/files.py @@ -57,6 +57,7 @@ def create( *, file: FileTypes, purpose: FilePurpose, + expires_after: file_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -68,7 +69,7 @@ def create( Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up - to 100 GB. + to 1 TB. The Assistants API supports files up to 2 million tokens and of specific file types. 
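The conversation-items resource completed above (create/retrieve/list/delete plus the raw and streaming wrappers) maps onto a small CRUD surface. A minimal usage sketch against the vendored client, assuming an existing conversation; `conv_123` and `msg_456` are placeholder IDs:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

conversation_id = "conv_123"  # placeholder conversation ID

# List items, newest first, using the documented default page size.
page = client.conversations.items.list(
    conversation_id,
    limit=20,
    order="desc",
)
for item in page:
    print(item.id, item.type)

# Retrieve one item, requesting extra output data via `include`.
item = client.conversations.items.retrieve(
    "msg_456",  # placeholder item ID
    conversation_id=conversation_id,
    include=["message.output_text.logprobs"],
)

# Delete an item; as the docstring above shows (cast_to=Conversation),
# this returns the parent Conversation rather than the deleted item.
conversation = client.conversations.items.delete(
    "msg_456",
    conversation_id=conversation_id,
)
```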
See the @@ -96,6 +97,9 @@ def create( fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used for eval data sets + expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire + after 30 days and all other files are persisted until they are manually deleted. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -108,6 +112,7 @@ def create( { "file": file, "purpose": purpose, + "expires_after": expires_after, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) @@ -369,6 +374,7 @@ async def create( *, file: FileTypes, purpose: FilePurpose, + expires_after: file_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -380,7 +386,7 @@ async def create( Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up - to 100 GB. + to 1 TB. The Assistants API supports files up to 2 million tokens and of specific file types. See the @@ -408,6 +414,9 @@ async def create( fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used for eval data sets + expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire + after 30 days and all other files are persisted until they are manually deleted. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -420,6 +429,7 @@ async def create( { "file": file, "purpose": purpose, + "expires_after": expires_after, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) @@ -677,7 +687,7 @@ def __init__(self, files: Files) -> None: ) self.retrieve_content = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( - files.retrieve_content # pyright: ignore[reportDeprecated], + files.retrieve_content, # pyright: ignore[reportDeprecated], ) ) @@ -703,7 +713,7 @@ def __init__(self, files: AsyncFiles) -> None: ) self.retrieve_content = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( - files.retrieve_content # pyright: ignore[reportDeprecated], + files.retrieve_content, # pyright: ignore[reportDeprecated], ) ) @@ -730,7 +740,7 @@ def __init__(self, files: Files) -> None: ) self.retrieve_content = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( - files.retrieve_content # pyright: ignore[reportDeprecated], + files.retrieve_content, # pyright: ignore[reportDeprecated], ) ) @@ -757,6 +767,6 @@ def __init__(self, files: AsyncFiles) -> None: ) self.retrieve_content = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( - files.retrieve_content # pyright: ignore[reportDeprecated], + files.retrieve_content, # pyright: ignore[reportDeprecated], ) ) diff --git a/portkey_ai/_vendor/openai/resources/fine_tuning/checkpoints/permissions.py b/portkey_ai/_vendor/openai/resources/fine_tuning/checkpoints/permissions.py index 547e42ec..f8ae1259 100644 --- a/portkey_ai/_vendor/openai/resources/fine_tuning/checkpoints/permissions.py +++ b/portkey_ai/_vendor/openai/resources/fine_tuning/checkpoints/permissions.py @@ -2,13 +2,12 @@ from __future__ import annotations 
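Relatedly, the files.py hunk just above introduces the `expires_after` upload policy (alongside the raised 1 TB per-organization cap). A hedged sketch of the new parameter, assuming `ExpiresAfter` is the anchor-plus-seconds mapping vendored in `file_create_params`; the file name is a placeholder:

```python
from openai import OpenAI

client = OpenAI()

# Upload a batch input file that expires one day after creation.
# Per the docstring above, files with purpose="batch" default to a
# 30-day expiry; expires_after overrides that policy explicitly.
batch_file = client.files.create(
    file=open("requests.jsonl", "rb"),
    purpose="batch",
    expires_after={"anchor": "created_at", "seconds": 86400},
)
print(batch_file.id)
```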
-from typing import List from typing_extensions import Literal import httpx from .... import _legacy_response -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource @@ -47,7 +46,7 @@ def create( self, fine_tuned_model_checkpoint: str, *, - project_ids: List[str], + project_ids: SequenceNotStr[str], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -215,7 +214,7 @@ def create( self, fine_tuned_model_checkpoint: str, *, - project_ids: List[str], + project_ids: SequenceNotStr[str], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, diff --git a/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/jobs.py b/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/jobs.py index 5cca2191..ee21cdd2 100644 --- a/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/jobs.py +++ b/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/jobs.py @@ -84,7 +84,7 @@ def create( Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization) Args: model: The name of the model to fine-tune. You can select one of the @@ -105,7 +105,8 @@ def create( [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) format. - See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + See the + [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) for more details. hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated @@ -142,7 +143,8 @@ def create( Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + See the + [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) for more details. extra_headers: Send extra headers @@ -189,7 +191,7 @@ def retrieve( """ Get info about a fine-tuning job. - [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization) Args: extra_headers: Send extra headers @@ -462,7 +464,7 @@ async def create( Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization) Args: model: The name of the model to fine-tune. You can select one of the @@ -483,7 +485,8 @@ async def create( [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) format. 
- See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + See the + [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) for more details. hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated @@ -520,7 +523,8 @@ async def create( Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + See the + [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) for more details. extra_headers: Send extra headers @@ -567,7 +571,7 @@ async def retrieve( """ Get info about a fine-tuning job. - [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization) Args: extra_headers: Send extra headers diff --git a/portkey_ai/_vendor/openai/resources/images.py b/portkey_ai/_vendor/openai/resources/images.py index 0f1c9fcb..17ec264b 100644 --- a/portkey_ai/_vendor/openai/resources/images.py +++ b/portkey_ai/_vendor/openai/resources/images.py @@ -2,21 +2,24 @@ from __future__ import annotations -from typing import List, Union, Mapping, Optional, cast -from typing_extensions import Literal +from typing import Union, Mapping, Optional, cast +from typing_extensions import Literal, overload import httpx from .. import _legacy_response from ..types import image_edit_params, image_generate_params, image_create_variation_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes -from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes, SequenceNotStr +from .._utils import extract_files, required_args, maybe_transform, deepcopy_minimal, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from .._streaming import Stream, AsyncStream from .._base_client import make_request_options from ..types.image_model import ImageModel from ..types.images_response import ImagesResponse +from ..types.image_gen_stream_event import ImageGenStreamEvent +from ..types.image_edit_stream_event import ImageEditStreamEvent __all__ = ["Images", "AsyncImages"] @@ -114,19 +117,25 @@ def create_variation( cast_to=ImagesResponse, ) + @overload def edit( self, *, - image: Union[FileTypes, List[FileTypes]], + image: Union[FileTypes, SequenceNotStr[FileTypes]], prompt: str, background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, mask: FileTypes | NotGiven = NOT_GIVEN, model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, 
user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -160,6 +169,10 @@ def edit( If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. + input_fidelity: Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. If there are multiple images provided, the mask will be applied on the first image. Must be a valid PNG file, less than @@ -171,6 +184,21 @@ def edit( n: The number of images to generate. Must be between 1 and 10. + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + default value is `png`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -184,6 +212,10 @@ def edit( (landscape), `1024x1536` (portrait), or `auto` (default value) for `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + stream: Edit the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). @@ -196,52 +228,27 @@ def edit( timeout: Override the client-level default timeout for this request, in seconds """ - body = deepcopy_minimal( - { - "image": image, - "prompt": prompt, - "background": background, - "mask": mask, - "model": model, - "n": n, - "quality": quality, - "response_format": response_format, - "size": size, - "user": user, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", ""], ["mask"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( - "/images/edits", - body=maybe_transform(body, image_edit_params.ImageEditParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImagesResponse, - ) + ... 
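With the inline body replaced by `...` (the real implementation now sits behind `@required_args` later in this hunk), the `edit` overloads are easiest to read alongside a call sketch. A minimal example of both modes, assuming `gpt-image-1` access; file names and prompts are illustrative:

```python
import base64

from openai import OpenAI

client = OpenAI()

# Non-streaming edit: matches the Optional[Literal[False]] overload
# above and returns a plain ImagesResponse.
result = client.images.edit(
    image=open("living_room.png", "rb"),
    prompt="Add a reading lamp next to the sofa",
    model="gpt-image-1",
    input_fidelity="high",  # new in this diff; gpt-image-1 only
    output_format="webp",
    output_compression=80,
)
image_bytes = base64.b64decode(result.data[0].b64_json)

# Streaming edit: stream=True selects the Stream[ImageEditStreamEvent]
# overload and emits up to `partial_images` intermediate frames.
stream = client.images.edit(
    image=open("living_room.png", "rb"),
    prompt="Add a reading lamp next to the sofa",
    model="gpt-image-1",
    stream=True,
    partial_images=2,
)
for event in stream:
    print(event.type)
```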
- def generate( + @overload + def edit( self, *, + image: Union[FileTypes, SequenceNotStr[FileTypes]], prompt: str, + stream: Literal[True], background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, + mask: FileTypes | NotGiven = NOT_GIVEN, model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, output_compression: Optional[int] | NotGiven = NOT_GIVEN, output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[ - Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] - ] + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | NotGiven = NOT_GIVEN, - style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -249,15 +256,27 @@ def generate( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImagesResponse: - """ - Creates an image given a prompt. - [Learn more](https://platform.openai.com/docs/guides/images). + ) -> Stream[ImageEditStreamEvent]: + """Creates an edited or extended image given one or more source images and a + prompt. + + This endpoint only supports `gpt-image-1` and `dall-e-2`. Args: - prompt: A text description of the desired image(s). The maximum length is 32000 - characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters - for `dall-e-3`. + image: The image(s) to edit. Must be a supported image file or an array of images. + + For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + 50MB. You can provide up to 16 images. + + For `dall-e-2`, you can only provide one image, and it should be a square `png` + file less than 4MB. + + prompt: A text description of the desired image(s). The maximum length is 1000 + characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + + stream: Edit the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. background: Allows to set transparency for the background of the generated image(s). This parameter is only supported for `gpt-image-1`. Must be one of `transparent`, @@ -267,139 +286,48 @@ def generate( If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. - model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or - `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to - `gpt-image-1` is used. + input_fidelity: Control how much effort the model will exert to match the style and features, + especially facial features, of input images. 
This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. - moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must - be either `low` for less restrictive filtering or `auto` (default value). + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) + indicate where `image` should be edited. If there are multiple images provided, + the mask will be applied on the first image. Must be a valid PNG file, less than + 4MB, and have the same dimensions as `image`. - n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - `n=1` is supported. + model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + is used. + + n: The number of images to generate. Must be between 1 and 10. output_compression: The compression level (0-100%) for the generated images. This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and defaults to 100. output_format: The format in which the generated images are returned. This parameter is only - supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. - - quality: The quality of the image that will be generated. - - - `auto` (default value) will automatically select the best quality for the - given model. - - `high`, `medium` and `low` are supported for `gpt-image-1`. - - `hd` and `standard` are supported for `dall-e-3`. - - `standard` is the only option for `dall-e-2`. - - response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are - returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes - after the image has been generated. This parameter isn't supported for - `gpt-image-1` which will always return base64-encoded images. - - size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` - (landscape), `1024x1536` (portrait), or `auto` (default value) for - `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and - one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. - - style: The style of the generated images. This parameter is only supported for - `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean - towards generating hyper-real and dramatic images. Natural causes the model to - produce more natural, less hyper-real looking images. - - user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/images/generations", - body=maybe_transform( - { - "prompt": prompt, - "background": background, - "model": model, - "moderation": moderation, - "n": n, - "output_compression": output_compression, - "output_format": output_format, - "quality": quality, - "response_format": response_format, - "size": size, - "style": style, - "user": user, - }, - image_generate_params.ImageGenerateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImagesResponse, - ) - - -class AsyncImages(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncImagesWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers - """ - return AsyncImagesWithRawResponse(self) + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + default value is `png`. - @cached_property - def with_streaming_response(self) -> AsyncImagesWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/openai/openai-python#with_streaming_response - """ - return AsyncImagesWithStreamingResponse(self) - - async def create_variation( - self, - *, - image: FileTypes, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImagesResponse: - """Creates a variation of a given image. - - This endpoint only supports `dall-e-2`. - - Args: - image: The image to use as the basis for the variation(s). Must be a valid PNG file, - less than 4MB, and square. + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. - model: The model to use for image generation. Only `dall-e-2` is supported at this - time. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. - n: The number of images to generate. Must be between 1 and 10. + quality: The quality of the image that will be generated. `high`, `medium` and `low` are + only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + Defaults to `auto`. response_format: The format in which the generated images are returned. 
Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. + generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + will always return base64-encoded images. - size: The size of the generated images. Must be one of `256x256`, `512x512`, or - `1024x1024`. + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. @@ -413,40 +341,23 @@ async def create_variation( timeout: Override the client-level default timeout for this request, in seconds """ - body = deepcopy_minimal( - { - "image": image, - "model": model, - "n": n, - "response_format": response_format, - "size": size, - "user": user, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["image"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._post( - "/images/variations", - body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImagesResponse, - ) + ... - async def edit( + @overload + def edit( self, *, - image: Union[FileTypes, List[FileTypes]], + image: Union[FileTypes, SequenceNotStr[FileTypes]], prompt: str, + stream: bool, background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, mask: FileTypes | NotGiven = NOT_GIVEN, model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] @@ -458,7 +369,7 @@ async def edit( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImagesResponse: + ) -> ImagesResponse | Stream[ImageEditStreamEvent]: """Creates an edited or extended image given one or more source images and a prompt. @@ -476,6 +387,10 @@ async def edit( prompt: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + stream: Edit the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + background: Allows to set transparency for the background of the generated image(s). This parameter is only supported for `gpt-image-1`. Must be one of `transparent`, `opaque` or `auto` (default value). 
When `auto` is used, the model will @@ -484,6 +399,10 @@ async def edit( If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. + input_fidelity: Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. If there are multiple images provided, the mask will be applied on the first image. Must be a valid PNG file, less than @@ -495,6 +414,21 @@ async def edit( n: The number of images to generate. Must be between 1 and 10. + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + default value is `png`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -520,17 +454,939 @@ async def edit( timeout: Override the client-level default timeout for this request, in seconds """ - body = deepcopy_minimal( - { - "image": image, - "prompt": prompt, - "background": background, - "mask": mask, - "model": model, - "n": n, - "quality": quality, - "response_format": response_format, - "size": size, + ... + + @required_args(["image", "prompt"], ["image", "prompt", "stream"]) + def edit( + self, + *, + image: Union[FileTypes, SequenceNotStr[FileTypes]], + prompt: str, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, + mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse | Stream[ImageEditStreamEvent]: + body = deepcopy_minimal( + { + "image": image, + "prompt": prompt, + "background": background, + "input_fidelity": input_fidelity, + "mask": mask, + "model": model, + "n": n, + "output_compression": output_compression, + "output_format": output_format, + "partial_images": partial_images, + "quality": quality, + "response_format": response_format, + "size": size, + "stream": stream, + "user": user, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", ""], ["mask"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return self._post( + "/images/edits", + body=maybe_transform( + body, + image_edit_params.ImageEditParamsStreaming if stream else image_edit_params.ImageEditParamsNonStreaming, + ), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImagesResponse, + stream=stream or False, + stream_cls=Stream[ImageEditStreamEvent], + ) + + @overload + def generate( + self, + *, + prompt: str, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[ + Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] + ] + | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse: + """ + Creates an image given a prompt. + [Learn more](https://platform.openai.com/docs/guides/images). + + Args: + prompt: A text description of the desired image(s). The maximum length is 32000 + characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + for `dall-e-3`. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. 
+ + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + `gpt-image-1` is used. + + moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must + be either `low` for less restrictive filtering or `auto` (default value). + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + + quality: The quality of the image that will be generated. + + - `auto` (default value) will automatically select the best quality for the + given model. + - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `hd` and `standard` are supported for `dall-e-3`. + - `standard` is the only option for `dall-e-2`. + + response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are + returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + after the image has been generated. This parameter isn't supported for + `gpt-image-1` which will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + + stream: Generate the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. + + style: The style of the generated images. This parameter is only supported for + `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + towards generating hyper-real and dramatic images. Natural causes the model to + produce more natural, less hyper-real looking images. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
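As with `edit`, the `stream` argument selects the overload. A short sketch contrasting the two `generate` call shapes this hunk introduces (the model and prompt are illustrative, and streaming is documented as `gpt-image-1` only):

```python
from openai import OpenAI

client = OpenAI()

# Plain call: resolves to the Optional[Literal[False]] overload above
# and returns a fully materialized ImagesResponse.
response = client.images.generate(
    model="gpt-image-1",
    prompt="A watercolor lighthouse at dusk",
    size="1024x1024",
)

# Streaming call: resolves to the Literal[True] overload declared next
# and yields ImageGenStreamEvent values, including partial frames
# whenever partial_images > 0.
for event in client.images.generate(
    model="gpt-image-1",
    prompt="A watercolor lighthouse at dusk",
    stream=True,
    partial_images=3,
):
    print(event.type)
```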
+ + @overload + def generate( + self, + *, + prompt: str, + stream: Literal[True], + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[ + Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] + ] + | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Stream[ImageGenStreamEvent]: + """ + Creates an image given a prompt. + [Learn more](https://platform.openai.com/docs/guides/images). + + Args: + prompt: A text description of the desired image(s). The maximum length is 32000 + characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + for `dall-e-3`. + + stream: Generate the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + `gpt-image-1` is used. + + moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must + be either `low` for less restrictive filtering or `auto` (default value). + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. 
+ + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + + quality: The quality of the image that will be generated. + + - `auto` (default value) will automatically select the best quality for the + given model. + - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `hd` and `standard` are supported for `dall-e-3`. + - `standard` is the only option for `dall-e-2`. + + response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are + returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + after the image has been generated. This parameter isn't supported for + `gpt-image-1` which will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + + style: The style of the generated images. This parameter is only supported for + `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + towards generating hyper-real and dramatic images. Natural causes the model to + produce more natural, less hyper-real looking images. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def generate( + self, + *, + prompt: str, + stream: bool, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[ + Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] + ] + | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse | Stream[ImageGenStreamEvent]: + """ + Creates an image given a prompt. + [Learn more](https://platform.openai.com/docs/guides/images). + + Args: + prompt: A text description of the desired image(s). 
The maximum length is 32000 + characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + for `dall-e-3`. + + stream: Generate the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + `gpt-image-1` is used. + + moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must + be either `low` for less restrictive filtering or `auto` (default value). + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + + quality: The quality of the image that will be generated. + + - `auto` (default value) will automatically select the best quality for the + given model. + - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `hd` and `standard` are supported for `dall-e-3`. + - `standard` is the only option for `dall-e-2`. + + response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are + returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + after the image has been generated. This parameter isn't supported for + `gpt-image-1` which will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + + style: The style of the generated images. This parameter is only supported for + `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + towards generating hyper-real and dramatic images. Natural causes the model to + produce more natural, less hyper-real looking images. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["prompt"], ["prompt", "stream"]) + def generate( + self, + *, + prompt: str, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[ + Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] + ] + | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse | Stream[ImageGenStreamEvent]: + return self._post( + "/images/generations", + body=maybe_transform( + { + "prompt": prompt, + "background": background, + "model": model, + "moderation": moderation, + "n": n, + "output_compression": output_compression, + "output_format": output_format, + "partial_images": partial_images, + "quality": quality, + "response_format": response_format, + "size": size, + "stream": stream, + "style": style, + "user": user, + }, + image_generate_params.ImageGenerateParamsStreaming + if stream + else image_generate_params.ImageGenerateParamsNonStreaming, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImagesResponse, + stream=stream or False, + stream_cls=Stream[ImageGenStreamEvent], + ) + + +class AsyncImages(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncImagesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncImagesWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncImagesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncImagesWithStreamingResponse(self) + + async def create_variation( + self, + *, + image: FileTypes, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse: + """Creates a variation of a given image. + + This endpoint only supports `dall-e-2`. + + Args: + image: The image to use as the basis for the variation(s). Must be a valid PNG file, + less than 4MB, and square. + + model: The model to use for image generation. Only `dall-e-2` is supported at this + time. + + n: The number of images to generate. Must be between 1 and 10. + + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. + + size: The size of the generated images. Must be one of `256x256`, `512x512`, or + `1024x1024`. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "image": image, + "model": model, + "n": n, + "response_format": response_format, + "size": size, + "user": user, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["image"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. 
+ # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return await self._post( + "/images/variations", + body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImagesResponse, + ) + + @overload + async def edit( + self, + *, + image: Union[FileTypes, SequenceNotStr[FileTypes]], + prompt: str, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, + mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse: + """Creates an edited or extended image given one or more source images and a + prompt. + + This endpoint only supports `gpt-image-1` and `dall-e-2`. + + Args: + image: The image(s) to edit. Must be a supported image file or an array of images. + + For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + 50MB. You can provide up to 16 images. + + For `dall-e-2`, you can only provide one image, and it should be a square `png` + file less than 4MB. + + prompt: A text description of the desired image(s). The maximum length is 1000 + characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + input_fidelity: Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) + indicate where `image` should be edited. If there are multiple images provided, + the mask will be applied on the first image. Must be a valid PNG file, less than + 4MB, and have the same dimensions as `image`. 
+ + model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + is used. + + n: The number of images to generate. Must be between 1 and 10. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + default value is `png`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + + quality: The quality of the image that will be generated. `high`, `medium` and `low` are + only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + Defaults to `auto`. + + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + + stream: Edit the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def edit( + self, + *, + image: Union[FileTypes, SequenceNotStr[FileTypes]], + prompt: str, + stream: Literal[True], + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, + mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncStream[ImageEditStreamEvent]: + """Creates an edited or extended image given one or more source images and a + prompt. + + This endpoint only supports `gpt-image-1` and `dall-e-2`. + + Args: + image: The image(s) to edit. Must be a supported image file or an array of images. + + For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + 50MB. You can provide up to 16 images. + + For `dall-e-2`, you can only provide one image, and it should be a square `png` + file less than 4MB. + + prompt: A text description of the desired image(s). The maximum length is 1000 + characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + + stream: Edit the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + input_fidelity: Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) + indicate where `image` should be edited. If there are multiple images provided, + the mask will be applied on the first image. Must be a valid PNG file, less than + 4MB, and have the same dimensions as `image`. + + model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + is used. + + n: The number of images to generate. Must be between 1 and 10. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + default value is `png`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + + quality: The quality of the image that will be generated. `high`, `medium` and `low` are + only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + Defaults to `auto`. + + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. 
This parameter is only supported for `dall-e-2`, as `gpt-image-1` + will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def edit( + self, + *, + image: Union[FileTypes, SequenceNotStr[FileTypes]], + prompt: str, + stream: bool, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, + mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]: + """Creates an edited or extended image given one or more source images and a + prompt. + + This endpoint only supports `gpt-image-1` and `dall-e-2`. + + Args: + image: The image(s) to edit. Must be a supported image file or an array of images. + + For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + 50MB. You can provide up to 16 images. + + For `dall-e-2`, you can only provide one image, and it should be a square `png` + file less than 4MB. + + prompt: A text description of the desired image(s). The maximum length is 1000 + characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + + stream: Edit the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. 
+ + input_fidelity: Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) + indicate where `image` should be edited. If there are multiple images provided, + the mask will be applied on the first image. Must be a valid PNG file, less than + 4MB, and have the same dimensions as `image`. + + model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + is used. + + n: The number of images to generate. Must be between 1 and 10. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + default value is `png`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + + quality: The quality of the image that will be generated. `high`, `medium` and `low` are + only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + Defaults to `auto`. + + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
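Taken together, the three `edit` overloads above only vary the type of `stream`: omitted or `False` yields an `ImagesResponse`, while a truthy value switches the return type to `AsyncStream[ImageEditStreamEvent]`. A minimal sketch of the streaming path follows (the file name, prompt, and model are illustrative assumptions, not values taken from this diff):

```python
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    # "sketch.png" is a hypothetical local file; streaming edits assume a
    # model that supports them, such as gpt-image-1.
    with open("sketch.png", "rb") as image:
        stream = await client.images.edit(
            image=image,
            prompt="Add a lighthouse on the cliff",
            model="gpt-image-1",
            stream=True,
            partial_images=2,  # 0-3 partial frames before the final image
        )
        async for event in stream:
            # Each event is an ImageEditStreamEvent; partial-image events
            # arrive before the final image event.
            print(event.type)


asyncio.run(main())
```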
+ + @required_args(["image", "prompt"], ["image", "prompt", "stream"]) + async def edit( + self, + *, + image: Union[FileTypes, SequenceNotStr[FileTypes]], + prompt: str, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, + mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]: + body = deepcopy_minimal( + { + "image": image, + "prompt": prompt, + "background": background, + "input_fidelity": input_fidelity, + "mask": mask, + "model": model, + "n": n, + "output_compression": output_compression, + "output_format": output_format, + "partial_images": partial_images, + "quality": quality, + "response_format": response_format, + "size": size, + "stream": stream, "user": user, } ) @@ -541,14 +1397,20 @@ async def edit( extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return await self._post( "/images/edits", - body=await async_maybe_transform(body, image_edit_params.ImageEditParams), + body=await async_maybe_transform( + body, + image_edit_params.ImageEditParamsStreaming if stream else image_edit_params.ImageEditParamsNonStreaming, + ), files=files, options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ImagesResponse, + stream=stream or False, + stream_cls=AsyncStream[ImageEditStreamEvent], ) + @overload async def generate( self, *, @@ -559,12 +1421,14 @@ async def generate( n: Optional[int] | NotGiven = NOT_GIVEN, output_compression: Optional[int] | NotGiven = NOT_GIVEN, output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] ] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -608,6 +1472,237 @@ async def generate( output_format: The format in which the generated images are returned. This parameter is only supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + + quality: The quality of the image that will be generated. + + - `auto` (default value) will automatically select the best quality for the + given model. + - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `hd` and `standard` are supported for `dall-e-3`. + - `standard` is the only option for `dall-e-2`. + + response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are + returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + after the image has been generated. This parameter isn't supported for + `gpt-image-1` which will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + + stream: Generate the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. + + style: The style of the generated images. This parameter is only supported for + `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + towards generating hyper-real and dramatic images. Natural causes the model to + produce more natural, less hyper-real looking images. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
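The same overload pattern repeats for `generate` below: the `stream` annotation alone distinguishes the `ImagesResponse` and `AsyncStream[ImageGenStreamEvent]` paths. A short sketch of streaming generation with partial images (prompt and model are illustrative assumptions):

```python
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    stream = await client.images.generate(
        prompt="A watercolor harbor at dawn",  # illustrative prompt
        model="gpt-image-1",  # per the docstring, streaming is gpt-image-1 only
        stream=True,
        partial_images=3,
    )
    async for event in stream:
        # Events are ImageGenStreamEvent instances; partial images stream
        # in before the final image event.
        print(event.type)


asyncio.run(main())
```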
+ + @overload + async def generate( + self, + *, + prompt: str, + stream: Literal[True], + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[ + Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] + ] + | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncStream[ImageGenStreamEvent]: + """ + Creates an image given a prompt. + [Learn more](https://platform.openai.com/docs/guides/images). + + Args: + prompt: A text description of the desired image(s). The maximum length is 32000 + characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + for `dall-e-3`. + + stream: Generate the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + `gpt-image-1` is used. + + moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must + be either `low` for less restrictive filtering or `auto` (default value). + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. 
+ + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + + quality: The quality of the image that will be generated. + + - `auto` (default value) will automatically select the best quality for the + given model. + - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `hd` and `standard` are supported for `dall-e-3`. + - `standard` is the only option for `dall-e-2`. + + response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are + returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + after the image has been generated. This parameter isn't supported for + `gpt-image-1` which will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + + style: The style of the generated images. This parameter is only supported for + `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + towards generating hyper-real and dramatic images. Natural causes the model to + produce more natural, less hyper-real looking images. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def generate( + self, + *, + prompt: str, + stream: bool, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[ + Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] + ] + | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]: + """ + Creates an image given a prompt. + [Learn more](https://platform.openai.com/docs/guides/images). + + Args: + prompt: A text description of the desired image(s). 
The maximum length is 32000 + characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + for `dall-e-3`. + + stream: Generate the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + `gpt-image-1` is used. + + moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must + be either `low` for less restrictive filtering or `auto` (default value). + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the @@ -643,6 +1738,36 @@ async def generate( timeout: Override the client-level default timeout for this request, in seconds """ + ... + + @required_args(["prompt"], ["prompt", "stream"]) + async def generate( + self, + *, + prompt: str, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[ + Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] + ] + | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]: return await self._post( "/images/generations", body=await async_maybe_transform( @@ -654,18 +1779,24 @@ async def generate( "n": n, "output_compression": output_compression, "output_format": output_format, + "partial_images": partial_images, "quality": quality, "response_format": response_format, "size": size, + "stream": stream, "style": style, "user": user, }, - image_generate_params.ImageGenerateParams, + image_generate_params.ImageGenerateParamsStreaming + if stream + else image_generate_params.ImageGenerateParamsNonStreaming, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ImagesResponse, + stream=stream or False, + stream_cls=AsyncStream[ImageGenStreamEvent], ) diff --git a/portkey_ai/_vendor/openai/resources/moderations.py b/portkey_ai/_vendor/openai/resources/moderations.py index f7a8b52c..91c0df43 100644 --- a/portkey_ai/_vendor/openai/resources/moderations.py +++ b/portkey_ai/_vendor/openai/resources/moderations.py @@ -2,13 +2,13 @@ from __future__ import annotations -from typing import List, Union, Iterable +from typing import Union, Iterable import httpx from .. import _legacy_response from ..types import moderation_create_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from .._utils import maybe_transform, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource @@ -44,7 +44,7 @@ def with_streaming_response(self) -> ModerationsWithStreamingResponse: def create( self, *, - input: Union[str, List[str], Iterable[ModerationMultiModalInputParam]], + input: Union[str, SequenceNotStr[str], Iterable[ModerationMultiModalInputParam]], model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -114,7 +114,7 @@ def with_streaming_response(self) -> AsyncModerationsWithStreamingResponse: async def create( self, *, - input: Union[str, List[str], Iterable[ModerationMultiModalInputParam]], + input: Union[str, SequenceNotStr[str], Iterable[ModerationMultiModalInputParam]], model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. diff --git a/portkey_ai/_vendor/openai/resources/realtime/__init__.py b/portkey_ai/_vendor/openai/resources/realtime/__init__.py new file mode 100644 index 00000000..7a41de86 --- /dev/null +++ b/portkey_ai/_vendor/openai/resources/realtime/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .realtime import ( + Realtime, + AsyncRealtime, + RealtimeWithRawResponse, + AsyncRealtimeWithRawResponse, + RealtimeWithStreamingResponse, + AsyncRealtimeWithStreamingResponse, +) +from .client_secrets import ( + ClientSecrets, + AsyncClientSecrets, + ClientSecretsWithRawResponse, + AsyncClientSecretsWithRawResponse, + ClientSecretsWithStreamingResponse, + AsyncClientSecretsWithStreamingResponse, +) + +__all__ = [ + "ClientSecrets", + "AsyncClientSecrets", + "ClientSecretsWithRawResponse", + "AsyncClientSecretsWithRawResponse", + "ClientSecretsWithStreamingResponse", + "AsyncClientSecretsWithStreamingResponse", + "Realtime", + "AsyncRealtime", + "RealtimeWithRawResponse", + "AsyncRealtimeWithRawResponse", + "RealtimeWithStreamingResponse", + "AsyncRealtimeWithStreamingResponse", +] diff --git a/portkey_ai/_vendor/openai/resources/realtime/client_secrets.py b/portkey_ai/_vendor/openai/resources/realtime/client_secrets.py new file mode 100644 index 00000000..a7946074 --- /dev/null +++ b/portkey_ai/_vendor/openai/resources/realtime/client_secrets.py @@ -0,0 +1,189 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ... import _legacy_response +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ..._base_client import make_request_options +from ...types.realtime import client_secret_create_params +from ...types.realtime.client_secret_create_response import ClientSecretCreateResponse + +__all__ = ["ClientSecrets", "AsyncClientSecrets"] + + +class ClientSecrets(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ClientSecretsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return ClientSecretsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ClientSecretsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return ClientSecretsWithStreamingResponse(self) + + def create( + self, + *, + expires_after: client_secret_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, + session: client_secret_create_params.Session | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ClientSecretCreateResponse: + """ + Create a Realtime client secret with an associated session configuration. + + Args: + expires_after: Configuration for the client secret expiration. Expiration refers to the time + after which a client secret will no longer be valid for creating sessions. The + session itself may continue after that time once started. 
A secret can be used + to create multiple sessions until it expires. + + session: Session configuration to use for the client secret. Choose either a realtime + session or a transcription session. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/realtime/client_secrets", + body=maybe_transform( + { + "expires_after": expires_after, + "session": session, + }, + client_secret_create_params.ClientSecretCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ClientSecretCreateResponse, + ) + + +class AsyncClientSecrets(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncClientSecretsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncClientSecretsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncClientSecretsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncClientSecretsWithStreamingResponse(self) + + async def create( + self, + *, + expires_after: client_secret_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, + session: client_secret_create_params.Session | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ClientSecretCreateResponse: + """ + Create a Realtime client secret with an associated session configuration. + + Args: + expires_after: Configuration for the client secret expiration. Expiration refers to the time + after which a client secret will no longer be valid for creating sessions. The + session itself may continue after that time once started. A secret can be used + to create multiple sessions until it expires. + + session: Session configuration to use for the client secret. Choose either a realtime + session or a transcription session. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/realtime/client_secrets", + body=await async_maybe_transform( + { + "expires_after": expires_after, + "session": session, + }, + client_secret_create_params.ClientSecretCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ClientSecretCreateResponse, + ) + + +class ClientSecretsWithRawResponse: + def __init__(self, client_secrets: ClientSecrets) -> None: + self._client_secrets = client_secrets + + self.create = _legacy_response.to_raw_response_wrapper( + client_secrets.create, + ) + + +class AsyncClientSecretsWithRawResponse: + def __init__(self, client_secrets: AsyncClientSecrets) -> None: + self._client_secrets = client_secrets + + self.create = _legacy_response.async_to_raw_response_wrapper( + client_secrets.create, + ) + + +class ClientSecretsWithStreamingResponse: + def __init__(self, client_secrets: ClientSecrets) -> None: + self._client_secrets = client_secrets + + self.create = to_streamed_response_wrapper( + client_secrets.create, + ) + + +class AsyncClientSecretsWithStreamingResponse: + def __init__(self, client_secrets: AsyncClientSecrets) -> None: + self._client_secrets = client_secrets + + self.create = async_to_streamed_response_wrapper( + client_secrets.create, + ) diff --git a/portkey_ai/_vendor/openai/resources/realtime/realtime.py b/portkey_ai/_vendor/openai/resources/realtime/realtime.py new file mode 100644 index 00000000..64fca729 --- /dev/null +++ b/portkey_ai/_vendor/openai/resources/realtime/realtime.py @@ -0,0 +1,1041 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
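The `client_secrets` resource added above boils down to a single `create()` call for minting ephemeral Realtime keys. A minimal sketch, placed here before the new `realtime.py` begins (the `expires_after`/`session` payload shapes and the `value` attribute are assumptions inferred from the param and response type names, not verified against this vendored spec):

```python
from openai import OpenAI

client = OpenAI()

# Assumed payload shapes: an anchor/seconds expiry and a session config
# keyed by type/model are illustrative guesses from the parameter names.
secret = client.realtime.client_secrets.create(
    expires_after={"anchor": "created_at", "seconds": 600},
    session={"type": "realtime", "model": "gpt-realtime"},
)
print(secret.value)  # ephemeral key; the attribute name is assumed
```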
+ +from __future__ import annotations + +import json +import logging +from types import TracebackType +from typing import TYPE_CHECKING, Any, Iterator, cast +from typing_extensions import AsyncIterator + +import httpx +from pydantic import BaseModel + +from ..._types import NOT_GIVEN, Query, Headers, NotGiven +from ..._utils import ( + is_azure_client, + maybe_transform, + strip_not_given, + async_maybe_transform, + is_async_azure_client, +) +from ..._compat import cached_property +from ..._models import construct_type_unchecked +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._exceptions import OpenAIError +from ..._base_client import _merge_mappings +from .client_secrets import ( + ClientSecrets, + AsyncClientSecrets, + ClientSecretsWithRawResponse, + AsyncClientSecretsWithRawResponse, + ClientSecretsWithStreamingResponse, + AsyncClientSecretsWithStreamingResponse, +) +from ...types.realtime import session_update_event_param +from ...types.websocket_connection_options import WebsocketConnectionOptions +from ...types.realtime.realtime_client_event import RealtimeClientEvent +from ...types.realtime.realtime_server_event import RealtimeServerEvent +from ...types.realtime.conversation_item_param import ConversationItemParam +from ...types.realtime.realtime_client_event_param import RealtimeClientEventParam +from ...types.realtime.realtime_response_create_params_param import RealtimeResponseCreateParamsParam + +if TYPE_CHECKING: + from websockets.sync.client import ClientConnection as WebsocketConnection + from websockets.asyncio.client import ClientConnection as AsyncWebsocketConnection + + from ..._client import OpenAI, AsyncOpenAI + +__all__ = ["Realtime", "AsyncRealtime"] + +log: logging.Logger = logging.getLogger(__name__) + + +class Realtime(SyncAPIResource): + @cached_property + def client_secrets(self) -> ClientSecrets: + return ClientSecrets(self._client) + + @cached_property + def with_raw_response(self) -> RealtimeWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return RealtimeWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> RealtimeWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return RealtimeWithStreamingResponse(self) + + def connect( + self, + *, + model: str, + extra_query: Query = {}, + extra_headers: Headers = {}, + websocket_connection_options: WebsocketConnectionOptions = {}, + ) -> RealtimeConnectionManager: + """ + The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling. + + Some notable benefits of the API include: + + - Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output. + - Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction. + - Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback. + + The Realtime API is a stateful, event-based API that communicates over a WebSocket. 
+ """ + return RealtimeConnectionManager( + client=self._client, + extra_query=extra_query, + extra_headers=extra_headers, + websocket_connection_options=websocket_connection_options, + model=model, + ) + + +class AsyncRealtime(AsyncAPIResource): + @cached_property + def client_secrets(self) -> AsyncClientSecrets: + return AsyncClientSecrets(self._client) + + @cached_property + def with_raw_response(self) -> AsyncRealtimeWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncRealtimeWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncRealtimeWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncRealtimeWithStreamingResponse(self) + + def connect( + self, + *, + model: str, + extra_query: Query = {}, + extra_headers: Headers = {}, + websocket_connection_options: WebsocketConnectionOptions = {}, + ) -> AsyncRealtimeConnectionManager: + """ + The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling. + + Some notable benefits of the API include: + + - Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output. + - Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction. + - Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback. + + The Realtime API is a stateful, event-based API that communicates over a WebSocket. 
+ """ + return AsyncRealtimeConnectionManager( + client=self._client, + extra_query=extra_query, + extra_headers=extra_headers, + websocket_connection_options=websocket_connection_options, + model=model, + ) + + +class RealtimeWithRawResponse: + def __init__(self, realtime: Realtime) -> None: + self._realtime = realtime + + @cached_property + def client_secrets(self) -> ClientSecretsWithRawResponse: + return ClientSecretsWithRawResponse(self._realtime.client_secrets) + + +class AsyncRealtimeWithRawResponse: + def __init__(self, realtime: AsyncRealtime) -> None: + self._realtime = realtime + + @cached_property + def client_secrets(self) -> AsyncClientSecretsWithRawResponse: + return AsyncClientSecretsWithRawResponse(self._realtime.client_secrets) + + +class RealtimeWithStreamingResponse: + def __init__(self, realtime: Realtime) -> None: + self._realtime = realtime + + @cached_property + def client_secrets(self) -> ClientSecretsWithStreamingResponse: + return ClientSecretsWithStreamingResponse(self._realtime.client_secrets) + + +class AsyncRealtimeWithStreamingResponse: + def __init__(self, realtime: AsyncRealtime) -> None: + self._realtime = realtime + + @cached_property + def client_secrets(self) -> AsyncClientSecretsWithStreamingResponse: + return AsyncClientSecretsWithStreamingResponse(self._realtime.client_secrets) + + +class AsyncRealtimeConnection: + """Represents a live websocket connection to the Realtime API""" + + session: AsyncRealtimeSessionResource + response: AsyncRealtimeResponseResource + input_audio_buffer: AsyncRealtimeInputAudioBufferResource + conversation: AsyncRealtimeConversationResource + output_audio_buffer: AsyncRealtimeOutputAudioBufferResource + + _connection: AsyncWebsocketConnection + + def __init__(self, connection: AsyncWebsocketConnection) -> None: + self._connection = connection + + self.session = AsyncRealtimeSessionResource(self) + self.response = AsyncRealtimeResponseResource(self) + self.input_audio_buffer = AsyncRealtimeInputAudioBufferResource(self) + self.conversation = AsyncRealtimeConversationResource(self) + self.output_audio_buffer = AsyncRealtimeOutputAudioBufferResource(self) + + async def __aiter__(self) -> AsyncIterator[RealtimeServerEvent]: + """ + An infinite-iterator that will continue to yield events until + the connection is closed. + """ + from websockets.exceptions import ConnectionClosedOK + + try: + while True: + yield await self.recv() + except ConnectionClosedOK: + return + + async def recv(self) -> RealtimeServerEvent: + """ + Receive the next message from the connection and parses it into a `RealtimeServerEvent` object. + + Canceling this method is safe. There's no risk of losing data. + """ + return self.parse_event(await self.recv_bytes()) + + async def recv_bytes(self) -> bytes: + """Receive the next message from the connection as raw bytes. + + Canceling this method is safe. There's no risk of losing data. + + If you want to parse the message into a `RealtimeServerEvent` object like `.recv()` does, + then you can call `.parse_event(data)`. 
+
+        """
+        message = await self._connection.recv(decode=False)
+        log.debug("Received websocket message: %s", message)
+        return message
+
+    async def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None:
+        data = (
+            event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True)
+            if isinstance(event, BaseModel)
+            else json.dumps(await async_maybe_transform(event, RealtimeClientEventParam))
+        )
+        await self._connection.send(data)
+
+    async def close(self, *, code: int = 1000, reason: str = "") -> None:
+        await self._connection.close(code=code, reason=reason)
+
+    def parse_event(self, data: str | bytes) -> RealtimeServerEvent:
+        """
+        Converts a raw `str` or `bytes` message into a `RealtimeServerEvent` object.
+
+        This is helpful if you're using `.recv_bytes()`.
+        """
+        return cast(
+            RealtimeServerEvent, construct_type_unchecked(value=json.loads(data), type_=cast(Any, RealtimeServerEvent))
+        )
+
+
+class AsyncRealtimeConnectionManager:
+    """
+    Context manager over an `AsyncRealtimeConnection` that is returned by `realtime.connect()`.
+
+    This context manager ensures that the connection will be closed when it exits.
+
+    ---
+
+    Note that if your application doesn't work well with the context manager approach then you
+    can call the `.enter()` method directly to initiate a connection.
+
+    **Warning**: You must remember to close the connection with `.close()`.
+
+    ```py
+    connection = await client.realtime.connect(...).enter()
+    # ...
+    await connection.close()
+    ```
+    """
+
+    def __init__(
+        self,
+        *,
+        client: AsyncOpenAI,
+        model: str,
+        extra_query: Query,
+        extra_headers: Headers,
+        websocket_connection_options: WebsocketConnectionOptions,
+    ) -> None:
+        self.__client = client
+        self.__model = model
+        self.__connection: AsyncRealtimeConnection | None = None
+        self.__extra_query = extra_query
+        self.__extra_headers = extra_headers
+        self.__websocket_connection_options = websocket_connection_options
+
+    async def __aenter__(self) -> AsyncRealtimeConnection:
+        """
+        👋 If your application doesn't work well with the context manager approach then you
+        can call this method directly to initiate a connection.
+
+        **Warning**: You must remember to close the connection with `.close()`.
+
+        ```py
+        connection = await client.realtime.connect(...).enter()
+        # ...
+ await connection.close() + ``` + """ + try: + from websockets.asyncio.client import connect + except ImportError as exc: + raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc + + extra_query = self.__extra_query + await self.__client._refresh_api_key() + auth_headers = self.__client.auth_headers + if is_async_azure_client(self.__client): + url, auth_headers = await self.__client._configure_realtime(self.__model, extra_query) + else: + url = self._prepare_url().copy_with( + params={ + **self.__client.base_url.params, + "model": self.__model, + **extra_query, + }, + ) + log.debug("Connecting to %s", url) + if self.__websocket_connection_options: + log.debug("Connection options: %s", self.__websocket_connection_options) + + self.__connection = AsyncRealtimeConnection( + await connect( + str(url), + user_agent_header=self.__client.user_agent, + additional_headers=_merge_mappings( + { + **auth_headers, + }, + self.__extra_headers, + ), + **self.__websocket_connection_options, + ) + ) + + return self.__connection + + enter = __aenter__ + + def _prepare_url(self) -> httpx.URL: + if self.__client.websocket_base_url is not None: + base_url = httpx.URL(self.__client.websocket_base_url) + else: + base_url = self.__client._base_url.copy_with(scheme="wss") + + merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime" + return base_url.copy_with(raw_path=merge_raw_path) + + async def __aexit__( + self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None + ) -> None: + if self.__connection is not None: + await self.__connection.close() + + +class RealtimeConnection: + """Represents a live websocket connection to the Realtime API""" + + session: RealtimeSessionResource + response: RealtimeResponseResource + input_audio_buffer: RealtimeInputAudioBufferResource + conversation: RealtimeConversationResource + output_audio_buffer: RealtimeOutputAudioBufferResource + + _connection: WebsocketConnection + + def __init__(self, connection: WebsocketConnection) -> None: + self._connection = connection + + self.session = RealtimeSessionResource(self) + self.response = RealtimeResponseResource(self) + self.input_audio_buffer = RealtimeInputAudioBufferResource(self) + self.conversation = RealtimeConversationResource(self) + self.output_audio_buffer = RealtimeOutputAudioBufferResource(self) + + def __iter__(self) -> Iterator[RealtimeServerEvent]: + """ + An infinite-iterator that will continue to yield events until + the connection is closed. + """ + from websockets.exceptions import ConnectionClosedOK + + try: + while True: + yield self.recv() + except ConnectionClosedOK: + return + + def recv(self) -> RealtimeServerEvent: + """ + Receive the next message from the connection and parses it into a `RealtimeServerEvent` object. + + Canceling this method is safe. There's no risk of losing data. + """ + return self.parse_event(self.recv_bytes()) + + def recv_bytes(self) -> bytes: + """Receive the next message from the connection as raw bytes. + + Canceling this method is safe. There's no risk of losing data. + + If you want to parse the message into a `RealtimeServerEvent` object like `.recv()` does, + then you can call `.parse_event(data)`. 
+ """
+ message = self._connection.recv(decode=False)
+ log.debug("Received websocket message: %s", message)
+ return message
+
+ def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None:
+ data = (
+ event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True)
+ if isinstance(event, BaseModel)
+ else json.dumps(maybe_transform(event, RealtimeClientEventParam))
+ )
+ self._connection.send(data)
+
+ def close(self, *, code: int = 1000, reason: str = "") -> None:
+ self._connection.close(code=code, reason=reason)
+
+ def parse_event(self, data: str | bytes) -> RealtimeServerEvent:
+ """
+ Converts a raw `str` or `bytes` message into a `RealtimeServerEvent` object.
+
+ This is helpful if you're using `.recv_bytes()`.
+ """
+ return cast(
+ RealtimeServerEvent, construct_type_unchecked(value=json.loads(data), type_=cast(Any, RealtimeServerEvent))
+ )
+
+
+class RealtimeConnectionManager:
+ """
+ Context manager over a `RealtimeConnection` that is returned by `realtime.connect()`.
+
+ This context manager ensures that the connection will be closed when it exits.
+
+ ---
+
+ Note that if your application doesn't work well with the context manager approach then you
+ can call the `.enter()` method directly to initiate a connection.
+
+ **Warning**: You must remember to close the connection with `.close()`.
+
+ ```py
+ connection = client.realtime.connect(...).enter()
+ # ...
+ connection.close()
+ ```
+ """
+
+ def __init__(
+ self,
+ *,
+ client: OpenAI,
+ model: str,
+ extra_query: Query,
+ extra_headers: Headers,
+ websocket_connection_options: WebsocketConnectionOptions,
+ ) -> None:
+ self.__client = client
+ self.__model = model
+ self.__connection: RealtimeConnection | None = None
+ self.__extra_query = extra_query
+ self.__extra_headers = extra_headers
+ self.__websocket_connection_options = websocket_connection_options
+
+ def __enter__(self) -> RealtimeConnection:
+ """
+ 👋 If your application doesn't work well with the context manager approach then you
+ can call this method directly to initiate a connection.
+
+ **Warning**: You must remember to close the connection with `.close()`.
+
+ ```py
+ connection = client.realtime.connect(...).enter()
+ # ...
+ connection.close() + ``` + """ + try: + from websockets.sync.client import connect + except ImportError as exc: + raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc + + extra_query = self.__extra_query + self.__client._refresh_api_key() + auth_headers = self.__client.auth_headers + if is_azure_client(self.__client): + url, auth_headers = self.__client._configure_realtime(self.__model, extra_query) + else: + url = self._prepare_url().copy_with( + params={ + **self.__client.base_url.params, + "model": self.__model, + **extra_query, + }, + ) + log.debug("Connecting to %s", url) + if self.__websocket_connection_options: + log.debug("Connection options: %s", self.__websocket_connection_options) + + self.__connection = RealtimeConnection( + connect( + str(url), + user_agent_header=self.__client.user_agent, + additional_headers=_merge_mappings( + { + **auth_headers, + }, + self.__extra_headers, + ), + **self.__websocket_connection_options, + ) + ) + + return self.__connection + + enter = __enter__ + + def _prepare_url(self) -> httpx.URL: + if self.__client.websocket_base_url is not None: + base_url = httpx.URL(self.__client.websocket_base_url) + else: + base_url = self.__client._base_url.copy_with(scheme="wss") + + merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime" + return base_url.copy_with(raw_path=merge_raw_path) + + def __exit__( + self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None + ) -> None: + if self.__connection is not None: + self.__connection.close() + + +class BaseRealtimeConnectionResource: + def __init__(self, connection: RealtimeConnection) -> None: + self._connection = connection + + +class RealtimeSessionResource(BaseRealtimeConnectionResource): + def update(self, *, session: session_update_event_param.Session, event_id: str | NotGiven = NOT_GIVEN) -> None: + """ + Send this event to update the session’s configuration. + The client may send this event at any time to update any field + except for `voice` and `model`. `voice` can be updated only if there have been no other audio outputs yet. + + When the server receives a `session.update`, it will respond + with a `session.updated` event showing the full, effective configuration. + Only the fields that are present in the `session.update` are updated. To clear a field like + `instructions`, pass an empty string. To clear a field like `tools`, pass an empty array. + To clear a field like `turn_detection`, pass `null`. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "session.update", "session": session, "event_id": event_id}), + ) + ) + + +class RealtimeResponseResource(BaseRealtimeConnectionResource): + def create( + self, + *, + event_id: str | NotGiven = NOT_GIVEN, + response: RealtimeResponseCreateParamsParam | NotGiven = NOT_GIVEN, + ) -> None: + """ + This event instructs the server to create a Response, which means triggering + model inference. When in Server VAD mode, the server will create Responses + automatically. + + A Response will include at least one Item, and may have two, in which case + the second will be a function call. These Items will be appended to the + conversation history by default. + + The server will respond with a `response.created` event, events for Items + and content created, and finally a `response.done` event to indicate the + Response is complete. + + The `response.create` event includes inference configuration like + `instructions` and `tools`. 
If these are set, they will override the Session's
+ configuration for this Response only.
+
+ Responses can be created out-of-band of the default Conversation, meaning that they can
+ have arbitrary input, and it's possible to disable writing the output to the Conversation.
+ Only one Response can write to the default Conversation at a time, but otherwise multiple
+ Responses can be created in parallel. The `metadata` field is a good way to disambiguate
+ multiple simultaneous Responses.
+
+ Clients can set `conversation` to `none` to create a Response that does not write to the default
+ Conversation. Arbitrary input can be provided with the `input` field, which is an array accepting
+ raw Items and references to existing Items.
+ """
+ self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given({"type": "response.create", "event_id": event_id, "response": response}),
+ )
+ )
+
+ def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | NotGiven = NOT_GIVEN) -> None:
+ """Send this event to cancel an in-progress response.
+
+ The server will respond
+ with a `response.done` event with a status of `response.status=cancelled`. If
+ there is no response to cancel, the server will respond with an error. It's safe
+ to call `response.cancel` even if no response is in progress; an error will be
+ returned and the session will remain unaffected.
+ """
+ self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id}),
+ )
+ )
+
+
+class RealtimeInputAudioBufferResource(BaseRealtimeConnectionResource):
+ def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+ """Send this event to clear the audio bytes in the buffer.
+
+ The server will
+ respond with an `input_audio_buffer.cleared` event.
+ """
+ self._connection.send(
+ cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id}))
+ )
+
+ def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+ """
+ Send this event to commit the user input audio buffer, which will create a new user message item in the conversation. This event will produce an error if the input audio buffer is empty. When in Server VAD mode, the client does not need to send this event; the server will commit the audio buffer automatically.
+
+ Committing the input audio buffer will trigger input audio transcription (if enabled in session configuration), but it will not create a response from the model. The server will respond with an `input_audio_buffer.committed` event.
+ """
+ self._connection.send(
+ cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id}))
+ )
+
+ def append(self, *, audio: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
+ """Send this event to append audio bytes to the input audio buffer.
+
+ The audio
+ buffer is temporary storage you can write to and later commit. A "commit" will create a new
+ user message item in the conversation history from the buffer content and clear the buffer.
+ Input audio transcription (if enabled) will be generated when the buffer is committed.
+
+ If VAD is enabled, the audio buffer is used to detect speech and the server will decide
+ when to commit. When Server VAD is disabled, you must commit the audio buffer
+ manually. Input audio noise reduction operates on writes to the audio buffer.
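+
+ A minimal sketch (assuming `connection` is an open `RealtimeConnection` and `pcm_chunk`
+ holds raw PCM16 audio bytes; the `audio` field carries base64-encoded bytes):
+
+ ```py
+ import base64
+
+ connection.input_audio_buffer.append(audio=base64.b64encode(pcm_chunk).decode("ascii"))
+ connection.input_audio_buffer.commit()  # only needed when Server VAD is disabled
+ ```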
+
+ The client may choose how much audio to place in each event up to a maximum
+ of 15 MiB; for example, streaming smaller chunks from the client may allow the
+ VAD to be more responsive. Unlike most other client events, the server will
+ not send a confirmation response to this event.
+ """
+ self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given({"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id}),
+ )
+ )
+
+
+class RealtimeConversationResource(BaseRealtimeConnectionResource):
+ @cached_property
+ def item(self) -> RealtimeConversationItemResource:
+ return RealtimeConversationItemResource(self._connection)
+
+
+class RealtimeConversationItemResource(BaseRealtimeConnectionResource):
+ def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
+ """Send this event when you want to remove any item from the conversation
+ history.
+
+ The server will respond with a `conversation.item.deleted` event,
+ unless the item does not exist in the conversation history, in which case the
+ server will respond with an error.
+ """
+ self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given({"type": "conversation.item.delete", "item_id": item_id, "event_id": event_id}),
+ )
+ )
+
+ def create(
+ self,
+ *,
+ item: ConversationItemParam,
+ event_id: str | NotGiven = NOT_GIVEN,
+ previous_item_id: str | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ Add a new Item to the Conversation's context, including messages, function
+ calls, and function call responses. This event can be used both to populate a
+ "history" of the conversation and to add new items mid-stream, but has the
+ current limitation that it cannot populate assistant audio messages.
+
+ If successful, the server will respond with a `conversation.item.created`
+ event; otherwise an `error` event will be sent.
+ """
+ self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given(
+ {
+ "type": "conversation.item.create",
+ "item": item,
+ "event_id": event_id,
+ "previous_item_id": previous_item_id,
+ }
+ ),
+ )
+ )
+
+ def truncate(
+ self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | NotGiven = NOT_GIVEN
+ ) -> None:
+ """Send this event to truncate a previous assistant message’s audio.
+
+ The server
+ will produce audio faster than realtime, so this event is useful when the user
+ interrupts to truncate audio that has already been sent to the client but not
+ yet played. This will synchronize the server's understanding of the audio with
+ the client's playback.
+
+ Truncating audio will delete the server-side text transcript to ensure there
+ is no text in the context that hasn't been heard by the user.
+
+ If successful, the server will respond with a `conversation.item.truncated`
+ event.
+ """
+ self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given(
+ {
+ "type": "conversation.item.truncate",
+ "audio_end_ms": audio_end_ms,
+ "content_index": content_index,
+ "item_id": item_id,
+ "event_id": event_id,
+ }
+ ),
+ )
+ )
+
+ def retrieve(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
+ """
+ Send this event when you want to retrieve the server's representation of a specific item in the conversation history. This is useful, for example, to inspect user audio after noise cancellation and VAD.
+ The server will respond with a `conversation.item.retrieved` event,
+ unless the item does not exist in the conversation history, in which case the
+ server will respond with an error.
+ """
+ self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given({"type": "conversation.item.retrieve", "item_id": item_id, "event_id": event_id}),
+ )
+ )
+
+
+class RealtimeOutputAudioBufferResource(BaseRealtimeConnectionResource):
+ def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+ """**WebRTC Only:** Emit to cut off the current audio response.
+
+ This will trigger the server to
+ stop generating audio and emit an `output_audio_buffer.cleared` event. This
+ event should be preceded by a `response.cancel` client event to stop the
+ generation of the current response.
+ [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
+ """
+ self._connection.send(
+ cast(RealtimeClientEventParam, strip_not_given({"type": "output_audio_buffer.clear", "event_id": event_id}))
+ )
+
+
+class BaseAsyncRealtimeConnectionResource:
+ def __init__(self, connection: AsyncRealtimeConnection) -> None:
+ self._connection = connection
+
+
+class AsyncRealtimeSessionResource(BaseAsyncRealtimeConnectionResource):
+ async def update(
+ self, *, session: session_update_event_param.Session, event_id: str | NotGiven = NOT_GIVEN
+ ) -> None:
+ """
+ Send this event to update the session’s configuration.
+ The client may send this event at any time to update any field
+ except for `voice` and `model`. `voice` can be updated only if there have been no other audio outputs yet.
+
+ When the server receives a `session.update`, it will respond
+ with a `session.updated` event showing the full, effective configuration.
+ Only the fields that are present in the `session.update` are updated. To clear a field like
+ `instructions`, pass an empty string. To clear a field like `tools`, pass an empty array.
+ To clear a field like `turn_detection`, pass `null`.
+ """
+ await self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given({"type": "session.update", "session": session, "event_id": event_id}),
+ )
+ )
+
+
+class AsyncRealtimeResponseResource(BaseAsyncRealtimeConnectionResource):
+ async def create(
+ self,
+ *,
+ event_id: str | NotGiven = NOT_GIVEN,
+ response: RealtimeResponseCreateParamsParam | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ This event instructs the server to create a Response, which means triggering
+ model inference. When in Server VAD mode, the server will create Responses
+ automatically.
+
+ A Response will include at least one Item, and may have two, in which case
+ the second will be a function call. These Items will be appended to the
+ conversation history by default.
+
+ The server will respond with a `response.created` event, events for Items
+ and content created, and finally a `response.done` event to indicate the
+ Response is complete.
+
+ The `response.create` event includes inference configuration like
+ `instructions` and `tools`. If these are set, they will override the Session's
+ configuration for this Response only.
+
+ Responses can be created out-of-band of the default Conversation, meaning that they can
+ have arbitrary input, and it's possible to disable writing the output to the Conversation.
+ Only one Response can write to the default Conversation at a time, but otherwise multiple
+ Responses can be created in parallel.
The `metadata` field is a good way to disambiguate
+ multiple simultaneous Responses.
+
+ Clients can set `conversation` to `none` to create a Response that does not write to the default
+ Conversation. Arbitrary input can be provided with the `input` field, which is an array accepting
+ raw Items and references to existing Items.
+ """
+ await self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given({"type": "response.create", "event_id": event_id, "response": response}),
+ )
+ )
+
+ async def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | NotGiven = NOT_GIVEN) -> None:
+ """Send this event to cancel an in-progress response.
+
+ The server will respond
+ with a `response.done` event with a status of `response.status=cancelled`. If
+ there is no response to cancel, the server will respond with an error. It's safe
+ to call `response.cancel` even if no response is in progress; an error will be
+ returned and the session will remain unaffected.
+ """
+ await self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id}),
+ )
+ )
+
+
+class AsyncRealtimeInputAudioBufferResource(BaseAsyncRealtimeConnectionResource):
+ async def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+ """Send this event to clear the audio bytes in the buffer.
+
+ The server will
+ respond with an `input_audio_buffer.cleared` event.
+ """
+ await self._connection.send(
+ cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id}))
+ )
+
+ async def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+ """
+ Send this event to commit the user input audio buffer, which will create a new user message item in the conversation. This event will produce an error if the input audio buffer is empty. When in Server VAD mode, the client does not need to send this event; the server will commit the audio buffer automatically.
+
+ Committing the input audio buffer will trigger input audio transcription (if enabled in session configuration), but it will not create a response from the model. The server will respond with an `input_audio_buffer.committed` event.
+ """
+ await self._connection.send(
+ cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id}))
+ )
+
+ async def append(self, *, audio: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
+ """Send this event to append audio bytes to the input audio buffer.
+
+ The audio
+ buffer is temporary storage you can write to and later commit. A "commit" will create a new
+ user message item in the conversation history from the buffer content and clear the buffer.
+ Input audio transcription (if enabled) will be generated when the buffer is committed.
+
+ If VAD is enabled, the audio buffer is used to detect speech and the server will decide
+ when to commit. When Server VAD is disabled, you must commit the audio buffer
+ manually. Input audio noise reduction operates on writes to the audio buffer.
+
+ The client may choose how much audio to place in each event up to a maximum
+ of 15 MiB; for example, streaming smaller chunks from the client may allow the
+ VAD to be more responsive. Unlike most other client events, the server will
+ not send a confirmation response to this event.
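+
+ A minimal sketch (assuming `connection` is an open `AsyncRealtimeConnection` and
+ `pcm_chunk` holds raw PCM16 audio bytes; the `audio` field carries base64-encoded bytes):
+
+ ```py
+ import base64
+
+ await connection.input_audio_buffer.append(audio=base64.b64encode(pcm_chunk).decode("ascii"))
+ ```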
+ """
+ await self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given({"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id}),
+ )
+ )
+
+
+class AsyncRealtimeConversationResource(BaseAsyncRealtimeConnectionResource):
+ @cached_property
+ def item(self) -> AsyncRealtimeConversationItemResource:
+ return AsyncRealtimeConversationItemResource(self._connection)
+
+
+class AsyncRealtimeConversationItemResource(BaseAsyncRealtimeConnectionResource):
+ async def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
+ """Send this event when you want to remove any item from the conversation
+ history.
+
+ The server will respond with a `conversation.item.deleted` event,
+ unless the item does not exist in the conversation history, in which case the
+ server will respond with an error.
+ """
+ await self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given({"type": "conversation.item.delete", "item_id": item_id, "event_id": event_id}),
+ )
+ )
+
+ async def create(
+ self,
+ *,
+ item: ConversationItemParam,
+ event_id: str | NotGiven = NOT_GIVEN,
+ previous_item_id: str | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ Add a new Item to the Conversation's context, including messages, function
+ calls, and function call responses. This event can be used both to populate a
+ "history" of the conversation and to add new items mid-stream, but has the
+ current limitation that it cannot populate assistant audio messages.
+
+ If successful, the server will respond with a `conversation.item.created`
+ event; otherwise an `error` event will be sent.
+ """
+ await self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given(
+ {
+ "type": "conversation.item.create",
+ "item": item,
+ "event_id": event_id,
+ "previous_item_id": previous_item_id,
+ }
+ ),
+ )
+ )
+
+ async def truncate(
+ self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | NotGiven = NOT_GIVEN
+ ) -> None:
+ """Send this event to truncate a previous assistant message’s audio.
+
+ The server
+ will produce audio faster than realtime, so this event is useful when the user
+ interrupts to truncate audio that has already been sent to the client but not
+ yet played. This will synchronize the server's understanding of the audio with
+ the client's playback.
+
+ Truncating audio will delete the server-side text transcript to ensure there
+ is no text in the context that hasn't been heard by the user.
+
+ If successful, the server will respond with a `conversation.item.truncated`
+ event.
+ """
+ await self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given(
+ {
+ "type": "conversation.item.truncate",
+ "audio_end_ms": audio_end_ms,
+ "content_index": content_index,
+ "item_id": item_id,
+ "event_id": event_id,
+ }
+ ),
+ )
+ )
+
+ async def retrieve(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
+ """
+ Send this event when you want to retrieve the server's representation of a specific item in the conversation history. This is useful, for example, to inspect user audio after noise cancellation and VAD.
+ The server will respond with a `conversation.item.retrieved` event,
+ unless the item does not exist in the conversation history, in which case the
+ server will respond with an error.
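+
+ For example (a sketch; "item_abc123" stands in for a real conversation item ID):
+
+ ```py
+ await connection.conversation.item.retrieve(item_id="item_abc123")
+ ```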
+ """
+ await self._connection.send(
+ cast(
+ RealtimeClientEventParam,
+ strip_not_given({"type": "conversation.item.retrieve", "item_id": item_id, "event_id": event_id}),
+ )
+ )
+
+
+class AsyncRealtimeOutputAudioBufferResource(BaseAsyncRealtimeConnectionResource):
+ async def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+ """**WebRTC Only:** Emit to cut off the current audio response.
+
+ This will trigger the server to
+ stop generating audio and emit an `output_audio_buffer.cleared` event. This
+ event should be preceded by a `response.cancel` client event to stop the
+ generation of the current response.
+ [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
+ """
+ await self._connection.send(
+ cast(RealtimeClientEventParam, strip_not_given({"type": "output_audio_buffer.clear", "event_id": event_id}))
+ )
diff --git a/portkey_ai/_vendor/openai/resources/responses/input_items.py b/portkey_ai/_vendor/openai/resources/responses/input_items.py
index a425a65c..9f3ef637 100644
--- a/portkey_ai/_vendor/openai/resources/responses/input_items.py
+++ b/portkey_ai/_vendor/openai/resources/responses/input_items.py
@@ -47,7 +47,6 @@ def list(
response_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
@@ -64,8 +63,6 @@ def list(
Args:
after: An item ID to list items after, used in pagination.
- before: An item ID to list items before, used in pagination.
-
include: Additional fields to include in the response. See the `include` parameter for
Response creation above for more information.
@@ -98,7 +95,6 @@ def list(
query=maybe_transform(
{
"after": after,
- "before": before,
"include": include,
"limit": limit,
"order": order,
@@ -135,7 +131,6 @@ def list(
response_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
@@ -152,8 +147,6 @@ def list(
Args:
after: An item ID to list items after, used in pagination.
- before: An item ID to list items before, used in pagination.
-
include: Additional fields to include in the response. See the `include` parameter for
Response creation above for more information.
@@ -186,7 +179,6 @@ def list(
query=maybe_transform(
{
"after": after,
- "before": before,
"include": include,
"limit": limit,
"order": order,
diff --git a/portkey_ai/_vendor/openai/resources/responses/responses.py b/portkey_ai/_vendor/openai/resources/responses/responses.py
index 81ae4e5b..8acdb10b 100644
--- a/portkey_ai/_vendor/openai/resources/responses/responses.py
+++ b/portkey_ai/_vendor/openai/resources/responses/responses.py
@@ -10,7 +10,7 @@ from ...
import _legacy_response from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import is_given, required_args, maybe_transform, async_maybe_transform +from ..._utils import is_given, maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper @@ -31,7 +31,6 @@ parse_response, type_to_text_format_param as _type_to_text_format_param, ) -from ...types.shared.chat_model import ChatModel from ...types.responses.response import Response from ...types.responses.tool_param import ToolParam, ParseableToolParam from ...types.shared_params.metadata import Metadata @@ -41,6 +40,7 @@ from ...types.responses.response_includable import ResponseIncludable from ...types.shared_params.responses_model import ResponsesModel from ...types.responses.response_input_param import ResponseInputParam +from ...types.responses.response_prompt_param import ResponsePromptParam from ...types.responses.response_stream_event import ResponseStreamEvent from ...types.responses.response_text_config_param import ResponseTextConfigParam @@ -75,23 +75,30 @@ def with_streaming_response(self) -> ResponsesWithStreamingResponse: def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, background: Optional[bool] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + model: ResponsesModel | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -117,43 +124,44 @@ def create( your own data as input for the model's response. Args: - input: Text, image, or file inputs to the model, used to generate a response. 
- - Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Image inputs](https://platform.openai.com/docs/guides/images) - - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - - [Function calling](https://platform.openai.com/docs/guides/function-calling) - - model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a - wide range of models with different capabilities, performance characteristics, - and price points. Refer to the - [model guide](https://platform.openai.com/docs/models) to browse and compare - available models. - background: Whether to run the model response in the background. [Learn more](https://platform.openai.com/docs/guides/background). + conversation: The conversation that this response belongs to. Items from this conversation are + prepended to `input_items` for this response request. Input items and output + items from this response are automatically added to this conversation after this + response completes. + include: Specify additional output data to include in the model response. Currently supported values are: - - `file_search_call.results`: Include the search results of the file search tool + - `web_search_call.action.sources`: Include the sources of the web search tool call. - - `message.input_image.image_url`: Include image urls from the input message. + - `code_interpreter_call.outputs`: Includes the outputs of python code execution + in code interpreter tool call items. - `computer_call_output.output.image_url`: Include image urls from the computer call output. + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `message.output_text.logprobs`: Include logprobs with assistant messages. - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in reasoning item outputs. This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly (like when the `store` parameter is set to `false`, or when an organization is enrolled in the zero data retention program). - - `code_interpreter_call.outputs`: Includes the outputs of python code execution - in code interpreter tool call items. - instructions: Inserts a system (or developer) message as the first item in the model's - context. + input: Text, image, or file inputs to the model, used to generate a response. + + Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Image inputs](https://platform.openai.com/docs/guides/images) + - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + - [Function calling](https://platform.openai.com/docs/guides/function-calling) + + instructions: A system (or developer) message inserted into the model's context. When using along with `previous_response_id`, the instructions from a previous response will not be carried over to the next response. This makes it simple to @@ -163,6 +171,11 @@ def create( including visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a + response. 
This maximum number applies across all built-in tool calls, not per
+ individual tool. Any further attempts to call a tool by the model will be
+ ignored.
+
metadata: Set of 16 key-value pairs that can be attached to an object. This can be
useful for storing additional information about the object in a structured
format, and querying for objects via API or the dashboard.
@@ -170,34 +183,53 @@ def create(
Keys are strings with a maximum length of 64 characters. Values are strings
with a maximum length of 512 characters.
+ model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+ wide range of models with different capabilities, performance characteristics,
+ and price points. Refer to the
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
+ available models.
+
parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
previous_response_id: The unique ID of the previous response to the model. Use this to create
multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.
+
+ prompt: Reference to a prompt template and its variables.
+ [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
- reasoning: **o-series models only**
+ prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
+ hit rates. Replaces the `user` field.
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+
+ reasoning: **gpt-5 and o-series models only**
Configuration options for
[reasoning models](https://platform.openai.com/docs/guides/reasoning).
- service_tier: Specifies the latency tier to use for processing the request. This parameter is
- relevant for customers subscribed to the scale tier service:
-
- - If set to 'auto', and the Project is Scale tier enabled, the system will
- utilize scale tier credits until they are exhausted.
- - If set to 'auto', and the Project is not Scale tier enabled, the request will
- be processed using the default service tier with a lower uptime SLA and no
- latency guarantee.
- - If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarantee.
- - If set to 'flex', the request will be processed with the Flex Processing
- service tier.
- [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ safety_identifier: A stable identifier used to help detect users of your application that may be
+ violating OpenAI's usage policies. The ID should be a string that uniquely
+ identifies each user. We recommend hashing their username or email address, in
+ order to avoid sending us any identifying information.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+
+ service_tier: Specifies the processing type used for serving the request.
+
+ - If set to 'auto', then the request will be processed with the service tier
+ configured in the Project settings. Unless otherwise configured, the Project
+ will use 'default'.
+ - If set to 'default', then the request will be processed with the standard
+ pricing and performance for the selected model.
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ '[priority](https://openai.com/api-priority-processing/)', then the request
+ will be processed with the corresponding service tier.
- When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. + When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. store: Whether to store the generated model response for later retrieval via API. @@ -208,6 +240,8 @@ def create( [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) for more information. + stream_options: Options for streaming responses. Only set this when you set `stream: true`. + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but @@ -226,7 +260,7 @@ def create( tools: An array of tools the model may call while generating a response. You can specify which tool to use by setting the `tool_choice` parameter. - The two categories of tools you can provide the model are: + We support the following categories of tools: - **Built-in tools**: Tools that are provided by OpenAI that extend the model's capabilities, like @@ -234,9 +268,17 @@ def create( [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools). + - **MCP Tools**: Integrations with third-party systems via custom MCP servers or + predefined connectors such as Google Drive and SharePoint. Learn more about + [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp). - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about + the model to call your own code with strongly typed arguments and outputs. + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 @@ -246,15 +288,17 @@ def create( truncation: The truncation strategy to use for the model response. - - `auto`: If the context of this response and previous ones exceeds the model's - context window size, the model will truncate the response to fit the context - window by dropping input items in the middle of the conversation. - - `disabled` (default): If a model response will exceed the context window size + - `auto`: If the input to this Response exceeds the model's context window size, + the model will truncate the response to fit the context window by dropping + items from the beginning of the conversation. + - `disabled` (default): If the input size will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
+ user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). extra_headers: Send extra headers @@ -270,23 +314,30 @@ def create( def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, stream: Literal[True], background: Optional[bool] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + model: ResponsesModel | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -312,22 +363,6 @@ def create( your own data as input for the model's response. Args: - input: Text, image, or file inputs to the model, used to generate a response. - - Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Image inputs](https://platform.openai.com/docs/guides/images) - - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - - [Function calling](https://platform.openai.com/docs/guides/function-calling) - - model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a - wide range of models with different capabilities, performance characteristics, - and price points. Refer to the - [model guide](https://platform.openai.com/docs/models) to browse and compare - available models. - stream: If set to true, the model response data will be streamed to the client as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). @@ -338,24 +373,41 @@ def create( background: Whether to run the model response in the background. [Learn more](https://platform.openai.com/docs/guides/background). 
+ conversation: The conversation that this response belongs to. Items from this conversation are + prepended to `input_items` for this response request. Input items and output + items from this response are automatically added to this conversation after this + response completes. + include: Specify additional output data to include in the model response. Currently supported values are: - - `file_search_call.results`: Include the search results of the file search tool + - `web_search_call.action.sources`: Include the sources of the web search tool call. - - `message.input_image.image_url`: Include image urls from the input message. + - `code_interpreter_call.outputs`: Includes the outputs of python code execution + in code interpreter tool call items. - `computer_call_output.output.image_url`: Include image urls from the computer call output. + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `message.output_text.logprobs`: Include logprobs with assistant messages. - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in reasoning item outputs. This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly (like when the `store` parameter is set to `false`, or when an organization is enrolled in the zero data retention program). - - `code_interpreter_call.outputs`: Includes the outputs of python code execution - in code interpreter tool call items. - instructions: Inserts a system (or developer) message as the first item in the model's - context. + input: Text, image, or file inputs to the model, used to generate a response. + + Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Image inputs](https://platform.openai.com/docs/guides/images) + - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + - [Function calling](https://platform.openai.com/docs/guides/function-calling) + + instructions: A system (or developer) message inserted into the model's context. When using along with `previous_response_id`, the instructions from a previous response will not be carried over to the next response. This makes it simple to @@ -365,6 +417,11 @@ def create( including visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a + response. This maximum number applies across all built-in tool calls, not per + individual tool. Any further attempts to call a tool by the model will be + ignored. + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. @@ -372,37 +429,58 @@ def create( Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. + model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + parallel_tool_calls: Whether to allow the model to run tool calls in parallel. 
previous_response_id: The unique ID of the previous response to the model. Use this to create
multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.
+
+ prompt: Reference to a prompt template and its variables.
+ [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+
+ prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
+ hit rates. Replaces the `user` field.
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
- reasoning: **o-series models only**
+ reasoning: **gpt-5 and o-series models only**
Configuration options for
[reasoning models](https://platform.openai.com/docs/guides/reasoning).
- service_tier: Specifies the latency tier to use for processing the request. This parameter is
- relevant for customers subscribed to the scale tier service:
-
- - If set to 'auto', and the Project is Scale tier enabled, the system will
- utilize scale tier credits until they are exhausted.
- - If set to 'auto', and the Project is not Scale tier enabled, the request will
- be processed using the default service tier with a lower uptime SLA and no
- latency guarantee.
- - If set to 'default', the request will be processed using the default service
- tier with a lower uptime SLA and no latency guarantee.
- - If set to 'flex', the request will be processed with the Flex Processing
- service tier.
- [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ safety_identifier: A stable identifier used to help detect users of your application that may be
+ violating OpenAI's usage policies. The ID should be a string that uniquely
+ identifies each user. We recommend hashing their username or email address, in
+ order to avoid sending us any identifying information.
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+
+ service_tier: Specifies the processing type used for serving the request.
+
+ - If set to 'auto', then the request will be processed with the service tier
+ configured in the Project settings. Unless otherwise configured, the Project
+ will use 'default'.
+ - If set to 'default', then the request will be processed with the standard
+ pricing and performance for the selected model.
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ '[priority](https://openai.com/api-priority-processing/)', then the request
+ will be processed with the corresponding service tier.
- When not set, the default behavior is 'auto'.
- When this parameter is set, the response body will include the `service_tier`
- utilized.
+ When the `service_tier` parameter is set, the response body will include the
+ `service_tier` value based on the processing mode actually used to serve the
+ request. This response value may be different from the value set in the
+ parameter.
store: Whether to store the generated model response for later retrieval via API.
+ stream_options: Options for streaming responses. Only set this when you set `stream: true`.
+
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more
focused and deterministic. We generally recommend altering this or `top_p` but
@@ -421,7 +499,7 @@ def create(
tools: An array of tools the model may call while generating a response.
You can specify which tool to use by setting the `tool_choice` parameter. - The two categories of tools you can provide the model are: + We support the following categories of tools: - **Built-in tools**: Tools that are provided by OpenAI that extend the model's capabilities, like @@ -429,9 +507,17 @@ def create( [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools). + - **MCP Tools**: Integrations with third-party systems via custom MCP servers or + predefined connectors such as Google Drive and SharePoint. Learn more about + [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp). - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about + the model to call your own code with strongly typed arguments and outputs. + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 @@ -441,15 +527,17 @@ def create( truncation: The truncation strategy to use for the model response. - - `auto`: If the context of this response and previous ones exceeds the model's - context window size, the model will truncate the response to fit the context - window by dropping input items in the middle of the conversation. - - `disabled` (default): If a model response will exceed the context window size + - `auto`: If the input to this Response exceeds the model's context window size, + the model will truncate the response to fit the context window by dropping + items from the beginning of the conversation. + - `disabled` (default): If the input size will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). 
extra_headers: Send extra headers @@ -465,23 +553,30 @@ def create( def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, stream: bool, background: Optional[bool] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + model: ResponsesModel | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -507,22 +602,6 @@ def create( your own data as input for the model's response. Args: - input: Text, image, or file inputs to the model, used to generate a response. - - Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Image inputs](https://platform.openai.com/docs/guides/images) - - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - - [Function calling](https://platform.openai.com/docs/guides/function-calling) - - model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a - wide range of models with different capabilities, performance characteristics, - and price points. Refer to the - [model guide](https://platform.openai.com/docs/models) to browse and compare - available models. - stream: If set to true, the model response data will be streamed to the client as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). @@ -533,24 +612,41 @@ def create( background: Whether to run the model response in the background. [Learn more](https://platform.openai.com/docs/guides/background). + conversation: The conversation that this response belongs to. Items from this conversation are + prepended to `input_items` for this response request. Input items and output + items from this response are automatically added to this conversation after this + response completes. + include: Specify additional output data to include in the model response. 
Currently supported values are: - - `file_search_call.results`: Include the search results of the file search tool + - `web_search_call.action.sources`: Include the sources of the web search tool call. - - `message.input_image.image_url`: Include image urls from the input message. + - `code_interpreter_call.outputs`: Includes the outputs of python code execution + in code interpreter tool call items. - `computer_call_output.output.image_url`: Include image urls from the computer call output. + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `message.output_text.logprobs`: Include logprobs with assistant messages. - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in reasoning item outputs. This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly (like when the `store` parameter is set to `false`, or when an organization is enrolled in the zero data retention program). - - `code_interpreter_call.outputs`: Includes the outputs of python code execution - in code interpreter tool call items. - instructions: Inserts a system (or developer) message as the first item in the model's - context. + input: Text, image, or file inputs to the model, used to generate a response. + + Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Image inputs](https://platform.openai.com/docs/guides/images) + - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + - [Function calling](https://platform.openai.com/docs/guides/function-calling) + + instructions: A system (or developer) message inserted into the model's context. When using along with `previous_response_id`, the instructions from a previous response will not be carried over to the next response. This makes it simple to @@ -560,6 +656,11 @@ def create( including visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a + response. This maximum number applies across all built-in tool calls, not per + individual tool. Any further attempts to call a tool by the model will be + ignored. + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. @@ -567,37 +668,58 @@ def create( Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. + model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + parallel_tool_calls: Whether to allow the model to run tool calls in parallel. previous_response_id: The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. + + prompt: Reference to a prompt template and its variables. 
+ [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). - reasoning: **o-series models only** + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). - service_tier: Specifies the latency tier to use for processing the request. This parameter is - relevant for customers subscribed to the scale tier service: - - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarantee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. - - If set to 'flex', the request will be processed with the Flex Processing - service tier. - [Learn more](https://platform.openai.com/docs/guides/flex-processing). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + + service_tier: Specifies the processing type used for serving the request. + + - If set to 'auto', then the request will be processed with the service tier + configured in the Project settings. Unless otherwise configured, the Project + will use 'default'. + - If set to 'default', then the request will be processed with the standard + pricing and performance for the selected model. + - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. + When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. store: Whether to store the generated model response for later retrieval via API. + stream_options: Options for streaming responses. Only set this when you set `stream: true`. + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but @@ -616,7 +738,7 @@ def create( tools: An array of tools the model may call while generating a response. You can specify which tool to use by setting the `tool_choice` parameter. 
- The two categories of tools you can provide the model are: + We support the following categories of tools: - **Built-in tools**: Tools that are provided by OpenAI that extend the model's capabilities, like @@ -624,9 +746,17 @@ def create( [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools). + - **MCP Tools**: Integrations with third-party systems via custom MCP servers or + predefined connectors such as Google Drive and SharePoint. Learn more about + [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp). - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about + the model to call your own code with strongly typed arguments and outputs. + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 @@ -636,15 +766,17 @@ def create( truncation: The truncation strategy to use for the model response. - - `auto`: If the context of this response and previous ones exceeds the model's - context window size, the model will truncate the response to fit the context - window by dropping input items in the middle of the conversation. - - `disabled` (default): If a model response will exceed the context window size + - `auto`: If the input to this Response exceeds the model's context window size, + the model will truncate the response to fit the context window by dropping + items from the beginning of the conversation. + - `disabled` (default): If the input size will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). extra_headers: Send extra headers @@ -656,27 +788,33 @@ def create( """ ... 
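# A minimal sketch of the three tool categories the updated docstring above
# lists (built-in, MCP, and function tools), assuming the upstream
# openai-python surface that this vendored module mirrors; the model name,
# MCP server URL, and weather schema are illustrative placeholders.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

response = client.responses.create(
    model="gpt-4o",
    input="What's the weather like in Paris today?",
    tools=[
        # Built-in tool hosted by OpenAI
        {"type": "web_search_preview"},
        # MCP tool: a custom server or a predefined connector
        {
            "type": "mcp",
            "server_label": "deepwiki",
            "server_url": "https://mcp.deepwiki.com/mcp",
            "require_approval": "never",
        },
        # Function call (custom tool): typed arguments you handle yourself
        {
            "type": "function",
            "name": "get_weather",
            "description": "Look up the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    ],
    tool_choice="auto",
)
print(response.output_text)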
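# A minimal sketch of the request-shaping parameters this hunk introduces
# (`service_tier` values, `safety_identifier`, `prompt_cache_key`,
# `max_tool_calls`, `top_logprobs`), under the same assumptions; the hashing
# scheme and cache key are illustrative.
import hashlib

from openai import OpenAI

client = OpenAI()


def stable_id(email: str) -> str:
    # Hash the address so no identifying information leaves your system,
    # as the safety_identifier docstring recommends.
    return hashlib.sha256(email.encode()).hexdigest()


response = client.responses.create(
    model="gpt-4o",
    input="Summarize our refund policy in one sentence.",
    service_tier="flex",                       # 'auto' | 'default' | 'flex' | 'scale' | 'priority'
    safety_identifier=stable_id("ada@example.com"),
    prompt_cache_key="refund-policy-v1",       # replaces `user` for cache bucketing
    max_tool_calls=3,                          # cap across all built-in tool calls
    top_logprobs=5,
    include=["message.output_text.logprobs"],  # surface the logprobs in the output
)
# The body echoes the tier actually used, which may differ from the request.
print(response.service_tier)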
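# A minimal sketch of threading state through the new `conversation`
# parameter instead of `previous_response_id` (the docstring above notes the
# two cannot be combined). Assumes the Conversations API this release also
# vendors; the model name is illustrative.
from openai import OpenAI

client = OpenAI()

conversation = client.conversations.create()  # empty conversation to accumulate items

client.responses.create(
    model="gpt-4o",
    input="My name is Ada.",
    conversation=conversation.id,
)
followup = client.responses.create(
    model="gpt-4o",
    input="What is my name?",      # earlier items are prepended automatically
    conversation=conversation.id,  # do not also pass previous_response_id
)
print(followup.output_text)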
- @required_args(["input", "model"], ["input", "model", "stream"]) def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, background: Optional[bool] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + model: ResponsesModel | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -691,23 +829,30 @@ def create( "/responses", body=maybe_transform( { - "input": input, - "model": model, "background": background, + "conversation": conversation, "include": include, + "input": input, "instructions": instructions, "max_output_tokens": max_output_tokens, + "max_tool_calls": max_tool_calls, "metadata": metadata, + "model": model, "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, + "prompt": prompt, + "prompt_cache_key": prompt_cache_key, "reasoning": reasoning, + "safety_identifier": safety_identifier, "service_tier": service_tier, "store": store, "stream": stream, + "stream_options": stream_options, "temperature": temperature, "text": text, "tool_choice": tool_choice, "tools": tools, + "top_logprobs": top_logprobs, "top_p": top_p, "truncation": truncation, "user": user, @@ -744,21 +889,29 @@ def stream( self, *, input: Union[str, ResponseInputParam], - model: Union[str, ChatModel], + model: ResponsesModel, background: Optional[bool] | NotGiven = NOT_GIVEN, text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = 
NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -775,21 +928,29 @@ def stream( *, response_id: str | NotGiven = NOT_GIVEN, input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel] | NotGiven = NOT_GIVEN, + model: ResponsesModel | NotGiven = NOT_GIVEN, background: Optional[bool] | NotGiven = NOT_GIVEN, text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -804,17 +965,25 @@ def stream( new_response_args = { "input": input, "model": model, + "conversation": conversation, "include": include, "instructions": instructions, "max_output_tokens": max_output_tokens, + "max_tool_calls": max_tool_calls, "metadata": metadata, "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, + "prompt": prompt, + "prompt_cache_key": prompt_cache_key, "reasoning": reasoning, + "safety_identifier": safety_identifier, + "service_tier": service_tier, "store": store, + "stream_options": stream_options, "temperature": temperature, "text": text, "tool_choice": tool_choice, + "top_logprobs": top_logprobs, "top_p": top_p, "truncation": truncation, "user": user, @@ -849,18 +1018,26 @@ def stream( input=input, model=model, tools=tools, + conversation=conversation, include=include, instructions=instructions, max_output_tokens=max_output_tokens, + max_tool_calls=max_tool_calls, metadata=metadata, parallel_tool_calls=parallel_tool_calls, 
previous_response_id=previous_response_id, + prompt=prompt, + prompt_cache_key=prompt_cache_key, store=store, + stream_options=stream_options, stream=True, temperature=temperature, text=text, tool_choice=tool_choice, reasoning=reasoning, + safety_identifier=safety_identifier, + service_tier=service_tier, + top_logprobs=top_logprobs, top_p=top_p, truncation=truncation, user=user, @@ -895,25 +1072,35 @@ def stream( def parse( self, *, - input: Union[str, ResponseInputParam], - model: Union[str, ChatModel], text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, - tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, + background: Optional[bool] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + model: ResponsesModel | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -943,24 +1130,34 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: "/responses", body=maybe_transform( { - "input": input, - "model": model, + "background": background, + "conversation": conversation, "include": include, + "input": input, "instructions": instructions, "max_output_tokens": max_output_tokens, + "max_tool_calls": max_tool_calls, "metadata": metadata, + "model": model, "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, + "prompt": prompt, + "prompt_cache_key": prompt_cache_key, "reasoning": reasoning, + "safety_identifier": safety_identifier, + "service_tier": service_tier, "store": store, "stream": stream, + "stream_options": stream_options, "temperature": temperature, "text": text, "tool_choice": tool_choice, "tools": tools, + "top_logprobs": top_logprobs, "top_p": top_p, "truncation": truncation, "user": user, + "verbosity": verbosity, }, response_create_params.ResponseCreateParams, ), @@ -982,6 +1179,7 @@ def retrieve( response_id: str, *, include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, stream: Literal[False] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -1046,6 +1244,13 @@ def retrieve( include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. + include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random + characters to an `obfuscation` field on streaming delta events to normalize + payload sizes as a mitigation to certain side-channel attacks. These obfuscation + fields are included by default, but add a small amount of overhead to the data + stream. You can set `include_obfuscation` to false to optimize for bandwidth if + you trust the network links between your application and the OpenAI API. + starting_after: The sequence number of the event after which to start streaming. stream: If set to true, the model response data will be streamed to the client as it is @@ -1072,6 +1277,7 @@ def retrieve( *, stream: Literal[True], include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -1094,6 +1300,13 @@ def retrieve( include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. + include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random + characters to an `obfuscation` field on streaming delta events to normalize + payload sizes as a mitigation to certain side-channel attacks. These obfuscation + fields are included by default, but add a small amount of overhead to the data + stream. You can set `include_obfuscation` to false to optimize for bandwidth if + you trust the network links between your application and the OpenAI API. + starting_after: The sequence number of the event after which to start streaming. 
extra_headers: Send extra headers @@ -1113,6 +1326,7 @@ def retrieve( *, stream: bool, include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -1135,6 +1349,13 @@ def retrieve( include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. + include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random + characters to an `obfuscation` field on streaming delta events to normalize + payload sizes as a mitigation to certain side-channel attacks. These obfuscation + fields are included by default, but add a small amount of overhead to the data + stream. You can set `include_obfuscation` to false to optimize for bandwidth if + you trust the network links between your application and the OpenAI API. + starting_after: The sequence number of the event after which to start streaming. extra_headers: Send extra headers @@ -1152,6 +1373,7 @@ def retrieve( response_id: str, *, include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -1173,6 +1395,7 @@ def retrieve( query=maybe_transform( { "include": include, + "include_obfuscation": include_obfuscation, "starting_after": starting_after, "stream": stream, }, @@ -1283,23 +1506,30 @@ def with_streaming_response(self) -> AsyncResponsesWithStreamingResponse: async def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, background: Optional[bool] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + model: ResponsesModel | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, 
top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -1325,43 +1555,44 @@ async def create( your own data as input for the model's response. Args: - input: Text, image, or file inputs to the model, used to generate a response. - - Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Image inputs](https://platform.openai.com/docs/guides/images) - - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - - [Function calling](https://platform.openai.com/docs/guides/function-calling) - - model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a - wide range of models with different capabilities, performance characteristics, - and price points. Refer to the - [model guide](https://platform.openai.com/docs/models) to browse and compare - available models. - background: Whether to run the model response in the background. [Learn more](https://platform.openai.com/docs/guides/background). + conversation: The conversation that this response belongs to. Items from this conversation are + prepended to `input_items` for this response request. Input items and output + items from this response are automatically added to this conversation after this + response completes. + include: Specify additional output data to include in the model response. Currently supported values are: - - `file_search_call.results`: Include the search results of the file search tool + - `web_search_call.action.sources`: Include the sources of the web search tool call. - - `message.input_image.image_url`: Include image urls from the input message. + - `code_interpreter_call.outputs`: Includes the outputs of python code execution + in code interpreter tool call items. - `computer_call_output.output.image_url`: Include image urls from the computer call output. + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `message.output_text.logprobs`: Include logprobs with assistant messages. - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in reasoning item outputs. This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly (like when the `store` parameter is set to `false`, or when an organization is enrolled in the zero data retention program). - - `code_interpreter_call.outputs`: Includes the outputs of python code execution - in code interpreter tool call items. - instructions: Inserts a system (or developer) message as the first item in the model's - context. + input: Text, image, or file inputs to the model, used to generate a response. + + Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Image inputs](https://platform.openai.com/docs/guides/images) + - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + - [Function calling](https://platform.openai.com/docs/guides/function-calling) + + instructions: A system (or developer) message inserted into the model's context. When using along with `previous_response_id`, the instructions from a previous response will not be carried over to the next response. 
This makes it simple to @@ -1371,6 +1602,11 @@ async def create( including visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a + response. This maximum number applies across all built-in tool calls, not per + individual tool. Any further attempts to call a tool by the model will be + ignored. + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. @@ -1378,34 +1614,53 @@ async def create( Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. + model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + parallel_tool_calls: Whether to allow the model to run tool calls in parallel. previous_response_id: The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. + + prompt: Reference to a prompt template and its variables. + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning: **o-series models only** + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). - service_tier: Specifies the latency tier to use for processing the request. This parameter is - relevant for customers subscribed to the scale tier service: - - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarantee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. - - If set to 'flex', the request will be processed with the Flex Processing - service tier. - [Learn more](https://platform.openai.com/docs/guides/flex-processing). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + + service_tier: Specifies the processing type used for serving the request. + + - If set to 'auto', then the request will be processed with the service tier + configured in the Project settings. Unless otherwise configured, the Project + will use 'default'. 
+ - If set to 'default', then the request will be processed with the standard + pricing and performance for the selected model. + - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. + When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. store: Whether to store the generated model response for later retrieval via API. @@ -1416,6 +1671,8 @@ async def create( [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) for more information. + stream_options: Options for streaming responses. Only set this when you set `stream: true`. + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but @@ -1434,7 +1691,7 @@ async def create( tools: An array of tools the model may call while generating a response. You can specify which tool to use by setting the `tool_choice` parameter. - The two categories of tools you can provide the model are: + We support the following categories of tools: - **Built-in tools**: Tools that are provided by OpenAI that extend the model's capabilities, like @@ -1442,9 +1699,17 @@ async def create( [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools). + - **MCP Tools**: Integrations with third-party systems via custom MCP servers or + predefined connectors such as Google Drive and SharePoint. Learn more about + [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp). - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about + the model to call your own code with strongly typed arguments and outputs. + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 @@ -1454,15 +1719,17 @@ async def create( truncation: The truncation strategy to use for the model response. - - `auto`: If the context of this response and previous ones exceeds the model's - context window size, the model will truncate the response to fit the context - window by dropping input items in the middle of the conversation. - - `disabled` (default): If a model response will exceed the context window size + - `auto`: If the input to this Response exceeds the model's context window size, + the model will truncate the response to fit the context window by dropping + items from the beginning of the conversation. 
+ - `disabled` (default): If the input size will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). extra_headers: Send extra headers @@ -1478,23 +1745,30 @@ async def create( async def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, stream: Literal[True], background: Optional[bool] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + model: ResponsesModel | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -1520,22 +1794,6 @@ async def create( your own data as input for the model's response. Args: - input: Text, image, or file inputs to the model, used to generate a response. - - Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Image inputs](https://platform.openai.com/docs/guides/images) - - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - - [Function calling](https://platform.openai.com/docs/guides/function-calling) - - model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a - wide range of models with different capabilities, performance characteristics, - and price points. Refer to the - [model guide](https://platform.openai.com/docs/models) to browse and compare - available models. 
- stream: If set to true, the model response data will be streamed to the client as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). @@ -1546,24 +1804,41 @@ async def create( background: Whether to run the model response in the background. [Learn more](https://platform.openai.com/docs/guides/background). + conversation: The conversation that this response belongs to. Items from this conversation are + prepended to `input_items` for this response request. Input items and output + items from this response are automatically added to this conversation after this + response completes. + include: Specify additional output data to include in the model response. Currently supported values are: - - `file_search_call.results`: Include the search results of the file search tool + - `web_search_call.action.sources`: Include the sources of the web search tool call. - - `message.input_image.image_url`: Include image urls from the input message. + - `code_interpreter_call.outputs`: Includes the outputs of python code execution + in code interpreter tool call items. - `computer_call_output.output.image_url`: Include image urls from the computer call output. + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `message.output_text.logprobs`: Include logprobs with assistant messages. - `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in reasoning item outputs. This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly (like when the `store` parameter is set to `false`, or when an organization is enrolled in the zero data retention program). - - `code_interpreter_call.outputs`: Includes the outputs of python code execution - in code interpreter tool call items. - instructions: Inserts a system (or developer) message as the first item in the model's - context. + input: Text, image, or file inputs to the model, used to generate a response. + + Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Image inputs](https://platform.openai.com/docs/guides/images) + - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + - [Function calling](https://platform.openai.com/docs/guides/function-calling) + + instructions: A system (or developer) message inserted into the model's context. When using along with `previous_response_id`, the instructions from a previous response will not be carried over to the next response. This makes it simple to @@ -1573,6 +1848,11 @@ async def create( including visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a + response. This maximum number applies across all built-in tool calls, not per + individual tool. Any further attempts to call a tool by the model will be + ignored. + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. @@ -1580,37 +1860,58 @@ async def create( Keys are strings with a maximum length of 64 characters. 
Values are strings with a maximum length of 512 characters. + model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + parallel_tool_calls: Whether to allow the model to run tool calls in parallel. previous_response_id: The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. + + prompt: Reference to a prompt template and its variables. + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). - reasoning: **o-series models only** + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). - service_tier: Specifies the latency tier to use for processing the request. This parameter is - relevant for customers subscribed to the scale tier service: - - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarantee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. - - If set to 'flex', the request will be processed with the Flex Processing - service tier. - [Learn more](https://platform.openai.com/docs/guides/flex-processing). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + + service_tier: Specifies the processing type used for serving the request. + + - If set to 'auto', then the request will be processed with the service tier + configured in the Project settings. Unless otherwise configured, the Project + will use 'default'. + - If set to 'default', then the request will be processed with the standard + pricing and performance for the selected model. + - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. + When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. store: Whether to store the generated model response for later retrieval via API. 
+ stream_options: Options for streaming responses. Only set this when you set `stream: true`. + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but @@ -1629,7 +1930,7 @@ async def create( tools: An array of tools the model may call while generating a response. You can specify which tool to use by setting the `tool_choice` parameter. - The two categories of tools you can provide the model are: + We support the following categories of tools: - **Built-in tools**: Tools that are provided by OpenAI that extend the model's capabilities, like @@ -1637,9 +1938,17 @@ async def create( [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools). + - **MCP Tools**: Integrations with third-party systems via custom MCP servers or + predefined connectors such as Google Drive and SharePoint. Learn more about + [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp). - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about + the model to call your own code with strongly typed arguments and outputs. + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 @@ -1649,15 +1958,17 @@ async def create( truncation: The truncation strategy to use for the model response. - - `auto`: If the context of this response and previous ones exceeds the model's - context window size, the model will truncate the response to fit the context - window by dropping input items in the middle of the conversation. - - `disabled` (default): If a model response will exceed the context window size + - `auto`: If the input to this Response exceeds the model's context window size, + the model will truncate the response to fit the context window by dropping + items from the beginning of the conversation. + - `disabled` (default): If the input size will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). 
extra_headers: Send extra headers @@ -1673,23 +1984,30 @@ async def create( async def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, stream: bool, background: Optional[bool] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + model: ResponsesModel | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -1715,22 +2033,6 @@ async def create( your own data as input for the model's response. Args: - input: Text, image, or file inputs to the model, used to generate a response. - - Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Image inputs](https://platform.openai.com/docs/guides/images) - - [File inputs](https://platform.openai.com/docs/guides/pdf-files) - - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) - - [Function calling](https://platform.openai.com/docs/guides/function-calling) - - model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a - wide range of models with different capabilities, performance characteristics, - and price points. Refer to the - [model guide](https://platform.openai.com/docs/models) to browse and compare - available models. - stream: If set to true, the model response data will be streamed to the client as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). @@ -1741,24 +2043,41 @@ async def create( background: Whether to run the model response in the background. [Learn more](https://platform.openai.com/docs/guides/background). + conversation: The conversation that this response belongs to. Items from this conversation are + prepended to `input_items` for this response request. Input items and output + items from this response are automatically added to this conversation after this + response completes. + include: Specify additional output data to include in the model response. 
          Currently supported values are:

-         - `file_search_call.results`: Include the search results of the file search tool
+         - `web_search_call.action.sources`: Include the sources of the web search tool call.
-         - `message.input_image.image_url`: Include image urls from the input message.
+         - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+           in code interpreter tool call items.
          - `computer_call_output.output.image_url`: Include image urls from the computer
            call output.
+         - `file_search_call.results`: Include the search results of the file search tool
+           call.
+         - `message.input_image.image_url`: Include image urls from the input message.
+         - `message.output_text.logprobs`: Include logprobs with assistant messages.
          - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
            tokens in reasoning item outputs. This enables reasoning items to be used in
            multi-turn conversations when using the Responses API statelessly (like when
            the `store` parameter is set to `false`, or when an organization is enrolled
            in the zero data retention program).
-         - `code_interpreter_call.outputs`: Includes the outputs of python code execution
-           in code interpreter tool call items.

-         instructions: Inserts a system (or developer) message as the first item in the model's
-           context.
+         input: Text, image, or file inputs to the model, used to generate a response.
+
+           Learn more:
+
+           - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+           - [Image inputs](https://platform.openai.com/docs/guides/images)
+           - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+           - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+           - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
+         instructions: A system (or developer) message inserted into the model's context.

            When using along with `previous_response_id`, the instructions from a previous
            response will not be carried over to the next response. This makes it simple to
@@ -1768,6 +2087,11 @@ async def create(
            including visible output tokens and
            [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).

+         max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
+           response. This maximum number applies across all built-in tool calls, not per
+           individual tool. Any further attempts to call a tool by the model will be
+           ignored.
+
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
            for storing additional information about the object in a structured format, and
            querying for objects via API or the dashboard.
@@ -1775,37 +2099,58 @@ async def create(
            Keys are strings with a maximum length of 64 characters. Values are strings with
            a maximum length of 512 characters.

+         model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+           wide range of models with different capabilities, performance characteristics,
+           and price points. Refer to the
+           [model guide](https://platform.openai.com/docs/models) to browse and compare
+           available models.
+
          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model. Use this to create
            multi-turn conversations. Learn more about
            [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+           Cannot be used in conjunction with `conversation`.
+
+         prompt: Reference to a prompt template and its variables.
+           [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+
+         prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
+           hit rates. Replaces the `user` field.
+           [Learn more](https://platform.openai.com/docs/guides/prompt-caching).

-         reasoning: **o-series models only**
+         reasoning: **gpt-5 and o-series models only**

            Configuration options for
            [reasoning models](https://platform.openai.com/docs/guides/reasoning).

-         service_tier: Specifies the latency tier to use for processing the request. This parameter is
-           relevant for customers subscribed to the scale tier service:
-
-           - If set to 'auto', and the Project is Scale tier enabled, the system will
-             utilize scale tier credits until they are exhausted.
-           - If set to 'auto', and the Project is not Scale tier enabled, the request will
-             be processed using the default service tier with a lower uptime SLA and no
-             latency guarantee.
-           - If set to 'default', the request will be processed using the default service
-             tier with a lower uptime SLA and no latency guarantee.
-           - If set to 'flex', the request will be processed with the Flex Processing
-             service tier.
-             [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+         safety_identifier: A stable identifier used to help detect users of your application that may be
+           violating OpenAI's usage policies. The IDs should be a string that uniquely
+           identifies each user. We recommend hashing their username or email address, in
+           order to avoid sending us any identifying information.
+           [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+
+         service_tier: Specifies the processing type used for serving the request.
+
+           - If set to 'auto', then the request will be processed with the service tier
+             configured in the Project settings. Unless otherwise configured, the Project
+             will use 'default'.
+           - If set to 'default', then the request will be processed with the standard
+             pricing and performance for the selected model.
+           - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+             '[priority](https://openai.com/api-priority-processing/)', then the request
+             will be processed with the corresponding service tier.

-           When not set, the default behavior is 'auto'.
-           When this parameter is set, the response body will include the `service_tier`
-           utilized.
+           When the `service_tier` parameter is set, the response body will include the
+           `service_tier` value based on the processing mode actually used to serve the
+           request. This response value may be different from the value set in the
+           parameter.

          store: Whether to store the generated model response for later retrieval via API.

+         stream_options: Options for streaming responses. Only set this when you set `stream: true`.
+
          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
            make the output more random, while lower values like 0.2 will make it more
            focused and deterministic. We generally recommend altering this or `top_p` but
@@ -1824,7 +2169,7 @@ async def create(
          tools: An array of tools the model may call while generating a response. You can
            specify which tool to use by setting the `tool_choice` parameter.

-           The two categories of tools you can provide the model are:
+           We support the following categories of tools:

            - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
              capabilities, like
              [file search](https://platform.openai.com/docs/guides/tools-file-search).
              Learn more about
              [built-in tools](https://platform.openai.com/docs/guides/tools).
+           - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
+             predefined connectors such as Google Drive and SharePoint. Learn more about
+             [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
            - **Function calls (custom tools)**: Functions that are defined by you, enabling
-             the model to call your own code. Learn more about
+             the model to call your own code with strongly typed arguments and outputs.
+             Learn more about
              [function calling](https://platform.openai.com/docs/guides/function-calling).
+             You can also use custom tools to call your own code.
+
+         top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+           return at each token position, each with an associated log probability.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
            model considers the results of the tokens with top_p probability mass. So 0.1
@@ -1844,15 +2197,17 @@ async def create(
          truncation: The truncation strategy to use for the model response.

-           - `auto`: If the context of this response and previous ones exceeds the model's
-             context window size, the model will truncate the response to fit the context
-             window by dropping input items in the middle of the conversation.
-           - `disabled` (default): If a model response will exceed the context window size
+           - `auto`: If the input to this Response exceeds the model's context window size,
+             the model will truncate the response to fit the context window by dropping
+             items from the beginning of the conversation.
+           - `disabled` (default): If the input size will exceed the context window size
              for a model, the request will fail with a 400 error.

-         user: A stable identifier for your end-users. Used to boost cache hit rates by better
-           bucketing similar requests and to help OpenAI detect and prevent abuse.
-           [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+         user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+           `prompt_cache_key` instead to maintain caching optimizations. A stable
+           identifier for your end-users. Used to boost cache hit rates by better bucketing
+           similar requests and to help OpenAI detect and prevent abuse.
+           [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          extra_headers: Send extra headers
@@ -1864,27 +2219,33 @@ async def create(
        """
        ...
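Taken together, the new parameters are easiest to read in a concrete call. Below is a minimal, illustrative sketch against the client; it assumes `OPENAI_API_KEY` is set in the environment, and the model name and identifier values are placeholders:

```python
# Minimal sketch combining several of the newly documented Responses
# parameters. All literal values here are illustrative placeholders.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

response = client.responses.create(
    model="gpt-4o",
    input="Write a one-sentence bedtime story about a unicorn.",
    instructions="You are a concise assistant.",
    max_tool_calls=3,  # cap across all built-in tool calls, not per tool
    prompt_cache_key="bedtime-stories",  # cache bucketing; replaces `user`
    safety_identifier="user-1a2b3c",  # e.g. a hash of the username, never raw PII
    service_tier="auto",  # resolve to the Project-configured tier
    top_logprobs=5,  # include logprobs for the 5 most likely tokens
    truncation="auto",  # drop oldest items if the context window overflows
)
print(response.output_text)
print(response.service_tier)  # may differ from the requested value
```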
- @required_args(["input", "model"], ["input", "model", "stream"]) async def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, background: Optional[bool] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + model: ResponsesModel | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, - service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -1899,23 +2260,30 @@ async def create( "/responses", body=await async_maybe_transform( { - "input": input, - "model": model, "background": background, + "conversation": conversation, "include": include, + "input": input, "instructions": instructions, "max_output_tokens": max_output_tokens, + "max_tool_calls": max_tool_calls, "metadata": metadata, + "model": model, "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, + "prompt": prompt, + "prompt_cache_key": prompt_cache_key, "reasoning": reasoning, + "safety_identifier": safety_identifier, "service_tier": service_tier, "store": store, "stream": stream, + "stream_options": stream_options, "temperature": temperature, "text": text, "tool_choice": tool_choice, "tools": tools, + "top_logprobs": top_logprobs, "top_p": top_p, "truncation": truncation, "user": user, @@ -1952,21 +2320,29 @@ def stream( self, *, input: Union[str, ResponseInputParam], - model: Union[str, ChatModel], + model: ResponsesModel, background: Optional[bool] | NotGiven = NOT_GIVEN, text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: 
Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -1983,21 +2359,29 @@ def stream( *, response_id: str | NotGiven = NOT_GIVEN, input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, - model: Union[str, ChatModel] | NotGiven = NOT_GIVEN, + model: ResponsesModel | NotGiven = NOT_GIVEN, background: Optional[bool] | NotGiven = NOT_GIVEN, text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -2012,17 +2396,25 @@ def stream( new_response_args = { "input": input, "model": model, + "conversation": conversation, "include": include, "instructions": instructions, "max_output_tokens": max_output_tokens, + "max_tool_calls": max_tool_calls, "metadata": metadata, "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, + "prompt": prompt, + "prompt_cache_key": prompt_cache_key, "reasoning": reasoning, + "safety_identifier": safety_identifier, + "service_tier": service_tier, "store": store, + "stream_options": stream_options, "temperature": temperature, "text": text, "tool_choice": tool_choice, + "top_logprobs": top_logprobs, "top_p": top_p, "truncation": truncation, "user": user, @@ -2058,20 +2450,29 @@ def stream( model=model, stream=True, tools=tools, + conversation=conversation, include=include, instructions=instructions, max_output_tokens=max_output_tokens, + max_tool_calls=max_tool_calls, metadata=metadata, 
parallel_tool_calls=parallel_tool_calls, previous_response_id=previous_response_id, + prompt=prompt, + prompt_cache_key=prompt_cache_key, store=store, + stream_options=stream_options, temperature=temperature, text=text, tool_choice=tool_choice, reasoning=reasoning, + safety_identifier=safety_identifier, + service_tier=service_tier, + top_logprobs=top_logprobs, top_p=top_p, truncation=truncation, user=user, + background=background, extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, @@ -2107,25 +2508,35 @@ def stream( async def parse( self, *, - input: Union[str, ResponseInputParam], - model: Union[str, ChatModel], text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, - tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, + background: Optional[bool] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + model: ResponsesModel | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -2155,24 +2566,34 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: "/responses", body=maybe_transform( { - "input": input, - "model": model, + "background": background, + "conversation": conversation, "include": include, + "input": input, "instructions": instructions, "max_output_tokens": max_output_tokens, + "max_tool_calls": max_tool_calls, "metadata": metadata, + "model": model, "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, + "prompt": prompt, + "prompt_cache_key": prompt_cache_key, "reasoning": reasoning, + "safety_identifier": safety_identifier, + "service_tier": service_tier, "store": store, "stream": stream, + "stream_options": stream_options, "temperature": temperature, "text": text, "tool_choice": tool_choice, "tools": tools, + "top_logprobs": top_logprobs, "top_p": top_p, "truncation": truncation, "user": user, + "verbosity": verbosity, }, response_create_params.ResponseCreateParams, ), @@ -2194,6 +2615,7 @@ async def retrieve( response_id: str, *, include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, stream: Literal[False] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -2258,6 +2680,13 @@ async def retrieve( include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. + include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random + characters to an `obfuscation` field on streaming delta events to normalize + payload sizes as a mitigation to certain side-channel attacks. These obfuscation + fields are included by default, but add a small amount of overhead to the data + stream. You can set `include_obfuscation` to false to optimize for bandwidth if + you trust the network links between your application and the OpenAI API. + starting_after: The sequence number of the event after which to start streaming. stream: If set to true, the model response data will be streamed to the client as it is @@ -2284,6 +2713,7 @@ async def retrieve( *, stream: Literal[True], include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -2306,6 +2736,13 @@ async def retrieve( include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. + include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random + characters to an `obfuscation` field on streaming delta events to normalize + payload sizes as a mitigation to certain side-channel attacks. These obfuscation + fields are included by default, but add a small amount of overhead to the data + stream. You can set `include_obfuscation` to false to optimize for bandwidth if + you trust the network links between your application and the OpenAI API. + starting_after: The sequence number of the event after which to start streaming. 
extra_headers: Send extra headers @@ -2325,6 +2762,7 @@ async def retrieve( *, stream: bool, include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -2347,6 +2785,13 @@ async def retrieve( include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. + include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random + characters to an `obfuscation` field on streaming delta events to normalize + payload sizes as a mitigation to certain side-channel attacks. These obfuscation + fields are included by default, but add a small amount of overhead to the data + stream. You can set `include_obfuscation` to false to optimize for bandwidth if + you trust the network links between your application and the OpenAI API. + starting_after: The sequence number of the event after which to start streaming. extra_headers: Send extra headers @@ -2364,6 +2809,7 @@ async def retrieve( response_id: str, *, include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -2385,6 +2831,7 @@ async def retrieve( query=await async_maybe_transform( { "include": include, + "include_obfuscation": include_obfuscation, "starting_after": starting_after, "stream": stream, }, diff --git a/portkey_ai/_vendor/openai/resources/uploads/uploads.py b/portkey_ai/_vendor/openai/resources/uploads/uploads.py index ecfcee48..8811bed4 100644 --- a/portkey_ai/_vendor/openai/resources/uploads/uploads.py +++ b/portkey_ai/_vendor/openai/resources/uploads/uploads.py @@ -6,7 +6,7 @@ import os import logging import builtins -from typing import List, overload +from typing import overload from pathlib import Path import anyio @@ -22,7 +22,7 @@ AsyncPartsWithStreamingResponse, ) from ...types import FilePurpose, upload_create_params, upload_complete_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -170,6 +170,7 @@ def create( filename: str, mime_type: str, purpose: FilePurpose, + expires_after: upload_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -213,6 +214,9 @@ def create( See the [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire + after 30 days and all other files are persisted until they are manually deleted. 
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -229,6 +233,7 @@ def create( "filename": filename, "mime_type": mime_type, "purpose": purpose, + "expires_after": expires_after, }, upload_create_params.UploadCreateParams, ), @@ -276,7 +281,7 @@ def complete( self, upload_id: str, *, - part_ids: List[str], + part_ids: SequenceNotStr[str], md5: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -473,6 +478,7 @@ async def create( filename: str, mime_type: str, purpose: FilePurpose, + expires_after: upload_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -516,6 +522,9 @@ async def create( See the [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire + after 30 days and all other files are persisted until they are manually deleted. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -532,6 +541,7 @@ async def create( "filename": filename, "mime_type": mime_type, "purpose": purpose, + "expires_after": expires_after, }, upload_create_params.UploadCreateParams, ), @@ -579,7 +589,7 @@ async def complete( self, upload_id: str, *, - part_ids: List[str], + part_ids: SequenceNotStr[str], md5: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. diff --git a/portkey_ai/_vendor/openai/resources/vector_stores/file_batches.py b/portkey_ai/_vendor/openai/resources/vector_stores/file_batches.py index 4dd4430b..adf399d8 100644 --- a/portkey_ai/_vendor/openai/resources/vector_stores/file_batches.py +++ b/portkey_ai/_vendor/openai/resources/vector_stores/file_batches.py @@ -3,7 +3,7 @@ from __future__ import annotations import asyncio -from typing import Dict, List, Iterable, Optional +from typing import Dict, Iterable, Optional from typing_extensions import Union, Literal from concurrent.futures import Future, ThreadPoolExecutor, as_completed @@ -12,7 +12,7 @@ from ... import _legacy_response from ...types import FileChunkingStrategyParam -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes, SequenceNotStr from ..._utils import is_given, maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -52,7 +52,7 @@ def create( self, vector_store_id: str, *, - file_ids: List[str], + file_ids: SequenceNotStr[str], attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
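The upload changes above combine into a short end-to-end flow; the payload, filename, and MIME type below are placeholders, and the `expires_after` dict is assumed to follow the `{anchor, seconds}` shape of the expiration-policy objects shown elsewhere in this diff:

```python
# Sketch of the Upload flow with the new `expires_after` policy; `part_ids`
# may now be any non-str sequence (here a tuple). Values are placeholders.
from openai import OpenAI

client = OpenAI()

data = b'{"prompt": "hi", "completion": "hello"}\n'

upload = client.uploads.create(
    bytes=len(data),
    filename="train.jsonl",
    mime_type="text/jsonl",
    purpose="fine-tune",
    expires_after={"anchor": "created_at", "seconds": 3600},  # expire in 1 hour
)
part = client.uploads.parts.create(upload_id=upload.id, data=data)
completed = client.uploads.complete(upload.id, part_ids=(part.id,))
print(completed.status)
```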
@@ -186,7 +186,7 @@ def create_and_poll( self, vector_store_id: str, *, - file_ids: List[str], + file_ids: SequenceNotStr[str], poll_interval_ms: int | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFileBatch: @@ -320,7 +320,7 @@ def upload_and_poll( *, files: Iterable[FileTypes], max_concurrency: int = 5, - file_ids: List[str] = [], + file_ids: SequenceNotStr[str] = [], poll_interval_ms: int | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFileBatch: @@ -389,7 +389,7 @@ async def create( self, vector_store_id: str, *, - file_ids: List[str], + file_ids: SequenceNotStr[str], attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -523,7 +523,7 @@ async def create_and_poll( self, vector_store_id: str, *, - file_ids: List[str], + file_ids: SequenceNotStr[str], poll_interval_ms: int | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFileBatch: @@ -657,7 +657,7 @@ async def upload_and_poll( *, files: Iterable[FileTypes], max_concurrency: int = 5, - file_ids: List[str] = [], + file_ids: SequenceNotStr[str] = [], poll_interval_ms: int | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFileBatch: diff --git a/portkey_ai/_vendor/openai/resources/vector_stores/files.py b/portkey_ai/_vendor/openai/resources/vector_stores/files.py index f8603846..2c90bb7a 100644 --- a/portkey_ai/_vendor/openai/resources/vector_stores/files.py +++ b/portkey_ai/_vendor/openai/resources/vector_stores/files.py @@ -304,11 +304,14 @@ def create_and_poll( file_id: str, *, vector_store_id: str, + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, poll_interval_ms: int | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFile: """Attach a file to the given vector store and wait for it to be processed.""" - self.create(vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy) + self.create( + vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy, attributes=attributes + ) return self.poll( file_id, @@ -377,6 +380,7 @@ def upload_and_poll( *, vector_store_id: str, file: FileTypes, + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, poll_interval_ms: int | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFile: @@ -387,6 +391,7 @@ def upload_and_poll( file_id=file_obj.id, chunking_strategy=chunking_strategy, poll_interval_ms=poll_interval_ms, + attributes=attributes, ) def content( @@ -707,11 +712,14 @@ async def create_and_poll( file_id: str, *, vector_store_id: str, + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, poll_interval_ms: int | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFile: """Attach a file to the given vector store and wait for it to be processed.""" - await self.create(vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy) + await self.create( + vector_store_id=vector_store_id, file_id=file_id, 
chunking_strategy=chunking_strategy, attributes=attributes + ) return await self.poll( file_id, @@ -782,6 +790,7 @@ async def upload_and_poll( *, vector_store_id: str, file: FileTypes, + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, poll_interval_ms: int | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFile: @@ -792,6 +801,7 @@ async def upload_and_poll( file_id=file_obj.id, poll_interval_ms=poll_interval_ms, chunking_strategy=chunking_strategy, + attributes=attributes, ) def content( diff --git a/portkey_ai/_vendor/openai/resources/vector_stores/vector_stores.py b/portkey_ai/_vendor/openai/resources/vector_stores/vector_stores.py index 9fc17b18..4f211ea2 100644 --- a/portkey_ai/_vendor/openai/resources/vector_stores/vector_stores.py +++ b/portkey_ai/_vendor/openai/resources/vector_stores/vector_stores.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union, Optional +from typing import Union, Optional from typing_extensions import Literal import httpx @@ -23,7 +23,7 @@ vector_store_search_params, vector_store_update_params, ) -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource @@ -80,7 +80,7 @@ def create( *, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, - file_ids: List[str] | NotGiven = NOT_GIVEN, + file_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -332,7 +332,7 @@ def search( self, vector_store_id: str, *, - query: Union[str, List[str]], + query: Union[str, SequenceNotStr[str]], filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN, max_num_results: int | NotGiven = NOT_GIVEN, ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN, @@ -425,7 +425,7 @@ async def create( *, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, - file_ids: List[str] | NotGiven = NOT_GIVEN, + file_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -677,7 +677,7 @@ def search( self, vector_store_id: str, *, - query: Union[str, List[str]], + query: Union[str, SequenceNotStr[str]], filters: vector_store_search_params.Filters | NotGiven = NOT_GIVEN, max_num_results: int | NotGiven = NOT_GIVEN, ranking_options: vector_store_search_params.RankingOptions | NotGiven = NOT_GIVEN, diff --git a/portkey_ai/_vendor/openai/resources/webhooks.py b/portkey_ai/_vendor/openai/resources/webhooks.py new file mode 100644 index 00000000..3e13d3fa --- /dev/null +++ b/portkey_ai/_vendor/openai/resources/webhooks.py @@ -0,0 +1,210 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+
+from __future__ import annotations
+
+import hmac
+import json
+import time
+import base64
+import hashlib
+from typing import cast
+
+from .._types import HeadersLike
+from .._utils import get_required_header
+from .._models import construct_type
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._exceptions import InvalidWebhookSignatureError
+from ..types.webhooks.unwrap_webhook_event import UnwrapWebhookEvent
+
+__all__ = ["Webhooks", "AsyncWebhooks"]
+
+
+class Webhooks(SyncAPIResource):
+    def unwrap(
+        self,
+        payload: str | bytes,
+        headers: HeadersLike,
+        *,
+        secret: str | None = None,
+    ) -> UnwrapWebhookEvent:
+        """Validates that the given payload was sent by OpenAI and parses the payload."""
+        if secret is None:
+            secret = self._client.webhook_secret
+
+        self.verify_signature(payload=payload, headers=headers, secret=secret)
+
+        return cast(
+            UnwrapWebhookEvent,
+            construct_type(
+                type_=UnwrapWebhookEvent,
+                value=json.loads(payload),
+            ),
+        )
+
+    def verify_signature(
+        self,
+        payload: str | bytes,
+        headers: HeadersLike,
+        *,
+        secret: str | None = None,
+        tolerance: int = 300,
+    ) -> None:
+        """Validates whether or not the webhook payload was sent by OpenAI.
+
+        Args:
+            payload: The webhook payload
+            headers: The webhook headers
+            secret: The webhook secret (optional, will use client secret if not provided)
+            tolerance: Maximum age of the webhook in seconds (default: 300 = 5 minutes)
+        """
+        if secret is None:
+            secret = self._client.webhook_secret
+
+        if secret is None:
+            raise ValueError(
+                "The webhook secret must either be set using the env var, OPENAI_WEBHOOK_SECRET, "
+                "on the client class, OpenAI(webhook_secret='123'), or passed to this function"
+            )
+
+        signature_header = get_required_header(headers, "webhook-signature")
+        timestamp = get_required_header(headers, "webhook-timestamp")
+        webhook_id = get_required_header(headers, "webhook-id")
+
+        # Validate timestamp to prevent replay attacks
+        try:
+            timestamp_seconds = int(timestamp)
+        except ValueError:
+            raise InvalidWebhookSignatureError("Invalid webhook timestamp format") from None
+
+        now = int(time.time())
+
+        if now - timestamp_seconds > tolerance:
+            raise InvalidWebhookSignatureError("Webhook timestamp is too old") from None
+
+        if timestamp_seconds > now + tolerance:
+            raise InvalidWebhookSignatureError("Webhook timestamp is too new") from None
+
+        # Extract signatures from v1,<signature> format
+        # The signature header can have multiple values, separated by spaces.
+        # Each value is in the format v1,<signature>. We should accept if any match.
+        signatures: list[str] = []
+        for part in signature_header.split():
+            if part.startswith("v1,"):
+                signatures.append(part[3:])
+            else:
+                signatures.append(part)
+
+        # Decode the secret if it starts with whsec_
+        if secret.startswith("whsec_"):
+            decoded_secret = base64.b64decode(secret[6:])
+        else:
+            decoded_secret = secret.encode()
+
+        body = payload.decode("utf-8") if isinstance(payload, bytes) else payload
+
+        # Prepare the signed payload (OpenAI uses webhookId.timestamp.payload format)
+        signed_payload = f"{webhook_id}.{timestamp}.{body}"
+        expected_signature = base64.b64encode(
+            hmac.new(decoded_secret, signed_payload.encode(), hashlib.sha256).digest()
+        ).decode()
+
+        # Accept if any signature matches
+        if not any(hmac.compare_digest(expected_signature, sig) for sig in signatures):
+            raise InvalidWebhookSignatureError(
+                "The given webhook signature does not match the expected signature"
+            ) from None
+
+
+class AsyncWebhooks(AsyncAPIResource):
+    def unwrap(
+        self,
+        payload: str | bytes,
+        headers: HeadersLike,
+        *,
+        secret: str | None = None,
+    ) -> UnwrapWebhookEvent:
+        """Validates that the given payload was sent by OpenAI and parses the payload."""
+        if secret is None:
+            secret = self._client.webhook_secret
+
+        self.verify_signature(payload=payload, headers=headers, secret=secret)
+
+        body = payload.decode("utf-8") if isinstance(payload, bytes) else payload
+        return cast(
+            UnwrapWebhookEvent,
+            construct_type(
+                type_=UnwrapWebhookEvent,
+                value=json.loads(body),
+            ),
+        )
+
+    def verify_signature(
+        self,
+        payload: str | bytes,
+        headers: HeadersLike,
+        *,
+        secret: str | None = None,
+        tolerance: int = 300,
+    ) -> None:
+        """Validates whether or not the webhook payload was sent by OpenAI.
+
+        Args:
+            payload: The webhook payload
+            headers: The webhook headers
+            secret: The webhook secret (optional, will use client secret if not provided)
+            tolerance: Maximum age of the webhook in seconds (default: 300 = 5 minutes)
+        """
+        if secret is None:
+            secret = self._client.webhook_secret
+
+        if secret is None:
+            raise ValueError(
+                "The webhook secret must either be set using the env var, OPENAI_WEBHOOK_SECRET, "
+                "on the client class, OpenAI(webhook_secret='123'), or passed to this function"
+            ) from None
+
+        signature_header = get_required_header(headers, "webhook-signature")
+        timestamp = get_required_header(headers, "webhook-timestamp")
+        webhook_id = get_required_header(headers, "webhook-id")
+
+        # Validate timestamp to prevent replay attacks
+        try:
+            timestamp_seconds = int(timestamp)
+        except ValueError:
+            raise InvalidWebhookSignatureError("Invalid webhook timestamp format") from None
+
+        now = int(time.time())
+
+        if now - timestamp_seconds > tolerance:
+            raise InvalidWebhookSignatureError("Webhook timestamp is too old") from None
+
+        if timestamp_seconds > now + tolerance:
+            raise InvalidWebhookSignatureError("Webhook timestamp is too new") from None
+
+        # Extract signatures from v1,<signature> format
+        # The signature header can have multiple values, separated by spaces.
+        # Each value is in the format v1,<signature>. We should accept if any match.
+ signatures: list[str] = [] + for part in signature_header.split(): + if part.startswith("v1,"): + signatures.append(part[3:]) + else: + signatures.append(part) + + # Decode the secret if it starts with whsec_ + if secret.startswith("whsec_"): + decoded_secret = base64.b64decode(secret[6:]) + else: + decoded_secret = secret.encode() + + body = payload.decode("utf-8") if isinstance(payload, bytes) else payload + + # Prepare the signed payload (OpenAI uses webhookId.timestamp.payload format) + signed_payload = f"{webhook_id}.{timestamp}.{body}" + expected_signature = base64.b64encode( + hmac.new(decoded_secret, signed_payload.encode(), hashlib.sha256).digest() + ).decode() + + # Accept if any signature matches + if not any(hmac.compare_digest(expected_signature, sig) for sig in signatures): + raise InvalidWebhookSignatureError("The given webhook signature does not match the expected signature") diff --git a/portkey_ai/_vendor/openai/types/__init__.py b/portkey_ai/_vendor/openai/types/__init__.py index 453b26f5..1844f71b 100644 --- a/portkey_ai/_vendor/openai/types/__init__.py +++ b/portkey_ai/_vendor/openai/types/__init__.py @@ -18,8 +18,11 @@ FunctionDefinition as FunctionDefinition, FunctionParameters as FunctionParameters, ResponseFormatText as ResponseFormatText, + CustomToolInputFormat as CustomToolInputFormat, ResponseFormatJSONObject as ResponseFormatJSONObject, ResponseFormatJSONSchema as ResponseFormatJSONSchema, + ResponseFormatTextPython as ResponseFormatTextPython, + ResponseFormatTextGrammar as ResponseFormatTextGrammar, ) from .upload import Upload as Upload from .embedding import Embedding as Embedding @@ -60,15 +63,19 @@ from .image_generate_params import ImageGenerateParams as ImageGenerateParams from .eval_retrieve_response import EvalRetrieveResponse as EvalRetrieveResponse from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy +from .image_gen_stream_event import ImageGenStreamEvent as ImageGenStreamEvent from .upload_complete_params import UploadCompleteParams as UploadCompleteParams from .container_create_params import ContainerCreateParams as ContainerCreateParams from .container_list_response import ContainerListResponse as ContainerListResponse from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams +from .image_edit_stream_event import ImageEditStreamEvent as ImageEditStreamEvent from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .moderation_create_params import ModerationCreateParams as ModerationCreateParams from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams from .container_create_response import ContainerCreateResponse as ContainerCreateResponse from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse +from .image_gen_completed_event import ImageGenCompletedEvent as ImageGenCompletedEvent +from .image_edit_completed_event import ImageEditCompletedEvent as ImageEditCompletedEvent from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams @@ -79,8 +86,10 @@ from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions from .image_create_variation_params import 
ImageCreateVariationParams as ImageCreateVariationParams +from .image_gen_partial_image_event import ImageGenPartialImageEvent as ImageGenPartialImageEvent from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy from .eval_custom_data_source_config import EvalCustomDataSourceConfig as EvalCustomDataSourceConfig +from .image_edit_partial_image_event import ImageEditPartialImageEvent as ImageEditPartialImageEvent from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam diff --git a/portkey_ai/_vendor/openai/types/audio/speech_create_params.py b/portkey_ai/_vendor/openai/types/audio/speech_create_params.py index 905ca5c3..634d7881 100644 --- a/portkey_ai/_vendor/openai/types/audio/speech_create_params.py +++ b/portkey_ai/_vendor/openai/types/audio/speech_create_params.py @@ -21,9 +21,7 @@ class SpeechCreateParams(TypedDict, total=False): """ voice: Required[ - Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]] ] """The voice to use when generating the audio. @@ -48,6 +46,12 @@ class SpeechCreateParams(TypedDict, total=False): speed: float """The speed of the generated audio. - Select a value from `0.25` to `4.0`. `1.0` is the default. Does not work with - `gpt-4o-mini-tts`. + Select a value from `0.25` to `4.0`. `1.0` is the default. + """ + + stream_format: Literal["sse", "audio"] + """The format to stream the audio in. + + Supported formats are `sse` and `audio`. `sse` is not supported for `tts-1` or + `tts-1-hd`. """ diff --git a/portkey_ai/_vendor/openai/types/audio/transcription.py b/portkey_ai/_vendor/openai/types/audio/transcription.py index 15763854..4c588215 100644 --- a/portkey_ai/_vendor/openai/types/audio/transcription.py +++ b/portkey_ai/_vendor/openai/types/audio/transcription.py @@ -1,10 +1,12 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Optional +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias +from ..._utils import PropertyInfo from ..._models import BaseModel -__all__ = ["Transcription", "Logprob"] +__all__ = ["Transcription", "Logprob", "Usage", "UsageTokens", "UsageTokensInputTokenDetails", "UsageDuration"] class Logprob(BaseModel): @@ -18,6 +20,42 @@ class Logprob(BaseModel): """The log probability of the token.""" +class UsageTokensInputTokenDetails(BaseModel): + audio_tokens: Optional[int] = None + """Number of audio tokens billed for this request.""" + + text_tokens: Optional[int] = None + """Number of text tokens billed for this request.""" + + +class UsageTokens(BaseModel): + input_tokens: int + """Number of input tokens billed for this request.""" + + output_tokens: int + """Number of output tokens generated.""" + + total_tokens: int + """Total number of tokens used (input + output).""" + + type: Literal["tokens"] + """The type of the usage object. 
Always `tokens` for this variant.""" + + input_token_details: Optional[UsageTokensInputTokenDetails] = None + """Details about the input tokens billed for this request.""" + + +class UsageDuration(BaseModel): + seconds: float + """Duration of the input audio in seconds.""" + + type: Literal["duration"] + """The type of the usage object. Always `duration` for this variant.""" + + +Usage: TypeAlias = Annotated[Union[UsageTokens, UsageDuration], PropertyInfo(discriminator="type")] + + class Transcription(BaseModel): text: str """The transcribed text.""" @@ -28,3 +66,6 @@ class Transcription(BaseModel): Only returned with the models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added to the `include` array. """ + + usage: Optional[Usage] = None + """Token usage statistics for the request.""" diff --git a/portkey_ai/_vendor/openai/types/audio/transcription_text_done_event.py b/portkey_ai/_vendor/openai/types/audio/transcription_text_done_event.py index c8875a1b..9665edc5 100644 --- a/portkey_ai/_vendor/openai/types/audio/transcription_text_done_event.py +++ b/portkey_ai/_vendor/openai/types/audio/transcription_text_done_event.py @@ -5,7 +5,7 @@ from ..._models import BaseModel -__all__ = ["TranscriptionTextDoneEvent", "Logprob"] +__all__ = ["TranscriptionTextDoneEvent", "Logprob", "Usage", "UsageInputTokenDetails"] class Logprob(BaseModel): @@ -19,6 +19,31 @@ class Logprob(BaseModel): """The log probability of the token.""" +class UsageInputTokenDetails(BaseModel): + audio_tokens: Optional[int] = None + """Number of audio tokens billed for this request.""" + + text_tokens: Optional[int] = None + """Number of text tokens billed for this request.""" + + +class Usage(BaseModel): + input_tokens: int + """Number of input tokens billed for this request.""" + + output_tokens: int + """Number of output tokens generated.""" + + total_tokens: int + """Total number of tokens used (input + output).""" + + type: Literal["tokens"] + """The type of the usage object. Always `tokens` for this variant.""" + + input_token_details: Optional[UsageInputTokenDetails] = None + """Details about the input tokens billed for this request.""" + + class TranscriptionTextDoneEvent(BaseModel): text: str """The text that was transcribed.""" @@ -33,3 +58,6 @@ class TranscriptionTextDoneEvent(BaseModel): [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with the `include[]` parameter set to `logprobs`. """ + + usage: Optional[Usage] = None + """Usage statistics for models billed by token usage.""" diff --git a/portkey_ai/_vendor/openai/types/audio/transcription_verbose.py b/portkey_ai/_vendor/openai/types/audio/transcription_verbose.py index 2a670189..addda71e 100644 --- a/portkey_ai/_vendor/openai/types/audio/transcription_verbose.py +++ b/portkey_ai/_vendor/openai/types/audio/transcription_verbose.py @@ -1,12 +1,21 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import List, Optional +from typing_extensions import Literal from ..._models import BaseModel from .transcription_word import TranscriptionWord from .transcription_segment import TranscriptionSegment -__all__ = ["TranscriptionVerbose"] +__all__ = ["TranscriptionVerbose", "Usage"] + + +class Usage(BaseModel): + seconds: float + """Duration of the input audio in seconds.""" + + type: Literal["duration"] + """The type of the usage object. 
Always `duration` for this variant.""" class TranscriptionVerbose(BaseModel): @@ -22,5 +31,8 @@ class TranscriptionVerbose(BaseModel): segments: Optional[List[TranscriptionSegment]] = None """Segments of the transcribed text and their corresponding details.""" + usage: Optional[Usage] = None + """Usage statistics for models billed by audio input duration.""" + words: Optional[List[TranscriptionWord]] = None """Extracted words and their corresponding timestamps.""" diff --git a/portkey_ai/_vendor/openai/types/batch_create_params.py b/portkey_ai/_vendor/openai/types/batch_create_params.py index cc95afd3..c0f9034d 100644 --- a/portkey_ai/_vendor/openai/types/batch_create_params.py +++ b/portkey_ai/_vendor/openai/types/batch_create_params.py @@ -7,7 +7,7 @@ from .shared_params.metadata import Metadata -__all__ = ["BatchCreateParams"] +__all__ = ["BatchCreateParams", "OutputExpiresAfter"] class BatchCreateParams(TypedDict, total=False): @@ -47,3 +47,24 @@ class BatchCreateParams(TypedDict, total=False): Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. """ + + output_expires_after: OutputExpiresAfter + """ + The expiration policy for the output and/or error file that are generated for a + batch. + """ + + +class OutputExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["created_at"]] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `created_at`. Note that the anchor is the file creation time, + not the time the batch is created. + """ + + seconds: Required[int] + """The number of seconds after the anchor time that the file will expire. + + Must be between 3600 (1 hour) and 2592000 (30 days). + """ diff --git a/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py b/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py index 8b3c3318..07f8f28f 100644 --- a/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Union, Iterable, Optional +from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..._types import SequenceNotStr from ..shared.chat_model import ChatModel from .assistant_tool_param import AssistantToolParam from ..shared_params.metadata import Metadata @@ -58,12 +59,12 @@ class AssistantCreateParams(TypedDict, total=False): """The name of the assistant. The maximum length is 256 characters.""" reasoning_effort: Optional[ReasoningEffort] - """**o-series models only** - + """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. """ response_format: Optional[AssistantResponseFormatOptionParam] @@ -123,7 +124,7 @@ class AssistantCreateParams(TypedDict, total=False): class ToolResourcesCodeInterpreter(TypedDict, total=False): - file_ids: List[str] + file_ids: SequenceNotStr[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. 
There can be a maximum of 20 files @@ -170,7 +171,7 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False): If not set, will use the `auto` strategy. """ - file_ids: List[str] + file_ids: SequenceNotStr[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector @@ -189,7 +190,7 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False): class ToolResourcesFileSearch(TypedDict, total=False): - vector_store_ids: List[str] + vector_store_ids: SequenceNotStr[str] """ The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) diff --git a/portkey_ai/_vendor/openai/types/beta/assistant_update_params.py b/portkey_ai/_vendor/openai/types/beta/assistant_update_params.py index b28094a6..45d9f984 100644 --- a/portkey_ai/_vendor/openai/types/beta/assistant_update_params.py +++ b/portkey_ai/_vendor/openai/types/beta/assistant_update_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Union, Iterable, Optional +from typing import Union, Iterable, Optional from typing_extensions import Literal, TypedDict +from ..._types import SequenceNotStr from .assistant_tool_param import AssistantToolParam from ..shared_params.metadata import Metadata from ..shared.reasoning_effort import ReasoningEffort @@ -36,6 +37,12 @@ class AssistantUpdateParams(TypedDict, total=False): model: Union[ str, Literal[ + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", @@ -87,12 +94,12 @@ class AssistantUpdateParams(TypedDict, total=False): """The name of the assistant. The maximum length is 256 characters.""" reasoning_effort: Optional[ReasoningEffort] - """**o-series models only** - + """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. """ response_format: Optional[AssistantResponseFormatOptionParam] @@ -152,7 +159,7 @@ class AssistantUpdateParams(TypedDict, total=False): class ToolResourcesCodeInterpreter(TypedDict, total=False): - file_ids: List[str] + file_ids: SequenceNotStr[str] """ Overrides the list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available @@ -162,7 +169,7 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): class ToolResourcesFileSearch(TypedDict, total=False): - vector_store_ids: List[str] + vector_store_ids: SequenceNotStr[str] """ Overrides the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item.py index 4edf6c4d..21b7a8ac 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item.py @@ -50,8 +50,8 @@ class ConversationItem(BaseModel): for `message` items. """ - status: Optional[Literal["completed", "incomplete"]] = None - """The status of the item (`completed`, `incomplete`). 
+ status: Optional[Literal["completed", "incomplete", "in_progress"]] = None + """The status of the item (`completed`, `incomplete`, `in_progress`). These have no effect on the conversation, but are accepted for consistency with the `conversation.item.created` event. diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_content.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_content.py index ab40a4a1..fe9cef80 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_content.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_content.py @@ -23,7 +23,10 @@ class ConversationItemContent(BaseModel): """The text content, used for `input_text` and `text` content types.""" transcript: Optional[str] = None - """The transcript of the audio, used for `input_audio` content type.""" + """The transcript of the audio, used for `input_audio` and `audio` content types.""" - type: Optional[Literal["input_text", "input_audio", "item_reference", "text"]] = None - """The content type (`input_text`, `input_audio`, `item_reference`, `text`).""" + type: Optional[Literal["input_text", "input_audio", "item_reference", "text", "audio"]] = None + """ + The content type (`input_text`, `input_audio`, `item_reference`, `text`, + `audio`). + """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_content_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_content_param.py index 7a3a92a3..6042e7f9 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_content_param.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_content_param.py @@ -22,7 +22,10 @@ class ConversationItemContentParam(TypedDict, total=False): """The text content, used for `input_text` and `text` content types.""" transcript: str - """The transcript of the audio, used for `input_audio` content type.""" + """The transcript of the audio, used for `input_audio` and `audio` content types.""" - type: Literal["input_text", "input_audio", "item_reference", "text"] - """The content type (`input_text`, `input_audio`, `item_reference`, `text`).""" + type: Literal["input_text", "input_audio", "item_reference", "text", "audio"] + """ + The content type (`input_text`, `input_audio`, `item_reference`, `text`, + `audio`). + """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_created_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_created_event.py index 2f203882..aea7ad5b 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_created_event.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_created_event.py @@ -1,5 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import Optional from typing_extensions import Literal from ...._models import BaseModel @@ -15,11 +16,12 @@ class ConversationItemCreatedEvent(BaseModel): item: ConversationItem """The item to add to the conversation.""" - previous_item_id: str + type: Literal["conversation.item.created"] + """The event type, must be `conversation.item.created`.""" + + previous_item_id: Optional[str] = None """ The ID of the preceding item in the Conversation context, allows the client to - understand the order of the conversation. + understand the order of the conversation. Can be `null` if the item has no + predecessor. 
""" - - type: Literal["conversation.item.created"] - """The event type, must be `conversation.item.created`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py index 46981169..e7c457d4 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py @@ -1,11 +1,54 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Optional -from typing_extensions import Literal +from typing import List, Union, Optional +from typing_extensions import Literal, TypeAlias from ...._models import BaseModel -__all__ = ["ConversationItemInputAudioTranscriptionCompletedEvent", "Logprob"] +__all__ = [ + "ConversationItemInputAudioTranscriptionCompletedEvent", + "Usage", + "UsageTranscriptTextUsageTokens", + "UsageTranscriptTextUsageTokensInputTokenDetails", + "UsageTranscriptTextUsageDuration", + "Logprob", +] + + +class UsageTranscriptTextUsageTokensInputTokenDetails(BaseModel): + audio_tokens: Optional[int] = None + """Number of audio tokens billed for this request.""" + + text_tokens: Optional[int] = None + """Number of text tokens billed for this request.""" + + +class UsageTranscriptTextUsageTokens(BaseModel): + input_tokens: int + """Number of input tokens billed for this request.""" + + output_tokens: int + """Number of output tokens generated.""" + + total_tokens: int + """Total number of tokens used (input + output).""" + + type: Literal["tokens"] + """The type of the usage object. Always `tokens` for this variant.""" + + input_token_details: Optional[UsageTranscriptTextUsageTokensInputTokenDetails] = None + """Details about the input tokens billed for this request.""" + + +class UsageTranscriptTextUsageDuration(BaseModel): + seconds: float + """Duration of the input audio in seconds.""" + + type: Literal["duration"] + """The type of the usage object. Always `duration` for this variant.""" + + +Usage: TypeAlias = Union[UsageTranscriptTextUsageTokens, UsageTranscriptTextUsageDuration] class Logprob(BaseModel): @@ -37,5 +80,8 @@ class ConversationItemInputAudioTranscriptionCompletedEvent(BaseModel): The event type, must be `conversation.item.input_audio_transcription.completed`. """ + usage: Usage + """Usage statistics for the transcription.""" + logprobs: Optional[List[Logprob]] = None """The log probabilities of the transcription.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_param.py index ac0f8431..8bbd539c 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_param.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_param.py @@ -51,8 +51,8 @@ class ConversationItemParam(TypedDict, total=False): for `message` items. """ - status: Literal["completed", "incomplete"] - """The status of the item (`completed`, `incomplete`). + status: Literal["completed", "incomplete", "in_progress"] + """The status of the item (`completed`, `incomplete`, `in_progress`). These have no effect on the conversation, but are accepted for consistency with the `conversation.item.created` event. 
diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_with_reference.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_with_reference.py index 31806afc..0edcfc76 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_with_reference.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_with_reference.py @@ -4,9 +4,29 @@ from typing_extensions import Literal from ...._models import BaseModel -from .conversation_item_content import ConversationItemContent -__all__ = ["ConversationItemWithReference"] +__all__ = ["ConversationItemWithReference", "Content"] + + +class Content(BaseModel): + id: Optional[str] = None + """ + ID of a previous conversation item to reference (for `item_reference` content + types in `response.create` events). These can reference both client and server + created items. + """ + + audio: Optional[str] = None + """Base64-encoded audio bytes, used for `input_audio` content type.""" + + text: Optional[str] = None + """The text content, used for `input_text` and `text` content types.""" + + transcript: Optional[str] = None + """The transcript of the audio, used for `input_audio` content type.""" + + type: Optional[Literal["input_text", "input_audio", "item_reference", "text"]] = None + """The content type (`input_text`, `input_audio`, `item_reference`, `text`).""" class ConversationItemWithReference(BaseModel): @@ -30,7 +50,7 @@ class ConversationItemWithReference(BaseModel): `function_call` item with the same ID exists in the conversation history. """ - content: Optional[List[ConversationItemContent]] = None + content: Optional[List[Content]] = None """The content of the message, applicable for `message` items. - Message items of role `system` support only `input_text` content @@ -53,8 +73,8 @@ class ConversationItemWithReference(BaseModel): for `message` items. """ - status: Optional[Literal["completed", "incomplete"]] = None - """The status of the item (`completed`, `incomplete`). + status: Optional[Literal["completed", "incomplete", "in_progress"]] = None + """The status of the item (`completed`, `incomplete`, `in_progress`). These have no effect on the conversation, but are accepted for consistency with the `conversation.item.created` event. diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_with_reference_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_with_reference_param.py index e266cdce..c83dc92a 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_with_reference_param.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_with_reference_param.py @@ -5,9 +5,28 @@ from typing import Iterable from typing_extensions import Literal, TypedDict -from .conversation_item_content_param import ConversationItemContentParam +__all__ = ["ConversationItemWithReferenceParam", "Content"] -__all__ = ["ConversationItemWithReferenceParam"] + +class Content(TypedDict, total=False): + id: str + """ + ID of a previous conversation item to reference (for `item_reference` content + types in `response.create` events). These can reference both client and server + created items. 
+ """ + + audio: str + """Base64-encoded audio bytes, used for `input_audio` content type.""" + + text: str + """The text content, used for `input_text` and `text` content types.""" + + transcript: str + """The transcript of the audio, used for `input_audio` content type.""" + + type: Literal["input_text", "input_audio", "item_reference", "text"] + """The content type (`input_text`, `input_audio`, `item_reference`, `text`).""" class ConversationItemWithReferenceParam(TypedDict, total=False): @@ -31,7 +50,7 @@ class ConversationItemWithReferenceParam(TypedDict, total=False): `function_call` item with the same ID exists in the conversation history. """ - content: Iterable[ConversationItemContentParam] + content: Iterable[Content] """The content of the message, applicable for `message` items. - Message items of role `system` support only `input_text` content @@ -54,8 +73,8 @@ class ConversationItemWithReferenceParam(TypedDict, total=False): for `message` items. """ - status: Literal["completed", "incomplete"] - """The status of the item (`completed`, `incomplete`). + status: Literal["completed", "incomplete", "in_progress"] + """The status of the item (`completed`, `incomplete`, `in_progress`). These have no effect on the conversation, but are accepted for consistency with the `conversation.item.created` event. diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_committed_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_committed_event.py index 3071eff3..22eb53b1 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_committed_event.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_committed_event.py @@ -1,5 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import Optional from typing_extensions import Literal from ...._models import BaseModel @@ -14,8 +15,11 @@ class InputAudioBufferCommittedEvent(BaseModel): item_id: str """The ID of the user message item that will be created.""" - previous_item_id: str - """The ID of the preceding item after which the new item will be inserted.""" - type: Literal["input_audio_buffer.committed"] """The event type, must be `input_audio_buffer.committed`.""" + + previous_item_id: Optional[str] = None + """ + The ID of the preceding item after which the new item will be inserted. Can be + `null` if the item has no predecessor. + """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/realtime_response.py b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_response.py index 8ecfb91c..ccc97c5d 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/realtime_response.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_response.py @@ -60,10 +60,10 @@ class RealtimeResponse(BaseModel): output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" - status: Optional[Literal["completed", "cancelled", "failed", "incomplete"]] = None + status: Optional[Literal["completed", "cancelled", "failed", "incomplete", "in_progress"]] = None """ The final status of the response (`completed`, `cancelled`, `failed`, or - `incomplete`). + `incomplete`, `in_progress`). """ status_details: Optional[RealtimeResponseStatus] = None @@ -80,13 +80,8 @@ class RealtimeResponse(BaseModel): will become the input for later turns. 
""" - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """ The voice the model used to respond. Current voice options are `alloy`, `ash`, - `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and - `verse`. + `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_create_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_create_event.py index 3b8a6de8..7219cedb 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/response_create_event.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_create_event.py @@ -101,16 +101,12 @@ class Response(BaseModel): tools: Optional[List[ResponseTool]] = None """Tools (functions) available to the model.""" - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_create_event_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_create_event_param.py index c569d507..b4d54bba 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/response_create_event_param.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_create_event_param.py @@ -102,14 +102,12 @@ class Response(TypedDict, total=False): tools: Iterable[ResponseTool] """Tools (functions) available to the model.""" - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/session.py b/portkey_ai/_vendor/openai/types/beta/realtime/session.py index 606fd838..f478a92f 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/session.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/session.py @@ -260,7 +260,7 @@ class Session(BaseModel): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. 
For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and @@ -268,14 +268,10 @@ class Session(BaseModel): natural conversations, but may have a higher latency. """ - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/session_create_params.py b/portkey_ai/_vendor/openai/types/beta/realtime/session_create_params.py index cebf67c7..8a477f98 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/session_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/session_create_params.py @@ -3,12 +3,12 @@ from __future__ import annotations from typing import List, Union, Iterable -from typing_extensions import Literal, TypeAlias, TypedDict +from typing_extensions import Literal, Required, TypeAlias, TypedDict __all__ = [ "SessionCreateParams", "ClientSecret", - "ClientSecretExpiresAt", + "ClientSecretExpiresAfter", "InputAudioNoiseReduction", "InputAudioTranscription", "Tool", @@ -137,7 +137,7 @@ class SessionCreateParams(TypedDict, total=False): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and @@ -145,19 +145,17 @@ class SessionCreateParams(TypedDict, total=False): natural conversations, but may have a higher latency. """ - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ -class ClientSecretExpiresAt(TypedDict, total=False): - anchor: Literal["created_at"] +class ClientSecretExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["created_at"]] """The anchor point for the ephemeral token expiration. Only `created_at` is currently supported. 
@@ -171,7 +169,7 @@ class ClientSecretExpiresAt(TypedDict, total=False): class ClientSecret(TypedDict, total=False): - expires_at: ClientSecretExpiresAt + expires_after: ClientSecretExpiresAfter """Configuration for the ephemeral token expiration.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/session_create_response.py b/portkey_ai/_vendor/openai/types/beta/realtime/session_create_response.py index 81fed95f..471da036 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/session_create_response.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/session_create_response.py @@ -33,10 +33,7 @@ class ClientSecret(BaseModel): class InputAudioTranscription(BaseModel): model: Optional[str] = None - """ - The model to use for transcription, `whisper-1` is the only currently supported - model. - """ + """The model to use for transcription.""" class Tool(BaseModel): @@ -116,8 +113,8 @@ class SessionCreateResponse(BaseModel): Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. Transcription runs - asynchronously through Whisper and should be treated as rough guidance rather - than the representation understood by the model. + asynchronously and should be treated as rough guidance rather than the + representation understood by the model. """ instructions: Optional[str] = None @@ -190,14 +187,10 @@ class SessionCreateResponse(BaseModel): speech. """ - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo` `sage`, `shimmer` and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/session_update_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/session_update_event.py index 8bb6a0e2..11929ab3 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/session_update_event.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/session_update_event.py @@ -9,7 +9,7 @@ "SessionUpdateEvent", "Session", "SessionClientSecret", - "SessionClientSecretExpiresAt", + "SessionClientSecretExpiresAfter", "SessionInputAudioNoiseReduction", "SessionInputAudioTranscription", "SessionTool", @@ -19,8 +19,8 @@ ] -class SessionClientSecretExpiresAt(BaseModel): - anchor: Optional[Literal["created_at"]] = None +class SessionClientSecretExpiresAfter(BaseModel): + anchor: Literal["created_at"] """The anchor point for the ephemeral token expiration. Only `created_at` is currently supported. @@ -34,7 +34,7 @@ class SessionClientSecretExpiresAt(BaseModel): class SessionClientSecret(BaseModel): - expires_at: Optional[SessionClientSecretExpiresAt] = None + expires_after: Optional[SessionClientSecretExpiresAfter] = None """Configuration for the ephemeral token expiration.""" @@ -282,7 +282,7 @@ class Session(BaseModel): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. 
- Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and @@ -290,16 +290,12 @@ class Session(BaseModel): natural conversations, but may have a higher latency. """ - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/session_update_event_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/session_update_event_param.py index a10de540..e939f4cc 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/session_update_event_param.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/session_update_event_param.py @@ -9,7 +9,7 @@ "SessionUpdateEventParam", "Session", "SessionClientSecret", - "SessionClientSecretExpiresAt", + "SessionClientSecretExpiresAfter", "SessionInputAudioNoiseReduction", "SessionInputAudioTranscription", "SessionTool", @@ -19,8 +19,8 @@ ] -class SessionClientSecretExpiresAt(TypedDict, total=False): - anchor: Literal["created_at"] +class SessionClientSecretExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["created_at"]] """The anchor point for the ephemeral token expiration. Only `created_at` is currently supported. @@ -34,7 +34,7 @@ class SessionClientSecretExpiresAt(TypedDict, total=False): class SessionClientSecret(TypedDict, total=False): - expires_at: SessionClientSecretExpiresAt + expires_after: SessionClientSecretExpiresAfter """Configuration for the ephemeral token expiration.""" @@ -280,7 +280,7 @@ class Session(TypedDict, total=False): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and @@ -288,14 +288,12 @@ class Session(TypedDict, total=False): natural conversations, but may have a higher latency. """ - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. 
Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/transcription_session_create_params.py b/portkey_ai/_vendor/openai/types/beta/realtime/transcription_session_create_params.py index 15b2f14c..3ac3af4f 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/transcription_session_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/transcription_session_create_params.py @@ -61,7 +61,7 @@ class TranscriptionSessionCreateParams(TypedDict, total=False): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/transcription_session_update.py b/portkey_ai/_vendor/openai/types/beta/realtime/transcription_session_update.py index 73253b68..5ae1ad22 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/transcription_session_update.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/transcription_session_update.py @@ -165,7 +165,7 @@ class Session(BaseModel): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/transcription_session_update_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/transcription_session_update_param.py index 6b38a9af..d7065f61 100644 --- a/portkey_ai/_vendor/openai/types/beta/realtime/transcription_session_update_param.py +++ b/portkey_ai/_vendor/openai/types/beta/realtime/transcription_session_update_param.py @@ -165,7 +165,7 @@ class Session(TypedDict, total=False): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. 
For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py b/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py index d8137105..734e5e2a 100644 --- a/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py +++ b/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Union, Iterable, Optional +from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..._types import SequenceNotStr from ..shared.chat_model import ChatModel from .assistant_tool_param import AssistantToolParam from ..shared_params.metadata import Metadata @@ -169,7 +170,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): truncation_strategy: Optional[TruncationStrategy] """Controls for how a thread will be truncated prior to the run. - Use this to control the intial context window of the run. + Use this to control the initial context window of the run. """ @@ -217,7 +218,7 @@ class ThreadMessage(TypedDict, total=False): class ThreadToolResourcesCodeInterpreter(TypedDict, total=False): - file_ids: List[str] + file_ids: SequenceNotStr[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files @@ -265,7 +266,7 @@ class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False): If not set, will use the `auto` strategy. """ - file_ids: List[str] + file_ids: SequenceNotStr[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector @@ -284,7 +285,7 @@ class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False): class ThreadToolResourcesFileSearch(TypedDict, total=False): - vector_store_ids: List[str] + vector_store_ids: SequenceNotStr[str] """ The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) @@ -334,7 +335,7 @@ class Thread(TypedDict, total=False): class ToolResourcesCodeInterpreter(TypedDict, total=False): - file_ids: List[str] + file_ids: SequenceNotStr[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. 
There can be a maximum of 20 files @@ -343,7 +344,7 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): class ToolResourcesFileSearch(TypedDict, total=False): - vector_store_ids: List[str] + vector_store_ids: SequenceNotStr[str] """ The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) diff --git a/portkey_ai/_vendor/openai/types/beta/thread_create_params.py b/portkey_ai/_vendor/openai/types/beta/thread_create_params.py index ec1ccf19..8fd9f38d 100644 --- a/portkey_ai/_vendor/openai/types/beta/thread_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/thread_create_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Union, Iterable, Optional +from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..._types import SequenceNotStr from ..shared_params.metadata import Metadata from .code_interpreter_tool_param import CodeInterpreterToolParam from .threads.message_content_part_param import MessageContentPartParam @@ -96,7 +97,7 @@ class Message(TypedDict, total=False): class ToolResourcesCodeInterpreter(TypedDict, total=False): - file_ids: List[str] + file_ids: SequenceNotStr[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files @@ -143,7 +144,7 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False): If not set, will use the `auto` strategy. """ - file_ids: List[str] + file_ids: SequenceNotStr[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector @@ -162,7 +163,7 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False): class ToolResourcesFileSearch(TypedDict, total=False): - vector_store_ids: List[str] + vector_store_ids: SequenceNotStr[str] """ The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) diff --git a/portkey_ai/_vendor/openai/types/beta/thread_update_params.py b/portkey_ai/_vendor/openai/types/beta/thread_update_params.py index b47ea8f3..464ea8d7 100644 --- a/portkey_ai/_vendor/openai/types/beta/thread_update_params.py +++ b/portkey_ai/_vendor/openai/types/beta/thread_update_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Optional +from typing import Optional from typing_extensions import TypedDict +from ..._types import SequenceNotStr from ..shared_params.metadata import Metadata __all__ = ["ThreadUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] @@ -31,7 +32,7 @@ class ThreadUpdateParams(TypedDict, total=False): class ToolResourcesCodeInterpreter(TypedDict, total=False): - file_ids: List[str] + file_ids: SequenceNotStr[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made available to the `code_interpreter` tool. 
There can be a maximum of 20 files @@ -40,7 +41,7 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): class ToolResourcesFileSearch(TypedDict, total=False): - vector_store_ids: List[str] + vector_store_ids: SequenceNotStr[str] """ The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) diff --git a/portkey_ai/_vendor/openai/types/beta/threads/run.py b/portkey_ai/_vendor/openai/types/beta/threads/run.py index da9418d6..c545cc37 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/run.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/run.py @@ -228,7 +228,7 @@ class Run(BaseModel): truncation_strategy: Optional[TruncationStrategy] = None """Controls for how a thread will be truncated prior to the run. - Use this to control the intial context window of the run. + Use this to control the initial context window of the run. """ usage: Optional[Usage] = None diff --git a/portkey_ai/_vendor/openai/types/beta/threads/run_create_params.py b/portkey_ai/_vendor/openai/types/beta/threads/run_create_params.py index fc702278..cfd272f5 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/run_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/run_create_params.py @@ -108,12 +108,12 @@ class RunCreateParamsBase(TypedDict, total=False): """ reasoning_effort: Optional[ReasoningEffort] - """**o-series models only** - + """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. """ response_format: Optional[AssistantResponseFormatOptionParam] @@ -176,7 +176,7 @@ class RunCreateParamsBase(TypedDict, total=False): truncation_strategy: Optional[TruncationStrategy] """Controls for how a thread will be truncated prior to the run. - Use this to control the intial context window of the run. + Use this to control the initial context window of the run. 
""" diff --git a/portkey_ai/_vendor/openai/types/chat/__init__.py b/portkey_ai/_vendor/openai/types/chat/__init__.py index 0945bcad..50bdac7c 100644 --- a/portkey_ai/_vendor/openai/types/chat/__init__.py +++ b/portkey_ai/_vendor/openai/types/chat/__init__.py @@ -4,7 +4,6 @@ from .chat_completion import ChatCompletion as ChatCompletion from .chat_completion_role import ChatCompletionRole as ChatCompletionRole -from .chat_completion_tool import ChatCompletionTool as ChatCompletionTool from .chat_completion_audio import ChatCompletionAudio as ChatCompletionAudio from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk from .completion_list_params import CompletionListParams as CompletionListParams @@ -24,14 +23,24 @@ ) from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam +from .chat_completion_function_tool import ChatCompletionFunctionTool as ChatCompletionFunctionTool from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam from .chat_completion_store_message import ChatCompletionStoreMessage as ChatCompletionStoreMessage from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort -from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall +from .chat_completion_tool_union_param import ChatCompletionToolUnionParam as ChatCompletionToolUnionParam +from .chat_completion_content_part_text import ChatCompletionContentPartText as ChatCompletionContentPartText +from .chat_completion_custom_tool_param import ChatCompletionCustomToolParam as ChatCompletionCustomToolParam +from .chat_completion_message_tool_call import ( + ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, + ChatCompletionMessageToolCallUnion as ChatCompletionMessageToolCallUnion, +) +from .chat_completion_content_part_image import ChatCompletionContentPartImage as ChatCompletionContentPartImage from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam from .chat_completion_user_message_param import ChatCompletionUserMessageParam as ChatCompletionUserMessageParam +from .chat_completion_allowed_tools_param import ChatCompletionAllowedToolsParam as ChatCompletionAllowedToolsParam +from .chat_completion_function_tool_param import ChatCompletionFunctionToolParam as ChatCompletionFunctionToolParam from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam as ChatCompletionStreamOptionsParam from .chat_completion_system_message_param import ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam from .chat_completion_function_message_param import ( @@ -55,18 +64,39 @@ from .chat_completion_content_part_image_param import ( ChatCompletionContentPartImageParam as ChatCompletionContentPartImageParam, ) +from .chat_completion_message_custom_tool_call import ( + ChatCompletionMessageCustomToolCall as ChatCompletionMessageCustomToolCall, +) from .chat_completion_prediction_content_param import ( ChatCompletionPredictionContentParam as ChatCompletionPredictionContentParam, ) from .chat_completion_tool_choice_option_param import ( 
ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam, ) +from .chat_completion_allowed_tool_choice_param import ( + ChatCompletionAllowedToolChoiceParam as ChatCompletionAllowedToolChoiceParam, +) from .chat_completion_content_part_refusal_param import ( ChatCompletionContentPartRefusalParam as ChatCompletionContentPartRefusalParam, ) from .chat_completion_function_call_option_param import ( ChatCompletionFunctionCallOptionParam as ChatCompletionFunctionCallOptionParam, ) +from .chat_completion_message_function_tool_call import ( + ChatCompletionMessageFunctionToolCall as ChatCompletionMessageFunctionToolCall, +) +from .chat_completion_message_tool_call_union_param import ( + ChatCompletionMessageToolCallUnionParam as ChatCompletionMessageToolCallUnionParam, +) from .chat_completion_content_part_input_audio_param import ( ChatCompletionContentPartInputAudioParam as ChatCompletionContentPartInputAudioParam, ) +from .chat_completion_message_custom_tool_call_param import ( + ChatCompletionMessageCustomToolCallParam as ChatCompletionMessageCustomToolCallParam, +) +from .chat_completion_named_tool_choice_custom_param import ( + ChatCompletionNamedToolChoiceCustomParam as ChatCompletionNamedToolChoiceCustomParam, +) +from .chat_completion_message_function_tool_call_param import ( + ChatCompletionMessageFunctionToolCallParam as ChatCompletionMessageFunctionToolCallParam, +) diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion.py b/portkey_ai/_vendor/openai/types/chat/chat_completion.py index 49af1a3d..6bc4bafe 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion.py @@ -59,25 +59,23 @@ class ChatCompletion(BaseModel): object: Literal["chat.completion"] """The object type, which is always `chat.completion`.""" - service_tier: Optional[Literal["auto", "default", "flex"]] = None - """Specifies the latency tier to use for processing the request. - - This parameter is relevant for customers subscribed to the scale tier service: - - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarantee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. - - If set to 'flex', the request will be processed with the Flex Processing - service tier. - [Learn more](https://platform.openai.com/docs/guides/flex-processing). + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None + """Specifies the processing type used for serving the request. + + - If set to 'auto', then the request will be processed with the service tier + configured in the Project settings. Unless otherwise configured, the Project + will use 'default'. + - If set to 'default', then the request will be processed with the standard + pricing and performance for the selected model. + - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. 
+ When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. """ system_fingerprint: Optional[str] = None diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_allowed_tool_choice_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_allowed_tool_choice_param.py new file mode 100644 index 00000000..813e6293 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_allowed_tool_choice_param.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from .chat_completion_allowed_tools_param import ChatCompletionAllowedToolsParam + +__all__ = ["ChatCompletionAllowedToolChoiceParam"] + + +class ChatCompletionAllowedToolChoiceParam(TypedDict, total=False): + allowed_tools: Required[ChatCompletionAllowedToolsParam] + """Constrains the tools available to the model to a pre-defined set.""" + + type: Required[Literal["allowed_tools"]] + """Allowed tool configuration type. Always `allowed_tools`.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_allowed_tools_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_allowed_tools_param.py new file mode 100644 index 00000000..d9b72d8f --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_allowed_tools_param.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatCompletionAllowedToolsParam"] + + +class ChatCompletionAllowedToolsParam(TypedDict, total=False): + mode: Required[Literal["auto", "required"]] + """Constrains the tools available to the model to a pre-defined set. + + `auto` allows the model to pick from among the allowed tools and generate a + message. + + `required` requires the model to call one or more of the allowed tools. + """ + + tools: Required[Iterable[Dict[str, object]]] + """A list of tool definitions that the model should be allowed to call. 
+ + For the Chat Completions API, the list of tool definitions might look like: + + ```json + [ + { "type": "function", "function": { "name": "get_weather" } }, + { "type": "function", "function": { "name": "get_time" } } + ] + ``` + """ diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_assistant_message_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_assistant_message_param.py index 35e3a3d7..212d933e 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_assistant_message_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_assistant_message_param.py @@ -6,8 +6,8 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam -from .chat_completion_message_tool_call_param import ChatCompletionMessageToolCallParam from .chat_completion_content_part_refusal_param import ChatCompletionContentPartRefusalParam +from .chat_completion_message_tool_call_union_param import ChatCompletionMessageToolCallUnionParam __all__ = ["ChatCompletionAssistantMessageParam", "Audio", "ContentArrayOfContentPart", "FunctionCall"] @@ -66,5 +66,5 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): refusal: Optional[str] """The refusal message by the assistant.""" - tool_calls: Iterable[ChatCompletionMessageToolCallParam] + tool_calls: Iterable[ChatCompletionMessageToolCallUnionParam] """The tool calls generated by the model, such as function calls.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py index 25caada1..b1576b41 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py @@ -16,9 +16,7 @@ class ChatCompletionAudioParam(TypedDict, total=False): """ voice: Required[ - Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]] ] """The voice the model uses to respond. diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_chunk.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_chunk.py index c109e10f..ea32d157 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_chunk.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_chunk.py @@ -128,25 +128,23 @@ class ChatCompletionChunk(BaseModel): object: Literal["chat.completion.chunk"] """The object type, which is always `chat.completion.chunk`.""" - service_tier: Optional[Literal["auto", "default", "flex"]] = None - """Specifies the latency tier to use for processing the request. - - This parameter is relevant for customers subscribed to the scale tier service: - - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarantee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. - - If set to 'flex', the request will be processed with the Flex Processing - service tier. - [Learn more](https://platform.openai.com/docs/guides/flex-processing). 
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None + """Specifies the processing type used for serving the request. + + - If set to 'auto', then the request will be processed with the service tier + configured in the Project settings. Unless otherwise configured, the Project + will use 'default'. + - If set to 'default', then the request will be processed with the standard + pricing and performance for the selected model. + - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. + When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. """ system_fingerprint: Optional[str] = None diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image.py new file mode 100644 index 00000000..c1386b9d --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ChatCompletionContentPartImage", "ImageURL"] + + +class ImageURL(BaseModel): + url: str + """Either a URL of the image or the base64 encoded image data.""" + + detail: Optional[Literal["auto", "low", "high"]] = None + """Specifies the detail level of the image. + + Learn more in the + [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). + """ + + +class ChatCompletionContentPartImage(BaseModel): + image_url: ImageURL + + type: Literal["image_url"] + """The type of the content part.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_text.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_text.py new file mode 100644 index 00000000..f09f35f7 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_text.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ChatCompletionContentPartText"] + + +class ChatCompletionContentPartText(BaseModel): + text: str + """The text content.""" + + type: Literal["text"] + """The type of the content part.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_custom_tool_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_custom_tool_param.py new file mode 100644 index 00000000..14959ee4 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_custom_tool_param.py @@ -0,0 +1,58 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = [ + "ChatCompletionCustomToolParam", + "Custom", + "CustomFormat", + "CustomFormatText", + "CustomFormatGrammar", + "CustomFormatGrammarGrammar", +] + + +class CustomFormatText(TypedDict, total=False): + type: Required[Literal["text"]] + """Unconstrained text format. Always `text`.""" + + +class CustomFormatGrammarGrammar(TypedDict, total=False): + definition: Required[str] + """The grammar definition.""" + + syntax: Required[Literal["lark", "regex"]] + """The syntax of the grammar definition. One of `lark` or `regex`.""" + + +class CustomFormatGrammar(TypedDict, total=False): + grammar: Required[CustomFormatGrammarGrammar] + """Your chosen grammar.""" + + type: Required[Literal["grammar"]] + """Grammar format. Always `grammar`.""" + + +CustomFormat: TypeAlias = Union[CustomFormatText, CustomFormatGrammar] + + +class Custom(TypedDict, total=False): + name: Required[str] + """The name of the custom tool, used to identify it in tool calls.""" + + description: str + """Optional description of the custom tool, used to provide more context.""" + + format: CustomFormat + """The input format for the custom tool. Default is unconstrained text.""" + + +class ChatCompletionCustomToolParam(TypedDict, total=False): + custom: Required[Custom] + """Properties of the custom tool.""" + + type: Required[Literal["custom"]] + """The type of the custom tool. Always `custom`.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_tool.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool.py similarity index 80% rename from portkey_ai/_vendor/openai/types/chat/chat_completion_tool.py rename to portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool.py index ae9126f9..641568ac 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_tool.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool.py @@ -5,10 +5,10 @@ from ..._models import BaseModel from ..shared.function_definition import FunctionDefinition -__all__ = ["ChatCompletionTool"] +__all__ = ["ChatCompletionFunctionTool"] -class ChatCompletionTool(BaseModel): +class ChatCompletionFunctionTool(BaseModel): function: FunctionDefinition type: Literal["function"] diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool_param.py new file mode 100644 index 00000000..a39feea5 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_function_tool_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from ..shared_params.function_definition import FunctionDefinition + +__all__ = ["ChatCompletionFunctionToolParam"] + + +class ChatCompletionFunctionToolParam(TypedDict, total=False): + function: Required[FunctionDefinition] + + type: Required[Literal["function"]] + """The type of the tool. 
Currently, only `function` is supported.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_message.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_message.py index c659ac3d..5bb153fe 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_message.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_message.py @@ -5,7 +5,7 @@ from ..._models import BaseModel from .chat_completion_audio import ChatCompletionAudio -from .chat_completion_message_tool_call import ChatCompletionMessageToolCall +from .chat_completion_message_tool_call import ChatCompletionMessageToolCallUnion __all__ = ["ChatCompletionMessage", "Annotation", "AnnotationURLCitation", "FunctionCall"] @@ -75,5 +75,5 @@ class ChatCompletionMessage(BaseModel): model. """ - tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None + tool_calls: Optional[List[ChatCompletionMessageToolCallUnion]] = None """The tool calls generated by the model, such as function calls.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_custom_tool_call.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_custom_tool_call.py new file mode 100644 index 00000000..b13c176a --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_custom_tool_call.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ChatCompletionMessageCustomToolCall", "Custom"] + + +class Custom(BaseModel): + input: str + """The input for the custom tool call generated by the model.""" + + name: str + """The name of the custom tool to call.""" + + +class ChatCompletionMessageCustomToolCall(BaseModel): + id: str + """The ID of the tool call.""" + + custom: Custom + """The custom tool that the model called.""" + + type: Literal["custom"] + """The type of the tool. Always `custom`.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_custom_tool_call_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_custom_tool_call_param.py new file mode 100644 index 00000000..3753e0f2 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_custom_tool_call_param.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatCompletionMessageCustomToolCallParam", "Custom"] + + +class Custom(TypedDict, total=False): + input: Required[str] + """The input for the custom tool call generated by the model.""" + + name: Required[str] + """The name of the custom tool to call.""" + + +class ChatCompletionMessageCustomToolCallParam(TypedDict, total=False): + id: Required[str] + """The ID of the tool call.""" + + custom: Required[Custom] + """The custom tool that the model called.""" + + type: Required[Literal["custom"]] + """The type of the tool. Always `custom`.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_function_tool_call.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_function_tool_call.py new file mode 100644 index 00000000..d056d9af --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_function_tool_call.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ChatCompletionMessageFunctionToolCall", "Function"] + + +class Function(BaseModel): + arguments: str + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: str + """The name of the function to call.""" + + +class ChatCompletionMessageFunctionToolCall(BaseModel): + id: str + """The ID of the tool call.""" + + function: Function + """The function that the model called.""" + + type: Literal["function"] + """The type of the tool. Currently, only `function` is supported.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_function_tool_call_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_function_tool_call_param.py new file mode 100644 index 00000000..7c827edd --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_function_tool_call_param.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatCompletionMessageFunctionToolCallParam", "Function"] + + +class Function(TypedDict, total=False): + arguments: Required[str] + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: Required[str] + """The name of the function to call.""" + + +class ChatCompletionMessageFunctionToolCallParam(TypedDict, total=False): + id: Required[str] + """The ID of the tool call.""" + + function: Required[Function] + """The function that the model called.""" + + type: Required[Literal["function"]] + """The type of the tool. Currently, only `function` is supported.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_tool_call.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_tool_call.py index 4fec6670..71ac63f5 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_tool_call.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_tool_call.py @@ -1,31 +1,17 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing_extensions import Literal +from typing import Union +from typing_extensions import Annotated, TypeAlias -from ..._models import BaseModel +from ..._utils import PropertyInfo +from .chat_completion_message_custom_tool_call import ChatCompletionMessageCustomToolCall +from .chat_completion_message_function_tool_call import Function as Function, ChatCompletionMessageFunctionToolCall -__all__ = ["ChatCompletionMessageToolCall", "Function"] +__all__ = ["Function", "ChatCompletionMessageToolCallUnion"] +ChatCompletionMessageToolCallUnion: TypeAlias = Annotated[ + Union[ChatCompletionMessageFunctionToolCall, ChatCompletionMessageCustomToolCall], + PropertyInfo(discriminator="type"), +] -class Function(BaseModel): - arguments: str - """ - The arguments to call the function with, as generated by the model in JSON - format. 
Note that the model does not always generate valid JSON, and may - hallucinate parameters not defined by your function schema. Validate the - arguments in your code before calling your function. - """ - - name: str - """The name of the function to call.""" - - -class ChatCompletionMessageToolCall(BaseModel): - id: str - """The ID of the tool call.""" - - function: Function - """The function that the model called.""" - - type: Literal["function"] - """The type of the tool. Currently, only `function` is supported.""" +ChatCompletionMessageToolCall: TypeAlias = ChatCompletionMessageFunctionToolCall diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_tool_call_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_tool_call_param.py index f616c363..6baa1b57 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_tool_call_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_tool_call_param.py @@ -2,30 +2,13 @@ from __future__ import annotations -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import TypeAlias -__all__ = ["ChatCompletionMessageToolCallParam", "Function"] - - -class Function(TypedDict, total=False): - arguments: Required[str] - """ - The arguments to call the function with, as generated by the model in JSON - format. Note that the model does not always generate valid JSON, and may - hallucinate parameters not defined by your function schema. Validate the - arguments in your code before calling your function. - """ - - name: Required[str] - """The name of the function to call.""" +from .chat_completion_message_function_tool_call_param import ( + Function as Function, + ChatCompletionMessageFunctionToolCallParam, +) +__all__ = ["ChatCompletionMessageToolCallParam", "Function"] -class ChatCompletionMessageToolCallParam(TypedDict, total=False): - id: Required[str] - """The ID of the tool call.""" - - function: Required[Function] - """The function that the model called.""" - - type: Required[Literal["function"]] - """The type of the tool. Currently, only `function` is supported.""" +ChatCompletionMessageToolCallParam: TypeAlias = ChatCompletionMessageFunctionToolCallParam diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_tool_call_union_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_tool_call_union_param.py new file mode 100644 index 00000000..fcca9bb1 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_tool_call_union_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
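Reviewer note: since `ChatCompletionMessage.tool_calls` is now a union discriminated on `type`, downstream code should narrow before touching `function` or `custom`. A minimal sketch of consumer-side handling; the model name and tool definition are illustrative, not part of this diff:

```python
# Narrowing the new tool-call union on its `type` discriminator.
import json

from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",  # illustrative
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ],
)

for tool_call in completion.choices[0].message.tool_calls or []:
    if tool_call.type == "function":
        # ChatCompletionMessageFunctionToolCall: arguments arrive JSON-encoded
        args = json.loads(tool_call.function.arguments)
        print(tool_call.function.name, args)
    elif tool_call.type == "custom":
        # ChatCompletionMessageCustomToolCall: a free-form `input` string
        print(tool_call.custom.name, tool_call.custom.input)
```

The `Literal` `type` fields give static narrowing, while the `PropertyInfo(discriminator="type")` annotation handles runtime validation.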
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .chat_completion_message_custom_tool_call_param import ChatCompletionMessageCustomToolCallParam +from .chat_completion_message_function_tool_call_param import ChatCompletionMessageFunctionToolCallParam + +__all__ = ["ChatCompletionMessageToolCallUnionParam"] + +ChatCompletionMessageToolCallUnionParam: TypeAlias = Union[ + ChatCompletionMessageFunctionToolCallParam, ChatCompletionMessageCustomToolCallParam +] diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_custom_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_custom_param.py new file mode 100644 index 00000000..1c123c0a --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_custom_param.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatCompletionNamedToolChoiceCustomParam", "Custom"] + + +class Custom(TypedDict, total=False): + name: Required[str] + """The name of the custom tool to call.""" + + +class ChatCompletionNamedToolChoiceCustomParam(TypedDict, total=False): + custom: Required[Custom] + + type: Required[Literal["custom"]] + """For custom tool calling, the type is always `custom`.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_param.py index 369f8b42..ae1acfb9 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_named_tool_choice_param.py @@ -16,4 +16,4 @@ class ChatCompletionNamedToolChoiceParam(TypedDict, total=False): function: Required[Function] type: Required[Literal["function"]] - """The type of the tool. Currently, only `function` is supported.""" + """For function calling, the type is always `function`.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_store_message.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_store_message.py index 8dc093f7..66134271 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_store_message.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_store_message.py @@ -1,10 +1,23 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import List, Union, Optional +from typing_extensions import TypeAlias + from .chat_completion_message import ChatCompletionMessage +from .chat_completion_content_part_text import ChatCompletionContentPartText +from .chat_completion_content_part_image import ChatCompletionContentPartImage + +__all__ = ["ChatCompletionStoreMessage", "ChatCompletionStoreMessageContentPart"] -__all__ = ["ChatCompletionStoreMessage"] +ChatCompletionStoreMessageContentPart: TypeAlias = Union[ChatCompletionContentPartText, ChatCompletionContentPartImage] class ChatCompletionStoreMessage(ChatCompletionMessage): id: str """The identifier of the chat message.""" + + content_parts: Optional[List[ChatCompletionStoreMessageContentPart]] = None + """ + If a content parts array was provided, this is an array of `text` and + `image_url` parts. Otherwise, null. 
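The named tool-choice surface in this hunk grows a custom-tool form alongside the existing function form. A quick sketch of the two shapes (tool names illustrative):

```python
# Pin the model to one specific tool via `tool_choice`.

# New: name a custom tool (ChatCompletionNamedToolChoiceCustomParam).
force_custom = {"type": "custom", "custom": {"name": "run_sql"}}

# Existing: name a function tool (ChatCompletionNamedToolChoiceParam).
force_function = {"type": "function", "function": {"name": "get_weather"}}

# Either dict is passed as tool_choice= alongside a `tools` list that
# actually defines the named tool; "none"/"auto"/"required" still work.
```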
+ """ diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_stream_options_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_stream_options_param.py index 471e0eba..fc3191d2 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_stream_options_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_stream_options_param.py @@ -8,6 +8,17 @@ class ChatCompletionStreamOptionsParam(TypedDict, total=False): + include_obfuscation: bool + """When true, stream obfuscation will be enabled. + + Stream obfuscation adds random characters to an `obfuscation` field on streaming + delta events to normalize payload sizes as a mitigation to certain side-channel + attacks. These obfuscation fields are included by default, but add a small + amount of overhead to the data stream. You can set `include_obfuscation` to + false to optimize for bandwidth if you trust the network links between your + application and the OpenAI API. + """ + include_usage: bool """If set, an additional chunk will be streamed before the `data: [DONE]` message. diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_choice_option_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_choice_option_param.py index 7dedf041..f3bb0a46 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_choice_option_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_choice_option_param.py @@ -6,9 +6,14 @@ from typing_extensions import Literal, TypeAlias from .chat_completion_named_tool_choice_param import ChatCompletionNamedToolChoiceParam +from .chat_completion_allowed_tool_choice_param import ChatCompletionAllowedToolChoiceParam +from .chat_completion_named_tool_choice_custom_param import ChatCompletionNamedToolChoiceCustomParam __all__ = ["ChatCompletionToolChoiceOptionParam"] ChatCompletionToolChoiceOptionParam: TypeAlias = Union[ - Literal["none", "auto", "required"], ChatCompletionNamedToolChoiceParam + Literal["none", "auto", "required"], + ChatCompletionAllowedToolChoiceParam, + ChatCompletionNamedToolChoiceParam, + ChatCompletionNamedToolChoiceCustomParam, ] diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_param.py index 6c2b1a36..a18b13b4 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_param.py @@ -2,15 +2,13 @@ from __future__ import annotations -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import TypeAlias -from ..shared_params.function_definition import FunctionDefinition +from .chat_completion_function_tool_param import ( + FunctionDefinition as FunctionDefinition, + ChatCompletionFunctionToolParam, +) -__all__ = ["ChatCompletionToolParam"] +__all__ = ["ChatCompletionToolParam", "FunctionDefinition"] - -class ChatCompletionToolParam(TypedDict, total=False): - function: Required[FunctionDefinition] - - type: Required[Literal["function"]] - """The type of the tool. 
Currently, only `function` is supported.""" +ChatCompletionToolParam: TypeAlias = ChatCompletionFunctionToolParam diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_union_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_union_param.py new file mode 100644 index 00000000..0f8bf7b0 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_union_param.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .chat_completion_custom_tool_param import ChatCompletionCustomToolParam +from .chat_completion_function_tool_param import ChatCompletionFunctionToolParam + +__all__ = ["ChatCompletionToolUnionParam"] + +ChatCompletionToolUnionParam: TypeAlias = Union[ChatCompletionFunctionToolParam, ChatCompletionCustomToolParam] diff --git a/portkey_ai/_vendor/openai/types/chat/completion_create_params.py b/portkey_ai/_vendor/openai/types/chat/completion_create_params.py index e55cc2d0..2ae81dfb 100644 --- a/portkey_ai/_vendor/openai/types/chat/completion_create_params.py +++ b/portkey_ai/_vendor/openai/types/chat/completion_create_params.py @@ -5,12 +5,13 @@ from typing import Dict, List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..._types import SequenceNotStr from ..shared.chat_model import ChatModel from ..shared_params.metadata import Metadata from ..shared.reasoning_effort import ReasoningEffort -from .chat_completion_tool_param import ChatCompletionToolParam from .chat_completion_audio_param import ChatCompletionAudioParam from .chat_completion_message_param import ChatCompletionMessageParam +from .chat_completion_tool_union_param import ChatCompletionToolUnionParam from ..shared_params.function_parameters import FunctionParameters from ..shared_params.response_format_text import ResponseFormatText from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam @@ -177,13 +178,20 @@ class CompletionCreateParamsBase(TypedDict, total=False): far, increasing the model's likelihood to talk about new topics. """ - reasoning_effort: Optional[ReasoningEffort] - """**o-series models only** + prompt_cache_key: str + """ + Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + """ + reasoning_effort: Optional[ReasoningEffort] + """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. """ response_format: ResponseFormat @@ -199,6 +207,15 @@ class CompletionCreateParamsBase(TypedDict, total=False): preferred for models that support it. """ + safety_identifier: str + """ + A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. 
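Taken together with the `include_obfuscation` stream option above, the new request-level knobs look like this in practice. A hedged sketch: parameter values are illustrative, and availability of `reasoning_effort="minimal"` depends on the model.

```python
import hashlib

from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="o4-mini",  # illustrative; reasoning_effort applies to reasoning models
    messages=[{"role": "user", "content": "Summarize this account's last order."}],
    # Replaces `user` for cache bucketing of similar requests:
    prompt_cache_key="orders-summary-v1",
    # Replaces `user` for abuse detection; send a hash, never raw PII:
    safety_identifier=hashlib.sha256(b"jane@example.com").hexdigest(),
    # `minimal` joins low/medium/high in this release:
    reasoning_effort="minimal",
)

stream = client.chat.completions.create(
    model="gpt-4o",  # illustrative
    messages=[{"role": "user", "content": "Say this is a test"}],
    stream=True,
    # Obfuscation padding is on by default; opt out only on trusted links:
    stream_options={"include_obfuscation": False, "include_usage": True},
)
for chunk in stream:
    if chunk.choices:  # the final usage chunk has an empty choices list
        print(chunk.choices[0].delta.content or "", end="")
```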
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + """ + seed: Optional[int] """ This feature is in Beta. If specified, our system will make a best effort to @@ -208,28 +225,26 @@ class CompletionCreateParamsBase(TypedDict, total=False): in the backend. """ - service_tier: Optional[Literal["auto", "default", "flex"]] - """Specifies the latency tier to use for processing the request. - - This parameter is relevant for customers subscribed to the scale tier service: + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] + """Specifies the processing type used for serving the request. - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarantee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. - - If set to 'flex', the request will be processed with the Flex Processing - service tier. - [Learn more](https://platform.openai.com/docs/guides/flex-processing). + - If set to 'auto', then the request will be processed with the service tier + configured in the Project settings. Unless otherwise configured, the Project + will use 'default'. + - If set to 'default', then the request will be processed with the standard + pricing and performance for the selected model. + - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. + When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. """ - stop: Union[Optional[str], List[str], None] + stop: Union[Optional[str], SequenceNotStr[str], None] """Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The @@ -241,6 +256,8 @@ class CompletionCreateParamsBase(TypedDict, total=False): Whether or not to store the output of this chat completion request for use in our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. + + Supports text and image inputs. Note: image inputs over 8MB will be dropped. """ stream_options: Optional[ChatCompletionStreamOptionsParam] @@ -267,12 +284,12 @@ class CompletionCreateParamsBase(TypedDict, total=False): are present. """ - tools: Iterable[ChatCompletionToolParam] + tools: Iterable[ChatCompletionToolUnionParam] """A list of tools the model may call. - Currently, only functions are supported as a tool. Use this to provide a list of - functions the model may generate JSON inputs for. A max of 128 functions are - supported. + You can provide either + [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + or [function tools](https://platform.openai.com/docs/guides/function-calling). 
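With `tools` now accepting the function/custom union and `service_tier` gaining `scale` and `priority`, a combined request looks roughly like this. The custom-tool definition shape comes from `ChatCompletionCustomToolParam`, which this hunk references but does not show, so treat its fields as an assumption:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",  # illustrative
    messages=[{"role": "user", "content": "Run the weekly sales report."}],
    tools=[
        {  # function tool: the model returns JSON arguments
            "type": "function",
            "function": {
                "name": "get_weather",
                "parameters": {"type": "object", "properties": {}},
            },
        },
        {  # custom tool: the model returns a free-form input string
            "type": "custom",
            "custom": {"name": "run_sql", "description": "Run a read-only SQL query."},
        },
    ],
    service_tier="priority",  # new value; the response echoes the tier actually used
)
print(completion.service_tier)
```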
""" top_logprobs: Optional[int] @@ -292,11 +309,20 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ user: str - """A stable identifier for your end-users. + """This field is being replaced by `safety_identifier` and `prompt_cache_key`. + + Use `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + """ + + verbosity: Optional[Literal["low", "medium", "high"]] + """Constrains the verbosity of the model's response. - Used to boost cache hit rates by better bucketing similar requests and to help - OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. """ web_search_options: WebSearchOptions diff --git a/portkey_ai/_vendor/openai/types/chat/parsed_function_tool_call.py b/portkey_ai/_vendor/openai/types/chat/parsed_function_tool_call.py index 3e90789f..e06b3546 100644 --- a/portkey_ai/_vendor/openai/types/chat/parsed_function_tool_call.py +++ b/portkey_ai/_vendor/openai/types/chat/parsed_function_tool_call.py @@ -2,7 +2,7 @@ from typing import Optional -from .chat_completion_message_tool_call import Function, ChatCompletionMessageToolCall +from .chat_completion_message_function_tool_call import Function, ChatCompletionMessageFunctionToolCall __all__ = ["ParsedFunctionToolCall", "ParsedFunction"] @@ -24,6 +24,6 @@ class ParsedFunction(Function): """ -class ParsedFunctionToolCall(ChatCompletionMessageToolCall): +class ParsedFunctionToolCall(ChatCompletionMessageFunctionToolCall): function: ParsedFunction """The function that the model called.""" diff --git a/portkey_ai/_vendor/openai/types/completion_create_params.py b/portkey_ai/_vendor/openai/types/completion_create_params.py index 6ae20cff..f9beb9af 100644 --- a/portkey_ai/_vendor/openai/types/completion_create_params.py +++ b/portkey_ai/_vendor/openai/types/completion_create_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional +from typing import Dict, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict +from .._types import SequenceNotStr from .chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam __all__ = ["CompletionCreateParamsBase", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming"] @@ -21,7 +22,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): them. """ - prompt: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None]] + prompt: Required[Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None]] """ The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. @@ -119,7 +120,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): response parameter to monitor changes in the backend. """ - stop: Union[Optional[str], List[str], None] + stop: Union[Optional[str], SequenceNotStr[str], None] """Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. 
The diff --git a/portkey_ai/_vendor/openai/types/container_create_params.py b/portkey_ai/_vendor/openai/types/container_create_params.py index bd273349..01a48ac4 100644 --- a/portkey_ai/_vendor/openai/types/container_create_params.py +++ b/portkey_ai/_vendor/openai/types/container_create_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List from typing_extensions import Literal, Required, TypedDict +from .._types import SequenceNotStr + __all__ = ["ContainerCreateParams", "ExpiresAfter"] @@ -15,7 +16,7 @@ class ContainerCreateParams(TypedDict, total=False): expires_after: ExpiresAfter """Container expiration time in seconds relative to the 'anchor' time.""" - file_ids: List[str] + file_ids: SequenceNotStr[str] """IDs of files to copy to the container.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/__init__.py b/portkey_ai/_vendor/openai/types/conversations/__init__.py new file mode 100644 index 00000000..538966db --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/__init__.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .message import Message as Message +from .lob_prob import LobProb as LobProb +from .conversation import Conversation as Conversation +from .text_content import TextContent as TextContent +from .top_log_prob import TopLogProb as TopLogProb +from .refusal_content import RefusalContent as RefusalContent +from .item_list_params import ItemListParams as ItemListParams +from .conversation_item import ConversationItem as ConversationItem +from .url_citation_body import URLCitationBody as URLCitationBody +from .file_citation_body import FileCitationBody as FileCitationBody +from .input_file_content import InputFileContent as InputFileContent +from .input_text_content import InputTextContent as InputTextContent +from .item_create_params import ItemCreateParams as ItemCreateParams +from .input_image_content import InputImageContent as InputImageContent +from .output_text_content import OutputTextContent as OutputTextContent +from .item_retrieve_params import ItemRetrieveParams as ItemRetrieveParams +from .summary_text_content import SummaryTextContent as SummaryTextContent +from .conversation_item_list import ConversationItemList as ConversationItemList +from .conversation_create_params import ConversationCreateParams as ConversationCreateParams +from .conversation_update_params import ConversationUpdateParams as ConversationUpdateParams +from .computer_screenshot_content import ComputerScreenshotContent as ComputerScreenshotContent +from .container_file_citation_body import ContainerFileCitationBody as ContainerFileCitationBody +from .conversation_deleted_resource import ConversationDeletedResource as ConversationDeletedResource diff --git a/portkey_ai/_vendor/openai/types/conversations/computer_screenshot_content.py b/portkey_ai/_vendor/openai/types/conversations/computer_screenshot_content.py new file mode 100644 index 00000000..897b7ada --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/computer_screenshot_content.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
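`file_ids` on container creation moves to `SequenceNotStr[str]` as well, so any non-`str` sequence of IDs type-checks. A small sketch; the IDs are illustrative and the `client.containers.create` path is assumed from the resource layout:

```python
from openai import OpenAI

client = OpenAI()

container = client.containers.create(
    name="analysis-sandbox",
    file_ids=("file_abc123", "file_def456"),  # any sequence of str except str itself
)
print(container.id)
```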
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ComputerScreenshotContent"] + + +class ComputerScreenshotContent(BaseModel): + file_id: Optional[str] = None + """The identifier of an uploaded file that contains the screenshot.""" + + image_url: Optional[str] = None + """The URL of the screenshot image.""" + + type: Literal["computer_screenshot"] + """Specifies the event type. + + For a computer screenshot, this property is always set to `computer_screenshot`. + """ diff --git a/portkey_ai/_vendor/openai/types/conversations/container_file_citation_body.py b/portkey_ai/_vendor/openai/types/conversations/container_file_citation_body.py new file mode 100644 index 00000000..ea460df2 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/container_file_citation_body.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ContainerFileCitationBody"] + + +class ContainerFileCitationBody(BaseModel): + container_id: str + """The ID of the container file.""" + + end_index: int + """The index of the last character of the container file citation in the message.""" + + file_id: str + """The ID of the file.""" + + filename: str + """The filename of the container file cited.""" + + start_index: int + """The index of the first character of the container file citation in the message.""" + + type: Literal["container_file_citation"] + """The type of the container file citation. Always `container_file_citation`.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/conversation.py b/portkey_ai/_vendor/openai/types/conversations/conversation.py new file mode 100644 index 00000000..ed63d403 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/conversation.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["Conversation"] + + +class Conversation(BaseModel): + id: str + """The unique ID of the conversation.""" + + created_at: int + """ + The time at which the conversation was created, measured in seconds since the + Unix epoch. + """ + + metadata: object + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. Keys are + strings with a maximum length of 64 characters. Values are strings with a + maximum length of 512 characters. + """ + + object: Literal["conversation"] + """The object type, which is always `conversation`.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/conversation_create_params.py b/portkey_ai/_vendor/openai/types/conversations/conversation_create_params.py new file mode 100644 index 00000000..7ad3f8ae --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/conversation_create_params.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
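A sketch of the `Conversation` lifecycle these types model. The `client.conversations` paths match the resource names this release exports, but treat them as assumptions if you are pinning a different build:

```python
from openai import OpenAI

client = OpenAI()

conversation = client.conversations.create(
    items=[{"type": "message", "role": "user", "content": "Hello!"}],
    metadata={"topic": "demo"},  # up to 16 pairs; keys <= 64 chars, values <= 512
)
print(conversation.id, conversation.object)  # "conversation"

# Metadata is the only mutable field on a conversation:
client.conversations.update(conversation.id, metadata={"topic": "demo", "stage": "t1"})

deleted = client.conversations.delete(conversation.id)
assert deleted.object == "conversation.deleted"
```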
+ +from __future__ import annotations + +from typing import Iterable, Optional +from typing_extensions import TypedDict + +from ..shared_params.metadata import Metadata +from ..responses.response_input_item_param import ResponseInputItemParam + +__all__ = ["ConversationCreateParams"] + + +class ConversationCreateParams(TypedDict, total=False): + items: Optional[Iterable[ResponseInputItemParam]] + """ + Initial items to include in the conversation context. You may add up to 20 items + at a time. + """ + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + Useful for storing additional information about the object in a structured + format. + """ diff --git a/portkey_ai/_vendor/openai/types/conversations/conversation_deleted_resource.py b/portkey_ai/_vendor/openai/types/conversations/conversation_deleted_resource.py new file mode 100644 index 00000000..7abcb244 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/conversation_deleted_resource.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ConversationDeletedResource"] + + +class ConversationDeletedResource(BaseModel): + id: str + + deleted: bool + + object: Literal["conversation.deleted"] diff --git a/portkey_ai/_vendor/openai/types/conversations/conversation_item.py b/portkey_ai/_vendor/openai/types/conversations/conversation_item.py new file mode 100644 index 00000000..a7cd355f --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/conversation_item.py @@ -0,0 +1,209 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from .message import Message +from ..._utils import PropertyInfo +from ..._models import BaseModel +from ..responses.response_reasoning_item import ResponseReasoningItem +from ..responses.response_custom_tool_call import ResponseCustomToolCall +from ..responses.response_computer_tool_call import ResponseComputerToolCall +from ..responses.response_function_web_search import ResponseFunctionWebSearch +from ..responses.response_file_search_tool_call import ResponseFileSearchToolCall +from ..responses.response_custom_tool_call_output import ResponseCustomToolCallOutput +from ..responses.response_function_tool_call_item import ResponseFunctionToolCallItem +from ..responses.response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall +from ..responses.response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem +from ..responses.response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem + +__all__ = [ + "ConversationItem", + "ImageGenerationCall", + "LocalShellCall", + "LocalShellCallAction", + "LocalShellCallOutput", + "McpListTools", + "McpListToolsTool", + "McpApprovalRequest", + "McpApprovalResponse", + "McpCall", +] + + +class ImageGenerationCall(BaseModel): + id: str + """The unique ID of the image generation call.""" + + result: Optional[str] = None + """The generated image encoded in base64.""" + + status: Literal["in_progress", "completed", "generating", "failed"] + """The status of the image generation call.""" + + type: Literal["image_generation_call"] + """The type of the image generation call. 
Always `image_generation_call`.""" + + +class LocalShellCallAction(BaseModel): + command: List[str] + """The command to run.""" + + env: Dict[str, str] + """Environment variables to set for the command.""" + + type: Literal["exec"] + """The type of the local shell action. Always `exec`.""" + + timeout_ms: Optional[int] = None + """Optional timeout in milliseconds for the command.""" + + user: Optional[str] = None + """Optional user to run the command as.""" + + working_directory: Optional[str] = None + """Optional working directory to run the command in.""" + + +class LocalShellCall(BaseModel): + id: str + """The unique ID of the local shell call.""" + + action: LocalShellCallAction + """Execute a shell command on the server.""" + + call_id: str + """The unique ID of the local shell tool call generated by the model.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the local shell call.""" + + type: Literal["local_shell_call"] + """The type of the local shell call. Always `local_shell_call`.""" + + +class LocalShellCallOutput(BaseModel): + id: str + """The unique ID of the local shell tool call generated by the model.""" + + output: str + """A JSON string of the output of the local shell tool call.""" + + type: Literal["local_shell_call_output"] + """The type of the local shell tool call output. Always `local_shell_call_output`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the item. One of `in_progress`, `completed`, or `incomplete`.""" + + +class McpListToolsTool(BaseModel): + input_schema: object + """The JSON schema describing the tool's input.""" + + name: str + """The name of the tool.""" + + annotations: Optional[object] = None + """Additional annotations about the tool.""" + + description: Optional[str] = None + """The description of the tool.""" + + +class McpListTools(BaseModel): + id: str + """The unique ID of the list.""" + + server_label: str + """The label of the MCP server.""" + + tools: List[McpListToolsTool] + """The tools available on the server.""" + + type: Literal["mcp_list_tools"] + """The type of the item. Always `mcp_list_tools`.""" + + error: Optional[str] = None + """Error message if the server could not list tools.""" + + +class McpApprovalRequest(BaseModel): + id: str + """The unique ID of the approval request.""" + + arguments: str + """A JSON string of arguments for the tool.""" + + name: str + """The name of the tool to run.""" + + server_label: str + """The label of the MCP server making the request.""" + + type: Literal["mcp_approval_request"] + """The type of the item. Always `mcp_approval_request`.""" + + +class McpApprovalResponse(BaseModel): + id: str + """The unique ID of the approval response""" + + approval_request_id: str + """The ID of the approval request being answered.""" + + approve: bool + """Whether the request was approved.""" + + type: Literal["mcp_approval_response"] + """The type of the item. Always `mcp_approval_response`.""" + + reason: Optional[str] = None + """Optional reason for the decision.""" + + +class McpCall(BaseModel): + id: str + """The unique ID of the tool call.""" + + arguments: str + """A JSON string of the arguments passed to the tool.""" + + name: str + """The name of the tool that was run.""" + + server_label: str + """The label of the MCP server running the tool.""" + + type: Literal["mcp_call"] + """The type of the item. 
Always `mcp_call`.""" + + error: Optional[str] = None + """The error from the tool call, if any.""" + + output: Optional[str] = None + """The output from the tool call.""" + + +ConversationItem: TypeAlias = Annotated[ + Union[ + Message, + ResponseFunctionToolCallItem, + ResponseFunctionToolCallOutputItem, + ResponseFileSearchToolCall, + ResponseFunctionWebSearch, + ImageGenerationCall, + ResponseComputerToolCall, + ResponseComputerToolCallOutputItem, + ResponseReasoningItem, + ResponseCodeInterpreterToolCall, + LocalShellCall, + LocalShellCallOutput, + McpListTools, + McpApprovalRequest, + McpApprovalResponse, + McpCall, + ResponseCustomToolCall, + ResponseCustomToolCallOutput, + ], + PropertyInfo(discriminator="type"), +] diff --git a/portkey_ai/_vendor/openai/types/conversations/conversation_item_list.py b/portkey_ai/_vendor/openai/types/conversations/conversation_item_list.py new file mode 100644 index 00000000..20091102 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/conversation_item_list.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import Literal + +from ..._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ConversationItemList"] + + +class ConversationItemList(BaseModel): + data: List[ConversationItem] + """A list of conversation items.""" + + first_id: str + """The ID of the first item in the list.""" + + has_more: bool + """Whether there are more items available.""" + + last_id: str + """The ID of the last item in the list.""" + + object: Literal["list"] + """The type of object returned, must be `list`.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/conversation_update_params.py b/portkey_ai/_vendor/openai/types/conversations/conversation_update_params.py new file mode 100644 index 00000000..f2aa42d8 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/conversation_update_params.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict +from typing_extensions import Required, TypedDict + +__all__ = ["ConversationUpdateParams"] + + +class ConversationUpdateParams(TypedDict, total=False): + metadata: Required[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. Keys are + strings with a maximum length of 64 characters. Values are strings with a + maximum length of 512 characters. + """ diff --git a/portkey_ai/_vendor/openai/types/conversations/file_citation_body.py b/portkey_ai/_vendor/openai/types/conversations/file_citation_body.py new file mode 100644 index 00000000..ea90ae38 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/file_citation_body.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["FileCitationBody"] + + +class FileCitationBody(BaseModel): + file_id: str + """The ID of the file.""" + + filename: str + """The filename of the file cited.""" + + index: int + """The index of the file in the list of files.""" + + type: Literal["file_citation"] + """The type of the file citation. 
Always `file_citation`.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/input_file_content.py b/portkey_ai/_vendor/openai/types/conversations/input_file_content.py new file mode 100644 index 00000000..6aef7a89 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/input_file_content.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputFileContent"] + + +class InputFileContent(BaseModel): + file_id: Optional[str] = None + """The ID of the file to be sent to the model.""" + + type: Literal["input_file"] + """The type of the input item. Always `input_file`.""" + + file_url: Optional[str] = None + """The URL of the file to be sent to the model.""" + + filename: Optional[str] = None + """The name of the file to be sent to the model.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/input_image_content.py b/portkey_ai/_vendor/openai/types/conversations/input_image_content.py new file mode 100644 index 00000000..f2587e0a --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/input_image_content.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputImageContent"] + + +class InputImageContent(BaseModel): + detail: Literal["low", "high", "auto"] + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + file_id: Optional[str] = None + """The ID of the file to be sent to the model.""" + + image_url: Optional[str] = None + """The URL of the image to be sent to the model. + + A fully qualified URL or base64 encoded image in a data URL. + """ + + type: Literal["input_image"] + """The type of the input item. Always `input_image`.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/input_text_content.py b/portkey_ai/_vendor/openai/types/conversations/input_text_content.py new file mode 100644 index 00000000..5e2daebd --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/input_text_content.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputTextContent"] + + +class InputTextContent(BaseModel): + text: str + """The text input to the model.""" + + type: Literal["input_text"] + """The type of the input item. Always `input_text`.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/item_create_params.py b/portkey_ai/_vendor/openai/types/conversations/item_create_params.py new file mode 100644 index 00000000..9158b716 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/item_create_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Iterable +from typing_extensions import Required, TypedDict + +from ..responses.response_includable import ResponseIncludable +from ..responses.response_input_item_param import ResponseInputItemParam + +__all__ = ["ItemCreateParams"] + + +class ItemCreateParams(TypedDict, total=False): + items: Required[Iterable[ResponseInputItemParam]] + """The items to add to the conversation. 
You may add up to 20 items at a time.""" + + include: List[ResponseIncludable] + """Additional fields to include in the response. + + See the `include` parameter for + [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + for more information. + """ diff --git a/portkey_ai/_vendor/openai/types/conversations/item_list_params.py b/portkey_ai/_vendor/openai/types/conversations/item_list_params.py new file mode 100644 index 00000000..a4dd61f3 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/item_list_params.py @@ -0,0 +1,50 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, TypedDict + +from ..responses.response_includable import ResponseIncludable + +__all__ = ["ItemListParams"] + + +class ItemListParams(TypedDict, total=False): + after: str + """An item ID to list items after, used in pagination.""" + + include: List[ResponseIncludable] + """Specify additional output data to include in the model response. + + Currently supported values are: + + - `web_search_call.action.sources`: Include the sources of the web search tool + call. + - `code_interpreter_call.outputs`: Includes the outputs of python code execution + in code interpreter tool call items. + - `computer_call_output.output.image_url`: Include image urls from the computer + call output. + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `message.output_text.logprobs`: Include logprobs with assistant messages. + - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + tokens in reasoning item outputs. This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like when + the `store` parameter is set to `false`, or when an organization is enrolled + in the zero data retention program). + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ + + order: Literal["asc", "desc"] + """The order to return the input items in. Default is `desc`. + + - `asc`: Return the input items in ascending order. + - `desc`: Return the input items in descending order. + """ diff --git a/portkey_ai/_vendor/openai/types/conversations/item_retrieve_params.py b/portkey_ai/_vendor/openai/types/conversations/item_retrieve_params.py new file mode 100644 index 00000000..8c5db1e5 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/item_retrieve_params.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Required, TypedDict + +from ..responses.response_includable import ResponseIncludable + +__all__ = ["ItemRetrieveParams"] + + +class ItemRetrieveParams(TypedDict, total=False): + conversation_id: Required[str] + + include: List[ResponseIncludable] + """Additional fields to include in the response. + + See the `include` parameter for + [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + for more information. 
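And the matching item-level calls for `ItemCreateParams` / `ItemListParams` / `ItemRetrieveParams` above; a sketch with illustrative IDs and the same assumed resource paths:

```python
from openai import OpenAI

client = OpenAI()
conversation_id = "conv_abc123"  # illustrative

client.conversations.items.create(
    conversation_id,
    items=[{"type": "message", "role": "user", "content": "Follow-up question"}],  # <= 20 per call
)

page = client.conversations.items.list(
    conversation_id,
    limit=20,                                  # 1..100, default 20
    order="desc",                              # newest first (the default)
    include=["message.output_text.logprobs"],  # opt-in extras, as documented above
)
for item in page.data:
    print(item.type, item.id)
```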
+ """ diff --git a/portkey_ai/_vendor/openai/types/conversations/lob_prob.py b/portkey_ai/_vendor/openai/types/conversations/lob_prob.py new file mode 100644 index 00000000..f7dcd62a --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/lob_prob.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List + +from ..._models import BaseModel +from .top_log_prob import TopLogProb + +__all__ = ["LobProb"] + + +class LobProb(BaseModel): + token: str + + bytes: List[int] + + logprob: float + + top_logprobs: List[TopLogProb] diff --git a/portkey_ai/_vendor/openai/types/conversations/message.py b/portkey_ai/_vendor/openai/types/conversations/message.py new file mode 100644 index 00000000..a070cf28 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/message.py @@ -0,0 +1,56 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .text_content import TextContent +from .refusal_content import RefusalContent +from .input_file_content import InputFileContent +from .input_text_content import InputTextContent +from .input_image_content import InputImageContent +from .output_text_content import OutputTextContent +from .summary_text_content import SummaryTextContent +from .computer_screenshot_content import ComputerScreenshotContent + +__all__ = ["Message", "Content"] + +Content: TypeAlias = Annotated[ + Union[ + InputTextContent, + OutputTextContent, + TextContent, + SummaryTextContent, + RefusalContent, + InputImageContent, + ComputerScreenshotContent, + InputFileContent, + ], + PropertyInfo(discriminator="type"), +] + + +class Message(BaseModel): + id: str + """The unique ID of the message.""" + + content: List[Content] + """The content of the message""" + + role: Literal["unknown", "user", "assistant", "system", "critic", "discriminator", "developer", "tool"] + """The role of the message. + + One of `unknown`, `user`, `assistant`, `system`, `critic`, `discriminator`, + `developer`, or `tool`. + """ + + status: Literal["in_progress", "completed", "incomplete"] + """The status of item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + type: Literal["message"] + """The type of the message. Always set to `message`.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/output_text_content.py b/portkey_ai/_vendor/openai/types/conversations/output_text_content.py new file mode 100644 index 00000000..2ffee765 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/output_text_content.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from .lob_prob import LobProb +from ..._models import BaseModel +from .url_citation_body import URLCitationBody +from .file_citation_body import FileCitationBody +from .container_file_citation_body import ContainerFileCitationBody + +__all__ = ["OutputTextContent", "Annotation"] + +Annotation: TypeAlias = Annotated[ + Union[FileCitationBody, URLCitationBody, ContainerFileCitationBody], PropertyInfo(discriminator="type") +] + + +class OutputTextContent(BaseModel): + annotations: List[Annotation] + """The annotations of the text output.""" + + text: str + """The text output from the model.""" + + type: Literal["output_text"] + """The type of the output text. Always `output_text`.""" + + logprobs: Optional[List[LobProb]] = None diff --git a/portkey_ai/_vendor/openai/types/conversations/refusal_content.py b/portkey_ai/_vendor/openai/types/conversations/refusal_content.py new file mode 100644 index 00000000..3c8bd5e3 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/refusal_content.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RefusalContent"] + + +class RefusalContent(BaseModel): + refusal: str + """The refusal explanation from the model.""" + + type: Literal["refusal"] + """The type of the refusal. Always `refusal`.""" diff --git a/portkey_ai/_vendor/openai/types/conversations/summary_text_content.py b/portkey_ai/_vendor/openai/types/conversations/summary_text_content.py new file mode 100644 index 00000000..047769ed --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/summary_text_content.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["SummaryTextContent"] + + +class SummaryTextContent(BaseModel): + text: str + + type: Literal["summary_text"] diff --git a/portkey_ai/_vendor/openai/types/conversations/text_content.py b/portkey_ai/_vendor/openai/types/conversations/text_content.py new file mode 100644 index 00000000..f1ae0795 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/text_content.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["TextContent"] + + +class TextContent(BaseModel): + text: str + + type: Literal["text"] diff --git a/portkey_ai/_vendor/openai/types/conversations/top_log_prob.py b/portkey_ai/_vendor/openai/types/conversations/top_log_prob.py new file mode 100644 index 00000000..fafca756 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/top_log_prob.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
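One consumer-side sketch for the content models above: walking the discriminated `Annotation` union on an `output_text` part. Two caveats: `LobProb` is the upstream generated spelling and `output_text_content.py` imports it as-is, and the import path below is the public `openai` one (the vendored copy lives under `portkey_ai._vendor`):

```python
from openai.types.conversations import Message


def print_citations(message: Message) -> None:
    """Print every citation attached to the message's output_text parts."""
    for part in message.content:
        if part.type != "output_text":
            continue
        for ann in part.annotations:
            if ann.type == "file_citation":
                print(f"file {ann.filename} (index {ann.index})")
            elif ann.type == "url_citation":
                print(f"{ann.title} <{ann.url}>")
            elif ann.type == "container_file_citation":
                print(f"container {ann.container_id}: {ann.filename}")
```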
+ +from typing import List + +from ..._models import BaseModel + +__all__ = ["TopLogProb"] + + +class TopLogProb(BaseModel): + token: str + + bytes: List[int] + + logprob: float diff --git a/portkey_ai/_vendor/openai/types/conversations/url_citation_body.py b/portkey_ai/_vendor/openai/types/conversations/url_citation_body.py new file mode 100644 index 00000000..1becb44b --- /dev/null +++ b/portkey_ai/_vendor/openai/types/conversations/url_citation_body.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["URLCitationBody"] + + +class URLCitationBody(BaseModel): + end_index: int + """The index of the last character of the URL citation in the message.""" + + start_index: int + """The index of the first character of the URL citation in the message.""" + + title: str + """The title of the web resource.""" + + type: Literal["url_citation"] + """The type of the URL citation. Always `url_citation`.""" + + url: str + """The URL of the web resource.""" diff --git a/portkey_ai/_vendor/openai/types/embedding_create_params.py b/portkey_ai/_vendor/openai/types/embedding_create_params.py index 94edce10..ab3e8779 100644 --- a/portkey_ai/_vendor/openai/types/embedding_create_params.py +++ b/portkey_ai/_vendor/openai/types/embedding_create_params.py @@ -2,16 +2,17 @@ from __future__ import annotations -from typing import List, Union, Iterable +from typing import Union, Iterable from typing_extensions import Literal, Required, TypedDict +from .._types import SequenceNotStr from .embedding_model import EmbeddingModel __all__ = ["EmbeddingCreateParams"] class EmbeddingCreateParams(TypedDict, total=False): - input: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]]]] + input: Required[Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]]]] """Input text to embed, encoded as a string or array of tokens. 
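The embeddings change is the same `SequenceNotStr` tightening: a lone `str` is still accepted through the explicit `str` arm of the union, but a sequence argument can no longer itself be a `str` (which is, inconveniently, a `Sequence[str]`). Sketch:

```python
from openai import OpenAI

client = OpenAI()

resp = client.embeddings.create(
    model="text-embedding-3-small",
    input=("first snippet", "second snippet"),  # tuples/lists of str are fine
)
print(len(resp.data), len(resp.data[0].embedding))
```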
To embed multiple inputs in a single request, pass an array of strings or array diff --git a/portkey_ai/_vendor/openai/types/eval_create_params.py b/portkey_ai/_vendor/openai/types/eval_create_params.py index 20a37654..eb7f86cd 100644 --- a/portkey_ai/_vendor/openai/types/eval_create_params.py +++ b/portkey_ai/_vendor/openai/types/eval_create_params.py @@ -2,15 +2,17 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional +from typing import Dict, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from .._types import SequenceNotStr from .shared_params.metadata import Metadata from .graders.python_grader_param import PythonGraderParam from .graders.score_model_grader_param import ScoreModelGraderParam from .graders.string_check_grader_param import StringCheckGraderParam from .responses.response_input_text_param import ResponseInputTextParam from .graders.text_similarity_grader_param import TextSimilarityGraderParam +from .responses.response_input_audio_param import ResponseInputAudioParam __all__ = [ "EvalCreateParams", @@ -25,6 +27,7 @@ "TestingCriterionLabelModelInputEvalItem", "TestingCriterionLabelModelInputEvalItemContent", "TestingCriterionLabelModelInputEvalItemContentOutputText", + "TestingCriterionLabelModelInputEvalItemContentInputImage", "TestingCriterionTextSimilarity", "TestingCriterionPython", "TestingCriterionScoreModel", @@ -109,14 +112,33 @@ class TestingCriterionLabelModelInputEvalItemContentOutputText(TypedDict, total= """The type of the output text. Always `output_text`.""" +class TestingCriterionLabelModelInputEvalItemContentInputImage(TypedDict, total=False): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + TestingCriterionLabelModelInputEvalItemContent: TypeAlias = Union[ - str, ResponseInputTextParam, TestingCriterionLabelModelInputEvalItemContentOutputText + str, + ResponseInputTextParam, + TestingCriterionLabelModelInputEvalItemContentOutputText, + TestingCriterionLabelModelInputEvalItemContentInputImage, + ResponseInputAudioParam, + Iterable[object], ] class TestingCriterionLabelModelInputEvalItem(TypedDict, total=False): content: Required[TestingCriterionLabelModelInputEvalItemContent] - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. @@ -140,7 +162,7 @@ class TestingCriterionLabelModel(TypedDict, total=False): May include variable references to the `item` namespace, ie {{item.name}}. """ - labels: Required[List[str]] + labels: Required[SequenceNotStr[str]] """The labels to classify to each item in the evaluation.""" model: Required[str] @@ -149,7 +171,7 @@ class TestingCriterionLabelModel(TypedDict, total=False): name: Required[str] """The name of the grader.""" - passing_labels: Required[List[str]] + passing_labels: Required[SequenceNotStr[str]] """The labels that indicate a passing result. 
Must be a subset of labels.""" type: Required[Literal["label_model"]] diff --git a/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source.py b/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source.py index 0a942cd2..edf70c8a 100644 --- a/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source.py +++ b/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source.py @@ -6,10 +6,11 @@ from ..._utils import PropertyInfo from ..._models import BaseModel from ..shared.metadata import Metadata -from ..chat.chat_completion_tool import ChatCompletionTool from ..shared.response_format_text import ResponseFormatText from ..responses.easy_input_message import EasyInputMessage from ..responses.response_input_text import ResponseInputText +from ..responses.response_input_audio import ResponseInputAudio +from ..chat.chat_completion_function_tool import ChatCompletionFunctionTool from ..shared.response_format_json_object import ResponseFormatJSONObject from ..shared.response_format_json_schema import ResponseFormatJSONSchema @@ -23,9 +24,10 @@ "InputMessages", "InputMessagesTemplate", "InputMessagesTemplateTemplate", - "InputMessagesTemplateTemplateMessage", - "InputMessagesTemplateTemplateMessageContent", - "InputMessagesTemplateTemplateMessageContentOutputText", + "InputMessagesTemplateTemplateEvalItem", + "InputMessagesTemplateTemplateEvalItemContent", + "InputMessagesTemplateTemplateEvalItemContentOutputText", + "InputMessagesTemplateTemplateEvalItemContentInputImage", "InputMessagesItemReference", "SamplingParams", "SamplingParamsResponseFormat", @@ -86,7 +88,7 @@ class SourceStoredCompletions(BaseModel): ] -class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel): +class InputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): text: str """The text output from the model.""" @@ -94,14 +96,33 @@ class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel): """The type of the output text. Always `output_text`.""" -InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[ - str, ResponseInputText, InputMessagesTemplateTemplateMessageContentOutputText +class InputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + +InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ + str, + ResponseInputText, + InputMessagesTemplateTemplateEvalItemContentOutputText, + InputMessagesTemplateTemplateEvalItemContentInputImage, + ResponseInputAudio, + List[object], ] -class InputMessagesTemplateTemplateMessage(BaseModel): - content: InputMessagesTemplateTemplateMessageContent - """Text inputs to the model - can contain template strings.""" +class InputMessagesTemplateTemplateEvalItem(BaseModel): + content: InputMessagesTemplateTemplateEvalItemContent + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. @@ -113,9 +134,7 @@ class InputMessagesTemplateTemplateMessage(BaseModel): """The type of the message input. 
Always `message`.""" -InputMessagesTemplateTemplate: TypeAlias = Annotated[ - Union[EasyInputMessage, InputMessagesTemplateTemplateMessage], PropertyInfo(discriminator="type") -] +InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessage, InputMessagesTemplateTemplateEvalItem] class InputMessagesTemplate(BaseModel): @@ -167,7 +186,7 @@ class SamplingParams(BaseModel): temperature: Optional[float] = None """A higher temperature increases randomness in the outputs.""" - tools: Optional[List[ChatCompletionTool]] = None + tools: Optional[List[ChatCompletionFunctionTool]] = None """A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of diff --git a/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source_param.py b/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source_param.py index 84344fcd..c14360ac 100644 --- a/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source_param.py +++ b/portkey_ai/_vendor/openai/types/evals/create_eval_completions_run_data_source_param.py @@ -6,10 +6,11 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..shared_params.metadata import Metadata -from ..chat.chat_completion_tool_param import ChatCompletionToolParam from ..responses.easy_input_message_param import EasyInputMessageParam from ..shared_params.response_format_text import ResponseFormatText from ..responses.response_input_text_param import ResponseInputTextParam +from ..responses.response_input_audio_param import ResponseInputAudioParam +from ..chat.chat_completion_function_tool_param import ChatCompletionFunctionToolParam from ..shared_params.response_format_json_object import ResponseFormatJSONObject from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema @@ -23,9 +24,10 @@ "InputMessages", "InputMessagesTemplate", "InputMessagesTemplateTemplate", - "InputMessagesTemplateTemplateMessage", - "InputMessagesTemplateTemplateMessageContent", - "InputMessagesTemplateTemplateMessageContentOutputText", + "InputMessagesTemplateTemplateEvalItem", + "InputMessagesTemplateTemplateEvalItemContent", + "InputMessagesTemplateTemplateEvalItemContentOutputText", + "InputMessagesTemplateTemplateEvalItemContentInputImage", "InputMessagesItemReference", "SamplingParams", "SamplingParamsResponseFormat", @@ -84,7 +86,7 @@ class SourceStoredCompletions(TypedDict, total=False): Source: TypeAlias = Union[SourceFileContent, SourceFileID, SourceStoredCompletions] -class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=False): +class InputMessagesTemplateTemplateEvalItemContentOutputText(TypedDict, total=False): text: Required[str] """The text output from the model.""" @@ -92,14 +94,33 @@ class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=Fal """The type of the output text. Always `output_text`.""" -InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[ - str, ResponseInputTextParam, InputMessagesTemplateTemplateMessageContentOutputText +class InputMessagesTemplateTemplateEvalItemContentInputImage(TypedDict, total=False): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. 
+ """ + + +InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ + str, + ResponseInputTextParam, + InputMessagesTemplateTemplateEvalItemContentOutputText, + InputMessagesTemplateTemplateEvalItemContentInputImage, + ResponseInputAudioParam, + Iterable[object], ] -class InputMessagesTemplateTemplateMessage(TypedDict, total=False): - content: Required[InputMessagesTemplateTemplateMessageContent] - """Text inputs to the model - can contain template strings.""" +class InputMessagesTemplateTemplateEvalItem(TypedDict, total=False): + content: Required[InputMessagesTemplateTemplateEvalItemContent] + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. @@ -111,7 +132,7 @@ class InputMessagesTemplateTemplateMessage(TypedDict, total=False): """The type of the message input. Always `message`.""" -InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateMessage] +InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateEvalItem] class InputMessagesTemplate(TypedDict, total=False): @@ -161,7 +182,7 @@ class SamplingParams(TypedDict, total=False): temperature: float """A higher temperature increases randomness in the outputs.""" - tools: Iterable[ChatCompletionToolParam] + tools: Iterable[ChatCompletionFunctionToolParam] """A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of diff --git a/portkey_ai/_vendor/openai/types/evals/run_cancel_response.py b/portkey_ai/_vendor/openai/types/evals/run_cancel_response.py index 12cc8680..44f9cfc4 100644 --- a/portkey_ai/_vendor/openai/types/evals/run_cancel_response.py +++ b/portkey_ai/_vendor/openai/types/evals/run_cancel_response.py @@ -12,6 +12,7 @@ from ..shared.metadata import Metadata from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text import ResponseInputText +from ..responses.response_input_audio import ResponseInputAudio from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource from ..responses.response_format_text_config import ResponseFormatTextConfig from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource @@ -32,6 +33,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", "DataSourceResponsesSamplingParamsText", @@ -138,14 +140,33 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( """The type of the output text. Always `output_text`.""" +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. 
+ """ + + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText + str, + ResponseInputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, + ResponseInputAudio, + List[object], ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/portkey_ai/_vendor/openai/types/evals/run_create_params.py b/portkey_ai/_vendor/openai/types/evals/run_create_params.py index 354a8113..ef9541ff 100644 --- a/portkey_ai/_vendor/openai/types/evals/run_create_params.py +++ b/portkey_ai/_vendor/openai/types/evals/run_create_params.py @@ -2,13 +2,15 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional +from typing import Dict, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..._types import SequenceNotStr from ..responses.tool_param import ToolParam from ..shared_params.metadata import Metadata from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text_param import ResponseInputTextParam +from ..responses.response_input_audio_param import ResponseInputAudioParam from .create_eval_jsonl_run_data_source_param import CreateEvalJSONLRunDataSourceParam from ..responses.response_format_text_config_param import ResponseFormatTextConfigParam from .create_eval_completions_run_data_source_param import CreateEvalCompletionsRunDataSourceParam @@ -29,6 +31,7 @@ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem", "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent", "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference", "DataSourceCreateEvalResponsesRunDataSourceSamplingParams", "DataSourceCreateEvalResponsesRunDataSourceSamplingParamsText", @@ -118,13 +121,13 @@ class DataSourceCreateEvalResponsesRunDataSourceSourceResponses(TypedDict, total temperature: Optional[float] """Sampling temperature. This is a query parameter used to select responses.""" - tools: Optional[List[str]] + tools: Optional[SequenceNotStr[str]] """List of tool names. This is a query parameter used to select responses.""" top_p: Optional[float] """Nucleus sampling parameter. This is a query parameter used to select responses.""" - users: Optional[List[str]] + users: Optional[SequenceNotStr[str]] """List of user identifiers. This is a query parameter used to select responses.""" @@ -153,16 +156,35 @@ class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEva """The type of the output text. 
Always `output_text`.""" +class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentInputImage( + TypedDict, total=False +): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ str, ResponseInputTextParam, DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentInputImage, + ResponseInputAudioParam, + Iterable[object], ] class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem(TypedDict, total=False): content: Required[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent] - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. diff --git a/portkey_ai/_vendor/openai/types/evals/run_create_response.py b/portkey_ai/_vendor/openai/types/evals/run_create_response.py index 776ebb41..70641d6d 100644 --- a/portkey_ai/_vendor/openai/types/evals/run_create_response.py +++ b/portkey_ai/_vendor/openai/types/evals/run_create_response.py @@ -12,6 +12,7 @@ from ..shared.metadata import Metadata from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text import ResponseInputText +from ..responses.response_input_audio import ResponseInputAudio from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource from ..responses.response_format_text_config import ResponseFormatTextConfig from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource @@ -32,6 +33,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", "DataSourceResponsesSamplingParamsText", @@ -138,14 +140,33 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( """The type of the output text. Always `output_text`.""" +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. 
+ """ + + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText + str, + ResponseInputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, + ResponseInputAudio, + List[object], ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/portkey_ai/_vendor/openai/types/evals/run_list_response.py b/portkey_ai/_vendor/openai/types/evals/run_list_response.py index 9e2374f9..e31d570a 100644 --- a/portkey_ai/_vendor/openai/types/evals/run_list_response.py +++ b/portkey_ai/_vendor/openai/types/evals/run_list_response.py @@ -12,6 +12,7 @@ from ..shared.metadata import Metadata from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text import ResponseInputText +from ..responses.response_input_audio import ResponseInputAudio from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource from ..responses.response_format_text_config import ResponseFormatTextConfig from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource @@ -32,6 +33,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", "DataSourceResponsesSamplingParamsText", @@ -138,14 +140,33 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( """The type of the output text. Always `output_text`.""" +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText + str, + ResponseInputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, + ResponseInputAudio, + List[object], ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. 
diff --git a/portkey_ai/_vendor/openai/types/evals/run_retrieve_response.py b/portkey_ai/_vendor/openai/types/evals/run_retrieve_response.py index a4f43ce3..62213d3e 100644 --- a/portkey_ai/_vendor/openai/types/evals/run_retrieve_response.py +++ b/portkey_ai/_vendor/openai/types/evals/run_retrieve_response.py @@ -12,6 +12,7 @@ from ..shared.metadata import Metadata from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text import ResponseInputText +from ..responses.response_input_audio import ResponseInputAudio from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource from ..responses.response_format_text_config import ResponseFormatTextConfig from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource @@ -32,6 +33,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", "DataSourceResponsesSamplingParamsText", @@ -138,14 +140,33 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( """The type of the output text. Always `output_text`.""" +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText + str, + ResponseInputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, + ResponseInputAudio, + List[object], ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/portkey_ai/_vendor/openai/types/file_create_params.py b/portkey_ai/_vendor/openai/types/file_create_params.py index 728dfd35..f4583b16 100644 --- a/portkey_ai/_vendor/openai/types/file_create_params.py +++ b/portkey_ai/_vendor/openai/types/file_create_params.py @@ -2,12 +2,12 @@ from __future__ import annotations -from typing_extensions import Required, TypedDict +from typing_extensions import Literal, Required, TypedDict from .._types import FileTypes from .file_purpose import FilePurpose -__all__ = ["FileCreateParams"] +__all__ = ["FileCreateParams", "ExpiresAfter"] class FileCreateParams(TypedDict, total=False): @@ -22,3 +22,24 @@ class FileCreateParams(TypedDict, total=False): fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used for eval data sets """ + + expires_after: ExpiresAfter + """The expiration policy for a file. + + By default, files with `purpose=batch` expire after 30 days and all other files + are persisted until they are manually deleted. 
+ """ + + +class ExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["created_at"]] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `created_at`. + """ + + seconds: Required[int] + """The number of seconds after the anchor time that the file will expire. + + Must be between 3600 (1 hour) and 2592000 (30 days). + """ diff --git a/portkey_ai/_vendor/openai/types/file_object.py b/portkey_ai/_vendor/openai/types/file_object.py index 1d65e698..883c2de0 100644 --- a/portkey_ai/_vendor/openai/types/file_object.py +++ b/portkey_ai/_vendor/openai/types/file_object.py @@ -25,12 +25,19 @@ class FileObject(BaseModel): """The object type, which is always `file`.""" purpose: Literal[ - "assistants", "assistants_output", "batch", "batch_output", "fine-tune", "fine-tune-results", "vision" + "assistants", + "assistants_output", + "batch", + "batch_output", + "fine-tune", + "fine-tune-results", + "vision", + "user_data", ] """The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, - `fine-tune`, `fine-tune-results` and `vision`. + `fine-tune`, `fine-tune-results`, `vision`, and `user_data`. """ status: Literal["uploaded", "processed", "error"] diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_create_params.py b/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_create_params.py index 92f98f21..e7cf4e4e 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_create_params.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/checkpoints/permission_create_params.py @@ -2,12 +2,13 @@ from __future__ import annotations -from typing import List from typing_extensions import Required, TypedDict +from ...._types import SequenceNotStr + __all__ = ["PermissionCreateParams"] class PermissionCreateParams(TypedDict, total=False): - project_ids: Required[List[str]] + project_ids: Required[SequenceNotStr[str]] """The project identifiers to grant access to.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/job_create_params.py b/portkey_ai/_vendor/openai/types/fine_tuning/job_create_params.py index 6b2f41cb..351d4e0e 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/job_create_params.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/job_create_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Union, Iterable, Optional +from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict +from ..._types import SequenceNotStr from .dpo_method_param import DpoMethodParam from ..shared_params.metadata import Metadata from .supervised_method_param import SupervisedMethodParam @@ -37,7 +38,8 @@ class JobCreateParams(TypedDict, total=False): [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) format. - See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + See the + [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) for more details. """ @@ -91,7 +93,8 @@ class JobCreateParams(TypedDict, total=False): Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + See the + [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) for more details. 
""" @@ -135,7 +138,7 @@ class IntegrationWandb(TypedDict, total=False): If not set, we will use the Job ID as the name. """ - tags: List[str] + tags: SequenceNotStr[str] """A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some default tags are generated diff --git a/portkey_ai/_vendor/openai/types/graders/label_model_grader.py b/portkey_ai/_vendor/openai/types/graders/label_model_grader.py index d95ccc6d..0929349c 100644 --- a/portkey_ai/_vendor/openai/types/graders/label_model_grader.py +++ b/portkey_ai/_vendor/openai/types/graders/label_model_grader.py @@ -5,8 +5,9 @@ from ..._models import BaseModel from ..responses.response_input_text import ResponseInputText +from ..responses.response_input_audio import ResponseInputAudio -__all__ = ["LabelModelGrader", "Input", "InputContent", "InputContentOutputText"] +__all__ = ["LabelModelGrader", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] class InputContentOutputText(BaseModel): @@ -17,12 +18,28 @@ class InputContentOutputText(BaseModel): """The type of the output text. Always `output_text`.""" -InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText] +class InputContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + +InputContent: TypeAlias = Union[ + str, ResponseInputText, InputContentOutputText, InputContentInputImage, ResponseInputAudio, List[object] +] class Input(BaseModel): content: InputContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/portkey_ai/_vendor/openai/types/graders/label_model_grader_param.py b/portkey_ai/_vendor/openai/types/graders/label_model_grader_param.py index 76d01421..7bd6fdb4 100644 --- a/portkey_ai/_vendor/openai/types/graders/label_model_grader_param.py +++ b/portkey_ai/_vendor/openai/types/graders/label_model_grader_param.py @@ -2,12 +2,14 @@ from __future__ import annotations -from typing import List, Union, Iterable +from typing import Union, Iterable from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..._types import SequenceNotStr from ..responses.response_input_text_param import ResponseInputTextParam +from ..responses.response_input_audio_param import ResponseInputAudioParam -__all__ = ["LabelModelGraderParam", "Input", "InputContent", "InputContentOutputText"] +__all__ = ["LabelModelGraderParam", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] class InputContentOutputText(TypedDict, total=False): @@ -18,12 +20,33 @@ class InputContentOutputText(TypedDict, total=False): """The type of the output text. Always `output_text`.""" -InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText] +class InputContentInputImage(TypedDict, total=False): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. 
+ """ + + +InputContent: TypeAlias = Union[ + str, + ResponseInputTextParam, + InputContentOutputText, + InputContentInputImage, + ResponseInputAudioParam, + Iterable[object], +] class Input(TypedDict, total=False): content: Required[InputContent] - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. @@ -38,7 +61,7 @@ class Input(TypedDict, total=False): class LabelModelGraderParam(TypedDict, total=False): input: Required[Iterable[Input]] - labels: Required[List[str]] + labels: Required[SequenceNotStr[str]] """The labels to assign to each item in the evaluation.""" model: Required[str] @@ -47,7 +70,7 @@ class LabelModelGraderParam(TypedDict, total=False): name: Required[str] """The name of the grader.""" - passing_labels: Required[List[str]] + passing_labels: Required[SequenceNotStr[str]] """The labels that indicate a passing result. Must be a subset of labels.""" type: Required[Literal["label_model"]] diff --git a/portkey_ai/_vendor/openai/types/graders/score_model_grader.py b/portkey_ai/_vendor/openai/types/graders/score_model_grader.py index 1349f75a..fc221b8e 100644 --- a/portkey_ai/_vendor/openai/types/graders/score_model_grader.py +++ b/portkey_ai/_vendor/openai/types/graders/score_model_grader.py @@ -5,8 +5,9 @@ from ..._models import BaseModel from ..responses.response_input_text import ResponseInputText +from ..responses.response_input_audio import ResponseInputAudio -__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText"] +__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] class InputContentOutputText(BaseModel): @@ -17,12 +18,28 @@ class InputContentOutputText(BaseModel): """The type of the output text. Always `output_text`.""" -InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText] +class InputContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + +InputContent: TypeAlias = Union[ + str, ResponseInputText, InputContentOutputText, InputContentInputImage, ResponseInputAudio, List[object] +] class Input(BaseModel): content: InputContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. 
diff --git a/portkey_ai/_vendor/openai/types/graders/score_model_grader_param.py b/portkey_ai/_vendor/openai/types/graders/score_model_grader_param.py index 673f14e4..15100bb7 100644 --- a/portkey_ai/_vendor/openai/types/graders/score_model_grader_param.py +++ b/portkey_ai/_vendor/openai/types/graders/score_model_grader_param.py @@ -6,8 +6,9 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..responses.response_input_text_param import ResponseInputTextParam +from ..responses.response_input_audio_param import ResponseInputAudioParam -__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText"] +__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] class InputContentOutputText(TypedDict, total=False): @@ -18,12 +19,33 @@ class InputContentOutputText(TypedDict, total=False): """The type of the output text. Always `output_text`.""" -InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText] +class InputContentInputImage(TypedDict, total=False): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + +InputContent: TypeAlias = Union[ + str, + ResponseInputTextParam, + InputContentOutputText, + InputContentInputImage, + ResponseInputAudioParam, + Iterable[object], +] class Input(TypedDict, total=False): content: Required[InputContent] - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. diff --git a/portkey_ai/_vendor/openai/types/graders/text_similarity_grader.py b/portkey_ai/_vendor/openai/types/graders/text_similarity_grader.py index 738d3177..9082ac89 100644 --- a/portkey_ai/_vendor/openai/types/graders/text_similarity_grader.py +++ b/portkey_ai/_vendor/openai/types/graders/text_similarity_grader.py @@ -9,12 +9,22 @@ class TextSimilarityGrader(BaseModel): evaluation_metric: Literal[ - "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l" + "cosine", + "fuzzy_match", + "bleu", + "gleu", + "meteor", + "rouge_1", + "rouge_2", + "rouge_3", + "rouge_4", + "rouge_5", + "rouge_l", ] """The evaluation metric to use. - One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, - `rouge_4`, `rouge_5`, or `rouge_l`. + One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, + `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. 
""" input: str diff --git a/portkey_ai/_vendor/openai/types/graders/text_similarity_grader_param.py b/portkey_ai/_vendor/openai/types/graders/text_similarity_grader_param.py index db145532..1646afc8 100644 --- a/portkey_ai/_vendor/openai/types/graders/text_similarity_grader_param.py +++ b/portkey_ai/_vendor/openai/types/graders/text_similarity_grader_param.py @@ -10,13 +10,23 @@ class TextSimilarityGraderParam(TypedDict, total=False): evaluation_metric: Required[ Literal[ - "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l" + "cosine", + "fuzzy_match", + "bleu", + "gleu", + "meteor", + "rouge_1", + "rouge_2", + "rouge_3", + "rouge_4", + "rouge_5", + "rouge_l", ] ] """The evaluation metric to use. - One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, - `rouge_4`, `rouge_5`, or `rouge_l`. + One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, + `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. """ input: Required[str] diff --git a/portkey_ai/_vendor/openai/types/image_edit_completed_event.py b/portkey_ai/_vendor/openai/types/image_edit_completed_event.py new file mode 100644 index 00000000..a40682da --- /dev/null +++ b/portkey_ai/_vendor/openai/types/image_edit_completed_event.py @@ -0,0 +1,55 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["ImageEditCompletedEvent", "Usage", "UsageInputTokensDetails"] + + +class UsageInputTokensDetails(BaseModel): + image_tokens: int + """The number of image tokens in the input prompt.""" + + text_tokens: int + """The number of text tokens in the input prompt.""" + + +class Usage(BaseModel): + input_tokens: int + """The number of tokens (images and text) in the input prompt.""" + + input_tokens_details: UsageInputTokensDetails + """The input tokens detailed information for the image generation.""" + + output_tokens: int + """The number of image tokens in the output image.""" + + total_tokens: int + """The total number of tokens (images and text) used for the image generation.""" + + +class ImageEditCompletedEvent(BaseModel): + b64_json: str + """Base64-encoded final edited image data, suitable for rendering as an image.""" + + background: Literal["transparent", "opaque", "auto"] + """The background setting for the edited image.""" + + created_at: int + """The Unix timestamp when the event was created.""" + + output_format: Literal["png", "webp", "jpeg"] + """The output format for the edited image.""" + + quality: Literal["low", "medium", "high", "auto"] + """The quality setting for the edited image.""" + + size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"] + """The size of the edited image.""" + + type: Literal["image_edit.completed"] + """The type of the event. 
Always `image_edit.completed`.""" + + usage: Usage + """For `gpt-image-1` only, the token usage information for the image generation.""" diff --git a/portkey_ai/_vendor/openai/types/image_edit_params.py b/portkey_ai/_vendor/openai/types/image_edit_params.py index 4f931ce1..065d9789 100644 --- a/portkey_ai/_vendor/openai/types/image_edit_params.py +++ b/portkey_ai/_vendor/openai/types/image_edit_params.py @@ -2,17 +2,17 @@ from __future__ import annotations -from typing import List, Union, Optional +from typing import Union, Optional from typing_extensions import Literal, Required, TypedDict -from .._types import FileTypes +from .._types import FileTypes, SequenceNotStr from .image_model import ImageModel -__all__ = ["ImageEditParams"] +__all__ = ["ImageEditParamsBase", "ImageEditParamsNonStreaming", "ImageEditParamsStreaming"] -class ImageEditParams(TypedDict, total=False): - image: Required[Union[FileTypes, List[FileTypes]]] +class ImageEditParamsBase(TypedDict, total=False): + image: Required[Union[FileTypes, SequenceNotStr[FileTypes]]] """The image(s) to edit. Must be a supported image file or an array of images. For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than @@ -40,6 +40,13 @@ class ImageEditParams(TypedDict, total=False): be set to either `png` (default value) or `webp`. """ + input_fidelity: Optional[Literal["high", "low"]] + """ + Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + """ + mask: FileTypes """An additional image whose fully transparent areas (e.g. @@ -58,6 +65,31 @@ class ImageEditParams(TypedDict, total=False): n: Optional[int] """The number of images to generate. Must be between 1 and 10.""" + output_compression: Optional[int] + """The compression level (0-100%) for the generated images. + + This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg` + output formats, and defaults to 100. + """ + + output_format: Optional[Literal["png", "jpeg", "webp"]] + """The format in which the generated images are returned. + + This parameter is only supported for `gpt-image-1`. Must be one of `png`, + `jpeg`, or `webp`. The default value is `png`. + """ + + partial_images: Optional[int] + """The number of partial images to generate. + + This parameter is used for streaming responses that return partial images. Value + must be between 0 and 3. When set to 0, the response will be a single image sent + in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + """ + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] """The quality of the image that will be generated. @@ -87,3 +119,26 @@ class ImageEditParams(TypedDict, total=False): and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ + + +class ImageEditParamsNonStreaming(ImageEditParamsBase, total=False): + stream: Optional[Literal[False]] + """Edit the image in streaming mode. + + Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + """ + + +class ImageEditParamsStreaming(ImageEditParamsBase): + stream: Required[Literal[True]] + """Edit the image in streaming mode. + + Defaults to `false`. 
See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + """ + + +ImageEditParams = Union[ImageEditParamsNonStreaming, ImageEditParamsStreaming] diff --git a/portkey_ai/_vendor/openai/types/image_edit_partial_image_event.py b/portkey_ai/_vendor/openai/types/image_edit_partial_image_event.py new file mode 100644 index 00000000..20da45ef --- /dev/null +++ b/portkey_ai/_vendor/openai/types/image_edit_partial_image_event.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["ImageEditPartialImageEvent"] + + +class ImageEditPartialImageEvent(BaseModel): + b64_json: str + """Base64-encoded partial image data, suitable for rendering as an image.""" + + background: Literal["transparent", "opaque", "auto"] + """The background setting for the requested edited image.""" + + created_at: int + """The Unix timestamp when the event was created.""" + + output_format: Literal["png", "webp", "jpeg"] + """The output format for the requested edited image.""" + + partial_image_index: int + """0-based index for the partial image (streaming).""" + + quality: Literal["low", "medium", "high", "auto"] + """The quality setting for the requested edited image.""" + + size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"] + """The size of the requested edited image.""" + + type: Literal["image_edit.partial_image"] + """The type of the event. Always `image_edit.partial_image`.""" diff --git a/portkey_ai/_vendor/openai/types/image_edit_stream_event.py b/portkey_ai/_vendor/openai/types/image_edit_stream_event.py new file mode 100644 index 00000000..759f6c6d --- /dev/null +++ b/portkey_ai/_vendor/openai/types/image_edit_stream_event.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from .._utils import PropertyInfo +from .image_edit_completed_event import ImageEditCompletedEvent +from .image_edit_partial_image_event import ImageEditPartialImageEvent + +__all__ = ["ImageEditStreamEvent"] + +ImageEditStreamEvent: TypeAlias = Annotated[ + Union[ImageEditPartialImageEvent, ImageEditCompletedEvent], PropertyInfo(discriminator="type") +] diff --git a/portkey_ai/_vendor/openai/types/image_gen_completed_event.py b/portkey_ai/_vendor/openai/types/image_gen_completed_event.py new file mode 100644 index 00000000..e78da842 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/image_gen_completed_event.py @@ -0,0 +1,55 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["ImageGenCompletedEvent", "Usage", "UsageInputTokensDetails"] + + +class UsageInputTokensDetails(BaseModel): + image_tokens: int + """The number of image tokens in the input prompt.""" + + text_tokens: int + """The number of text tokens in the input prompt.""" + + +class Usage(BaseModel): + input_tokens: int + """The number of tokens (images and text) in the input prompt.""" + + input_tokens_details: UsageInputTokensDetails + """The input tokens detailed information for the image generation.""" + + output_tokens: int + """The number of image tokens in the output image.""" + + total_tokens: int + """The total number of tokens (images and text) used for the image generation.""" + + +class ImageGenCompletedEvent(BaseModel): + b64_json: str + """Base64-encoded image data, suitable for rendering as an image.""" + + background: Literal["transparent", "opaque", "auto"] + """The background setting for the generated image.""" + + created_at: int + """The Unix timestamp when the event was created.""" + + output_format: Literal["png", "webp", "jpeg"] + """The output format for the generated image.""" + + quality: Literal["low", "medium", "high", "auto"] + """The quality setting for the generated image.""" + + size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"] + """The size of the generated image.""" + + type: Literal["image_generation.completed"] + """The type of the event. Always `image_generation.completed`.""" + + usage: Usage + """For `gpt-image-1` only, the token usage information for the image generation.""" diff --git a/portkey_ai/_vendor/openai/types/image_gen_partial_image_event.py b/portkey_ai/_vendor/openai/types/image_gen_partial_image_event.py new file mode 100644 index 00000000..965d4506 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/image_gen_partial_image_event.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["ImageGenPartialImageEvent"] + + +class ImageGenPartialImageEvent(BaseModel): + b64_json: str + """Base64-encoded partial image data, suitable for rendering as an image.""" + + background: Literal["transparent", "opaque", "auto"] + """The background setting for the requested image.""" + + created_at: int + """The Unix timestamp when the event was created.""" + + output_format: Literal["png", "webp", "jpeg"] + """The output format for the requested image.""" + + partial_image_index: int + """0-based index for the partial image (streaming).""" + + quality: Literal["low", "medium", "high", "auto"] + """The quality setting for the requested image.""" + + size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"] + """The size of the requested image.""" + + type: Literal["image_generation.partial_image"] + """The type of the event. Always `image_generation.partial_image`.""" diff --git a/portkey_ai/_vendor/openai/types/image_gen_stream_event.py b/portkey_ai/_vendor/openai/types/image_gen_stream_event.py new file mode 100644 index 00000000..7dde5d52 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/image_gen_stream_event.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from .._utils import PropertyInfo +from .image_gen_completed_event import ImageGenCompletedEvent +from .image_gen_partial_image_event import ImageGenPartialImageEvent + +__all__ = ["ImageGenStreamEvent"] + +ImageGenStreamEvent: TypeAlias = Annotated[ + Union[ImageGenPartialImageEvent, ImageGenCompletedEvent], PropertyInfo(discriminator="type") +] diff --git a/portkey_ai/_vendor/openai/types/image_generate_params.py b/portkey_ai/_vendor/openai/types/image_generate_params.py index 8fc10220..e9e9292c 100644 --- a/portkey_ai/_vendor/openai/types/image_generate_params.py +++ b/portkey_ai/_vendor/openai/types/image_generate_params.py @@ -7,10 +7,10 @@ from .image_model import ImageModel -__all__ = ["ImageGenerateParams"] +__all__ = ["ImageGenerateParamsBase", "ImageGenerateParamsNonStreaming", "ImageGenerateParamsStreaming"] -class ImageGenerateParams(TypedDict, total=False): +class ImageGenerateParamsBase(TypedDict, total=False): prompt: Required[str] """A text description of the desired image(s). @@ -62,6 +62,17 @@ class ImageGenerateParams(TypedDict, total=False): `jpeg`, or `webp`. """ + partial_images: Optional[int] + """The number of partial images to generate. + + This parameter is used for streaming responses that return partial images. Value + must be between 0 and 3. When set to 0, the response will be a single image sent + in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + """ + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] """The quality of the image that will be generated. @@ -107,3 +118,26 @@ class ImageGenerateParams(TypedDict, total=False): and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ + + +class ImageGenerateParamsNonStreaming(ImageGenerateParamsBase, total=False): + stream: Optional[Literal[False]] + """Generate the image in streaming mode. + + Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. + """ + + +class ImageGenerateParamsStreaming(ImageGenerateParamsBase): + stream: Required[Literal[True]] + """Generate the image in streaming mode. + + Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. + """ + + +ImageGenerateParams = Union[ImageGenerateParamsNonStreaming, ImageGenerateParamsStreaming] diff --git a/portkey_ai/_vendor/openai/types/images_response.py b/portkey_ai/_vendor/openai/types/images_response.py index df454afa..89cc71df 100644 --- a/portkey_ai/_vendor/openai/types/images_response.py +++ b/portkey_ai/_vendor/openai/types/images_response.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from typing import List, Optional +from typing_extensions import Literal from .image import Image from .._models import BaseModel @@ -24,7 +25,7 @@ class Usage(BaseModel): """The input tokens detailed information for the image generation.""" output_tokens: int - """The number of image tokens in the output image.""" + """The number of output tokens generated by the model.""" total_tokens: int """The total number of tokens (images and text) used for the image generation.""" @@ -34,8 +35,26 @@ class ImagesResponse(BaseModel): created: int """The Unix timestamp (in seconds) of when the image was created.""" + background: Optional[Literal["transparent", "opaque"]] = None + """The background parameter used for the image generation. + + Either `transparent` or `opaque`. + """ + data: Optional[List[Image]] = None """The list of generated images.""" + output_format: Optional[Literal["png", "webp", "jpeg"]] = None + """The output format of the image generation. Either `png`, `webp`, or `jpeg`.""" + + quality: Optional[Literal["low", "medium", "high"]] = None + """The quality of the image generated. Either `low`, `medium`, or `high`.""" + + size: Optional[Literal["1024x1024", "1024x1536", "1536x1024"]] = None + """The size of the image generated. + + Either `1024x1024`, `1024x1536`, or `1536x1024`. + """ + usage: Optional[Usage] = None """For `gpt-image-1` only, the token usage information for the image generation.""" diff --git a/portkey_ai/_vendor/openai/types/moderation_create_params.py b/portkey_ai/_vendor/openai/types/moderation_create_params.py index 3ea2f3cd..65d9b7e5 100644 --- a/portkey_ai/_vendor/openai/types/moderation_create_params.py +++ b/portkey_ai/_vendor/openai/types/moderation_create_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Union, Iterable +from typing import Union, Iterable from typing_extensions import Required, TypedDict +from .._types import SequenceNotStr from .moderation_model import ModerationModel from .moderation_multi_modal_input_param import ModerationMultiModalInputParam @@ -12,7 +13,7 @@ class ModerationCreateParams(TypedDict, total=False): - input: Required[Union[str, List[str], Iterable[ModerationMultiModalInputParam]]] + input: Required[Union[str, SequenceNotStr[str], Iterable[ModerationMultiModalInputParam]]] """Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input diff --git a/portkey_ai/_vendor/openai/types/realtime/__init__.py b/portkey_ai/_vendor/openai/types/realtime/__init__.py new file mode 100644 index 00000000..2d947c8a --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/__init__.py @@ -0,0 +1,233 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .realtime_error import RealtimeError as RealtimeError +from .conversation_item import ConversationItem as ConversationItem +from .realtime_response import RealtimeResponse as RealtimeResponse +from .audio_transcription import AudioTranscription as AudioTranscription +from .log_prob_properties import LogProbProperties as LogProbProperties +from .realtime_truncation import RealtimeTruncation as RealtimeTruncation +from .response_done_event import ResponseDoneEvent as ResponseDoneEvent +from .noise_reduction_type import NoiseReductionType as NoiseReductionType +from .realtime_error_event import RealtimeErrorEvent as RealtimeErrorEvent +from .session_update_event import SessionUpdateEvent as SessionUpdateEvent +from .mcp_list_tools_failed import McpListToolsFailed as McpListToolsFailed +from .realtime_audio_config import RealtimeAudioConfig as RealtimeAudioConfig +from .realtime_client_event import RealtimeClientEvent as RealtimeClientEvent +from .realtime_server_event import RealtimeServerEvent as RealtimeServerEvent +from .realtime_tools_config import RealtimeToolsConfig as RealtimeToolsConfig +from .response_cancel_event import ResponseCancelEvent as ResponseCancelEvent +from .response_create_event import ResponseCreateEvent as ResponseCreateEvent +from .session_created_event import SessionCreatedEvent as SessionCreatedEvent +from .session_updated_event import SessionUpdatedEvent as SessionUpdatedEvent +from .conversation_item_done import ConversationItemDone as ConversationItemDone +from .realtime_audio_formats import RealtimeAudioFormats as RealtimeAudioFormats +from .realtime_function_tool import RealtimeFunctionTool as RealtimeFunctionTool +from .realtime_mcp_tool_call import RealtimeMcpToolCall as RealtimeMcpToolCall +from .realtime_mcphttp_error import RealtimeMcphttpError as RealtimeMcphttpError +from .response_created_event import ResponseCreatedEvent as ResponseCreatedEvent +from .conversation_item_added import ConversationItemAdded as ConversationItemAdded +from .conversation_item_param import ConversationItemParam as ConversationItemParam +from .realtime_connect_params import RealtimeConnectParams as RealtimeConnectParams +from .realtime_mcp_list_tools import RealtimeMcpListTools as RealtimeMcpListTools +from .realtime_response_usage import RealtimeResponseUsage as RealtimeResponseUsage +from .realtime_tracing_config import RealtimeTracingConfig as RealtimeTracingConfig +from .mcp_list_tools_completed import McpListToolsCompleted as McpListToolsCompleted +from .realtime_response_status import RealtimeResponseStatus as RealtimeResponseStatus +from .response_mcp_call_failed import ResponseMcpCallFailed as ResponseMcpCallFailed +from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent +from .audio_transcription_param import AudioTranscriptionParam as AudioTranscriptionParam +from .rate_limits_updated_event import RateLimitsUpdatedEvent as RateLimitsUpdatedEvent +from .realtime_truncation_param import RealtimeTruncationParam as RealtimeTruncationParam +from .response_audio_done_event import ResponseAudioDoneEvent as ResponseAudioDoneEvent +from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDeltaEvent +from .conversation_created_event import ConversationCreatedEvent as ConversationCreatedEvent +from .mcp_list_tools_in_progress import McpListToolsInProgress as McpListToolsInProgress +from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent +from 
.session_update_event_param import SessionUpdateEventParam as SessionUpdateEventParam +from .client_secret_create_params import ClientSecretCreateParams as ClientSecretCreateParams +from .realtime_audio_config_input import RealtimeAudioConfigInput as RealtimeAudioConfigInput +from .realtime_audio_config_param import RealtimeAudioConfigParam as RealtimeAudioConfigParam +from .realtime_client_event_param import RealtimeClientEventParam as RealtimeClientEventParam +from .realtime_mcp_protocol_error import RealtimeMcpProtocolError as RealtimeMcpProtocolError +from .realtime_tool_choice_config import RealtimeToolChoiceConfig as RealtimeToolChoiceConfig +from .realtime_tools_config_param import RealtimeToolsConfigParam as RealtimeToolsConfigParam +from .realtime_tools_config_union import RealtimeToolsConfigUnion as RealtimeToolsConfigUnion +from .response_cancel_event_param import ResponseCancelEventParam as ResponseCancelEventParam +from .response_create_event_param import ResponseCreateEventParam as ResponseCreateEventParam +from .response_mcp_call_completed import ResponseMcpCallCompleted as ResponseMcpCallCompleted +from .realtime_audio_config_output import RealtimeAudioConfigOutput as RealtimeAudioConfigOutput +from .realtime_audio_formats_param import RealtimeAudioFormatsParam as RealtimeAudioFormatsParam +from .realtime_function_tool_param import RealtimeFunctionToolParam as RealtimeFunctionToolParam +from .realtime_mcp_tool_call_param import RealtimeMcpToolCallParam as RealtimeMcpToolCallParam +from .realtime_mcphttp_error_param import RealtimeMcphttpErrorParam as RealtimeMcphttpErrorParam +from .client_secret_create_response import ClientSecretCreateResponse as ClientSecretCreateResponse +from .realtime_mcp_approval_request import RealtimeMcpApprovalRequest as RealtimeMcpApprovalRequest +from .realtime_mcp_list_tools_param import RealtimeMcpListToolsParam as RealtimeMcpListToolsParam +from .realtime_tracing_config_param import RealtimeTracingConfigParam as RealtimeTracingConfigParam +from .response_mcp_call_in_progress import ResponseMcpCallInProgress as ResponseMcpCallInProgress +from .conversation_item_create_event import ConversationItemCreateEvent as ConversationItemCreateEvent +from .conversation_item_delete_event import ConversationItemDeleteEvent as ConversationItemDeleteEvent +from .input_audio_buffer_clear_event import InputAudioBufferClearEvent as InputAudioBufferClearEvent +from .realtime_mcp_approval_response import RealtimeMcpApprovalResponse as RealtimeMcpApprovalResponse +from .realtime_session_client_secret import RealtimeSessionClientSecret as RealtimeSessionClientSecret +from .conversation_item_created_event import ConversationItemCreatedEvent as ConversationItemCreatedEvent +from .conversation_item_deleted_event import ConversationItemDeletedEvent as ConversationItemDeletedEvent +from .input_audio_buffer_append_event import InputAudioBufferAppendEvent as InputAudioBufferAppendEvent +from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent as InputAudioBufferCommitEvent +from .output_audio_buffer_clear_event import OutputAudioBufferClearEvent as OutputAudioBufferClearEvent +from .realtime_response_create_params import RealtimeResponseCreateParams as RealtimeResponseCreateParams +from .realtime_session_create_request import RealtimeSessionCreateRequest as RealtimeSessionCreateRequest +from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent +from .conversation_item_retrieve_event import 
ConversationItemRetrieveEvent as ConversationItemRetrieveEvent +from .conversation_item_truncate_event import ConversationItemTruncateEvent as ConversationItemTruncateEvent +from .input_audio_buffer_cleared_event import InputAudioBufferClearedEvent as InputAudioBufferClearedEvent +from .realtime_session_create_response import RealtimeSessionCreateResponse as RealtimeSessionCreateResponse +from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent +from .response_mcp_call_arguments_done import ResponseMcpCallArgumentsDone as ResponseMcpCallArgumentsDone +from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent +from .conversation_item_truncated_event import ConversationItemTruncatedEvent as ConversationItemTruncatedEvent +from .realtime_audio_config_input_param import RealtimeAudioConfigInputParam as RealtimeAudioConfigInputParam +from .realtime_mcp_protocol_error_param import RealtimeMcpProtocolErrorParam as RealtimeMcpProtocolErrorParam +from .realtime_mcp_tool_execution_error import RealtimeMcpToolExecutionError as RealtimeMcpToolExecutionError +from .realtime_response_create_mcp_tool import RealtimeResponseCreateMcpTool as RealtimeResponseCreateMcpTool +from .realtime_tool_choice_config_param import RealtimeToolChoiceConfigParam as RealtimeToolChoiceConfigParam +from .realtime_tools_config_union_param import RealtimeToolsConfigUnionParam as RealtimeToolsConfigUnionParam +from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent +from .response_mcp_call_arguments_delta import ResponseMcpCallArgumentsDelta as ResponseMcpCallArgumentsDelta +from .input_audio_buffer_committed_event import InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent +from .realtime_audio_config_output_param import RealtimeAudioConfigOutputParam as RealtimeAudioConfigOutputParam +from .realtime_audio_input_turn_detection import RealtimeAudioInputTurnDetection as RealtimeAudioInputTurnDetection +from .realtime_mcp_approval_request_param import RealtimeMcpApprovalRequestParam as RealtimeMcpApprovalRequestParam +from .realtime_truncation_retention_ratio import RealtimeTruncationRetentionRatio as RealtimeTruncationRetentionRatio +from .conversation_item_create_event_param import ConversationItemCreateEventParam as ConversationItemCreateEventParam +from .conversation_item_delete_event_param import ConversationItemDeleteEventParam as ConversationItemDeleteEventParam +from .input_audio_buffer_clear_event_param import InputAudioBufferClearEventParam as InputAudioBufferClearEventParam +from .input_audio_buffer_timeout_triggered import InputAudioBufferTimeoutTriggered as InputAudioBufferTimeoutTriggered +from .realtime_mcp_approval_response_param import RealtimeMcpApprovalResponseParam as RealtimeMcpApprovalResponseParam +from .realtime_transcription_session_audio import RealtimeTranscriptionSessionAudio as RealtimeTranscriptionSessionAudio +from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent +from .input_audio_buffer_append_event_param import InputAudioBufferAppendEventParam as InputAudioBufferAppendEventParam +from .input_audio_buffer_commit_event_param import InputAudioBufferCommitEventParam as InputAudioBufferCommitEventParam +from .output_audio_buffer_clear_event_param import OutputAudioBufferClearEventParam as OutputAudioBufferClearEventParam +from .realtime_response_create_audio_output import ( 
+ RealtimeResponseCreateAudioOutput as RealtimeResponseCreateAudioOutput, +) +from .realtime_response_create_params_param import ( + RealtimeResponseCreateParamsParam as RealtimeResponseCreateParamsParam, +) +from .realtime_session_create_request_param import ( + RealtimeSessionCreateRequestParam as RealtimeSessionCreateRequestParam, +) +from .response_audio_transcript_delta_event import ( + ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, +) +from .conversation_item_retrieve_event_param import ( + ConversationItemRetrieveEventParam as ConversationItemRetrieveEventParam, +) +from .conversation_item_truncate_event_param import ( + ConversationItemTruncateEventParam as ConversationItemTruncateEventParam, +) +from .input_audio_buffer_speech_started_event import ( + InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent, +) +from .input_audio_buffer_speech_stopped_event import ( + InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent, +) +from .realtime_conversation_item_user_message import ( + RealtimeConversationItemUserMessage as RealtimeConversationItemUserMessage, +) +from .realtime_mcp_tool_execution_error_param import ( + RealtimeMcpToolExecutionErrorParam as RealtimeMcpToolExecutionErrorParam, +) +from .realtime_response_create_mcp_tool_param import ( + RealtimeResponseCreateMcpToolParam as RealtimeResponseCreateMcpToolParam, +) +from .realtime_conversation_item_function_call import ( + RealtimeConversationItemFunctionCall as RealtimeConversationItemFunctionCall, +) +from .realtime_audio_input_turn_detection_param import ( + RealtimeAudioInputTurnDetectionParam as RealtimeAudioInputTurnDetectionParam, +) +from .realtime_conversation_item_system_message import ( + RealtimeConversationItemSystemMessage as RealtimeConversationItemSystemMessage, +) +from .realtime_truncation_retention_ratio_param import ( + RealtimeTruncationRetentionRatioParam as RealtimeTruncationRetentionRatioParam, +) +from .realtime_transcription_session_audio_input import ( + RealtimeTranscriptionSessionAudioInput as RealtimeTranscriptionSessionAudioInput, +) +from .realtime_transcription_session_audio_param import ( + RealtimeTranscriptionSessionAudioParam as RealtimeTranscriptionSessionAudioParam, +) +from .realtime_response_create_audio_output_param import ( + RealtimeResponseCreateAudioOutputParam as RealtimeResponseCreateAudioOutputParam, +) +from .realtime_response_usage_input_token_details import ( + RealtimeResponseUsageInputTokenDetails as RealtimeResponseUsageInputTokenDetails, +) +from .response_function_call_arguments_done_event import ( + ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, +) +from .realtime_conversation_item_assistant_message import ( + RealtimeConversationItemAssistantMessage as RealtimeConversationItemAssistantMessage, +) +from .realtime_response_usage_output_token_details import ( + RealtimeResponseUsageOutputTokenDetails as RealtimeResponseUsageOutputTokenDetails, +) +from .response_function_call_arguments_delta_event import ( + ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, +) +from .realtime_conversation_item_user_message_param import ( + RealtimeConversationItemUserMessageParam as RealtimeConversationItemUserMessageParam, +) +from .realtime_transcription_session_create_request import ( + RealtimeTranscriptionSessionCreateRequest as RealtimeTranscriptionSessionCreateRequest, +) +from .realtime_transcription_session_turn_detection import ( + 
RealtimeTranscriptionSessionTurnDetection as RealtimeTranscriptionSessionTurnDetection, +) +from .realtime_conversation_item_function_call_param import ( + RealtimeConversationItemFunctionCallParam as RealtimeConversationItemFunctionCallParam, +) +from .realtime_transcription_session_create_response import ( + RealtimeTranscriptionSessionCreateResponse as RealtimeTranscriptionSessionCreateResponse, +) +from .realtime_conversation_item_function_call_output import ( + RealtimeConversationItemFunctionCallOutput as RealtimeConversationItemFunctionCallOutput, +) +from .realtime_conversation_item_system_message_param import ( + RealtimeConversationItemSystemMessageParam as RealtimeConversationItemSystemMessageParam, +) +from .realtime_transcription_session_audio_input_param import ( + RealtimeTranscriptionSessionAudioInputParam as RealtimeTranscriptionSessionAudioInputParam, +) +from .realtime_conversation_item_assistant_message_param import ( + RealtimeConversationItemAssistantMessageParam as RealtimeConversationItemAssistantMessageParam, +) +from .conversation_item_input_audio_transcription_segment import ( + ConversationItemInputAudioTranscriptionSegment as ConversationItemInputAudioTranscriptionSegment, +) +from .realtime_transcription_session_create_request_param import ( + RealtimeTranscriptionSessionCreateRequestParam as RealtimeTranscriptionSessionCreateRequestParam, +) +from .realtime_conversation_item_function_call_output_param import ( + RealtimeConversationItemFunctionCallOutputParam as RealtimeConversationItemFunctionCallOutputParam, +) +from .conversation_item_input_audio_transcription_delta_event import ( + ConversationItemInputAudioTranscriptionDeltaEvent as ConversationItemInputAudioTranscriptionDeltaEvent, +) +from .conversation_item_input_audio_transcription_failed_event import ( + ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent, +) +from .realtime_transcription_session_audio_input_turn_detection import ( + RealtimeTranscriptionSessionAudioInputTurnDetection as RealtimeTranscriptionSessionAudioInputTurnDetection, +) +from .conversation_item_input_audio_transcription_completed_event import ( + ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent, +) +from .realtime_transcription_session_audio_input_turn_detection_param import ( + RealtimeTranscriptionSessionAudioInputTurnDetectionParam as RealtimeTranscriptionSessionAudioInputTurnDetectionParam, +) diff --git a/portkey_ai/_vendor/openai/types/realtime/audio_transcription.py b/portkey_ai/_vendor/openai/types/realtime/audio_transcription.py new file mode 100644 index 00000000..cf662b3a --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/audio_transcription.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["AudioTranscription"] + + +class AudioTranscription(BaseModel): + language: Optional[str] = None + """The language of the input audio. + + Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. + """ + + model: Optional[Literal["whisper-1", "gpt-4o-transcribe-latest", "gpt-4o-mini-transcribe", "gpt-4o-transcribe"]] = ( + None + ) + """The model to use for transcription. 
+ + Current options are `whisper-1`, `gpt-4o-transcribe-latest`, + `gpt-4o-mini-transcribe`, and `gpt-4o-transcribe`. + """ + + prompt: Optional[str] = None + """ + An optional text to guide the model's style or continue a previous audio + segment. For `whisper-1`, the + [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + For `gpt-4o-transcribe` models, the prompt is a free text string, for example + "expect words related to technology". + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/audio_transcription_param.py b/portkey_ai/_vendor/openai/types/realtime/audio_transcription_param.py new file mode 100644 index 00000000..fb09f105 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/audio_transcription_param.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["AudioTranscriptionParam"] + + +class AudioTranscriptionParam(TypedDict, total=False): + language: str + """The language of the input audio. + + Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. + """ + + model: Literal["whisper-1", "gpt-4o-transcribe-latest", "gpt-4o-mini-transcribe", "gpt-4o-transcribe"] + """The model to use for transcription. + + Current options are `whisper-1`, `gpt-4o-transcribe-latest`, + `gpt-4o-mini-transcribe`, and `gpt-4o-transcribe`. + """ + + prompt: str + """ + An optional text to guide the model's style or continue a previous audio + segment. For `whisper-1`, the + [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + For `gpt-4o-transcribe` models, the prompt is a free text string, for example + "expect words related to technology". + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/client_secret_create_params.py b/portkey_ai/_vendor/openai/types/realtime/client_secret_create_params.py new file mode 100644 index 00000000..5f0b0d79 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/client_secret_create_params.py @@ -0,0 +1,46 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypeAlias, TypedDict + +from .realtime_session_create_request_param import RealtimeSessionCreateRequestParam +from .realtime_transcription_session_create_request_param import RealtimeTranscriptionSessionCreateRequestParam + +__all__ = ["ClientSecretCreateParams", "ExpiresAfter", "Session"] + + +class ClientSecretCreateParams(TypedDict, total=False): + expires_after: ExpiresAfter + """Configuration for the client secret expiration. + + Expiration refers to the time after which a client secret will no longer be + valid for creating sessions. The session itself may continue after that time + once started. A secret can be used to create multiple sessions until it expires. + """ + + session: Session + """Session configuration to use for the client secret. + + Choose either a realtime session or a transcription session. + """ + + +class ExpiresAfter(TypedDict, total=False): + anchor: Literal["created_at"] + """ + The anchor point for the client secret expiration, meaning that `seconds` will + be added to the `created_at` time of the client secret to produce an expiration + timestamp. 
Only `created_at` is currently supported.
+    """
+
+    seconds: int
+    """The number of seconds from the anchor point to the expiration.
+
+    Select a value between `10` and `7200` (2 hours). This defaults to 600 seconds
+    (10 minutes) if not specified.
+    """
+
+
+Session: TypeAlias = Union[RealtimeSessionCreateRequestParam, RealtimeTranscriptionSessionCreateRequestParam]
diff --git a/portkey_ai/_vendor/openai/types/realtime/client_secret_create_response.py b/portkey_ai/_vendor/openai/types/realtime/client_secret_create_response.py
new file mode 100644
index 00000000..2aed66a2
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/realtime/client_secret_create_response.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .realtime_session_create_response import RealtimeSessionCreateResponse
+from .realtime_transcription_session_create_response import RealtimeTranscriptionSessionCreateResponse
+
+__all__ = ["ClientSecretCreateResponse", "Session"]
+
+Session: TypeAlias = Annotated[
+    Union[RealtimeSessionCreateResponse, RealtimeTranscriptionSessionCreateResponse], PropertyInfo(discriminator="type")
+]
+
+
+class ClientSecretCreateResponse(BaseModel):
+    expires_at: int
+    """Expiration timestamp for the client secret, in seconds since epoch."""
+
+    session: Session
+    """The session configuration for either a realtime or transcription session."""
+
+    value: str
+    """The generated client secret value."""
diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_created_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_created_event.py
new file mode 100644
index 00000000..6ec1dc8c
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/realtime/conversation_created_event.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ConversationCreatedEvent", "Conversation"]
+
+
+class Conversation(BaseModel):
+    id: Optional[str] = None
+    """The unique ID of the conversation."""
+
+    object: Optional[Literal["realtime.conversation"]] = None
+    """The object type, must be `realtime.conversation`."""
+
+
+class ConversationCreatedEvent(BaseModel):
+    conversation: Conversation
+    """The conversation resource."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    type: Literal["conversation.created"]
+    """The event type, must be `conversation.created`."""
diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item.py
new file mode 100644
index 00000000..be021520
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
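Putting the two client-secret shapes above together, a request body can be assembled directly from the TypedDicts. A minimal sketch, assuming the vendored import path mirrors the file layout in this diff; the `session` payload is elided because its fields are defined in the session request params, not here:

```python
from portkey_ai._vendor.openai.types.realtime.client_secret_create_params import (
    ClientSecretCreateParams,
)

# Both keys are optional (total=False); `seconds` must be between 10 and 7200
# and defaults to 600 (10 minutes) when omitted.
params: ClientSecretCreateParams = {
    "expires_after": {"anchor": "created_at", "seconds": 600},
    # "session": ...  # either a realtime or a transcription session request
}
```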
+ +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ..._utils import PropertyInfo +from .realtime_mcp_tool_call import RealtimeMcpToolCall +from .realtime_mcp_list_tools import RealtimeMcpListTools +from .realtime_mcp_approval_request import RealtimeMcpApprovalRequest +from .realtime_mcp_approval_response import RealtimeMcpApprovalResponse +from .realtime_conversation_item_user_message import RealtimeConversationItemUserMessage +from .realtime_conversation_item_function_call import RealtimeConversationItemFunctionCall +from .realtime_conversation_item_system_message import RealtimeConversationItemSystemMessage +from .realtime_conversation_item_assistant_message import RealtimeConversationItemAssistantMessage +from .realtime_conversation_item_function_call_output import RealtimeConversationItemFunctionCallOutput + +__all__ = ["ConversationItem"] + +ConversationItem: TypeAlias = Annotated[ + Union[ + RealtimeConversationItemSystemMessage, + RealtimeConversationItemUserMessage, + RealtimeConversationItemAssistantMessage, + RealtimeConversationItemFunctionCall, + RealtimeConversationItemFunctionCallOutput, + RealtimeMcpApprovalResponse, + RealtimeMcpListTools, + RealtimeMcpToolCall, + RealtimeMcpApprovalRequest, + ], + PropertyInfo(discriminator="type"), +] diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_added.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_added.py new file mode 100644 index 00000000..ae9f6803 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_added.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ConversationItemAdded"] + + +class ConversationItemAdded(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item: ConversationItem + """A single item within a Realtime conversation.""" + + type: Literal["conversation.item.added"] + """The event type, must be `conversation.item.added`.""" + + previous_item_id: Optional[str] = None + """The ID of the item that precedes this one, if any. + + This is used to maintain ordering when items are inserted. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_create_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_create_event.py new file mode 100644 index 00000000..8fa2dfe0 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_create_event.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ConversationItemCreateEvent"] + + +class ConversationItemCreateEvent(BaseModel): + item: ConversationItem + """A single item within a Realtime conversation.""" + + type: Literal["conversation.item.create"] + """The event type, must be `conversation.item.create`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" + + previous_item_id: Optional[str] = None + """The ID of the preceding item after which the new item will be inserted. + + If not set, the new item will be appended to the end of the conversation. 
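Because `ConversationItem` is an annotated union discriminated on `type`, parsed items come back as one of nine concrete pydantic models, so handlers can branch with plain `isinstance` checks. A minimal sketch using only names introduced in this diff (vendored import path assumed):

```python
from portkey_ai._vendor.openai.types.realtime.conversation_item import ConversationItem
from portkey_ai._vendor.openai.types.realtime.realtime_mcp_tool_call import (
    RealtimeMcpToolCall,
)
from portkey_ai._vendor.openai.types.realtime.realtime_conversation_item_function_call import (
    RealtimeConversationItemFunctionCall,
)


def describe(item: ConversationItem) -> str:
    # Pydantic has already resolved the union via the `type` discriminator,
    # so each item is an instance of exactly one of the member models.
    if isinstance(item, RealtimeConversationItemFunctionCall):
        return "function call"
    if isinstance(item, RealtimeMcpToolCall):
        return "MCP tool call"
    return type(item).__name__
```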
If set + to `root`, the new item will be added to the beginning of the conversation. If + set to an existing ID, it allows an item to be inserted mid-conversation. If the + ID cannot be found, an error will be returned and the item will not be added. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_create_event_param.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_create_event_param.py new file mode 100644 index 00000000..8530dc72 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_create_event_param.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from .conversation_item_param import ConversationItemParam + +__all__ = ["ConversationItemCreateEventParam"] + + +class ConversationItemCreateEventParam(TypedDict, total=False): + item: Required[ConversationItemParam] + """A single item within a Realtime conversation.""" + + type: Required[Literal["conversation.item.create"]] + """The event type, must be `conversation.item.create`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" + + previous_item_id: str + """The ID of the preceding item after which the new item will be inserted. + + If not set, the new item will be appended to the end of the conversation. If set + to `root`, the new item will be added to the beginning of the conversation. If + set to an existing ID, it allows an item to be inserted mid-conversation. If the + ID cannot be found, an error will be returned and the item will not be added. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_created_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_created_event.py new file mode 100644 index 00000000..13f24ad3 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_created_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ConversationItemCreatedEvent"] + + +class ConversationItemCreatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item: ConversationItem + """A single item within a Realtime conversation.""" + + type: Literal["conversation.item.created"] + """The event type, must be `conversation.item.created`.""" + + previous_item_id: Optional[str] = None + """ + The ID of the preceding item in the Conversation context, allows the client to + understand the order of the conversation. Can be `null` if the item has no + predecessor. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event.py new file mode 100644 index 00000000..3734f72e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
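The `previous_item_id` semantics above (append by default, `root` to prepend, an existing ID to insert mid-conversation) translate directly into the client event param. A hedged sketch; the user-message payload is illustrative only, since its exact fields live in `realtime_conversation_item_user_message_param.py`, which is not part of this excerpt:

```python
from portkey_ai._vendor.openai.types.realtime.conversation_item_create_event_param import (
    ConversationItemCreateEventParam,
)

event: ConversationItemCreateEventParam = {
    "type": "conversation.item.create",
    # Insert after an existing item; omit the key to append, or pass "root"
    # to prepend. An unknown ID makes the server return an error.
    "previous_item_id": "item_123",  # hypothetical item ID
    "item": {  # assumed user-message shape, defined elsewhere in this diff
        "type": "message",
        "role": "user",
        "content": [{"type": "input_text", "text": "Hello!"}],
    },
}
```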
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ConversationItemDeleteEvent"] + + +class ConversationItemDeleteEvent(BaseModel): + item_id: str + """The ID of the item to delete.""" + + type: Literal["conversation.item.delete"] + """The event type, must be `conversation.item.delete`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event_param.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event_param.py new file mode 100644 index 00000000..c3f88d66 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_delete_event_param.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ConversationItemDeleteEventParam"] + + +class ConversationItemDeleteEventParam(TypedDict, total=False): + item_id: Required[str] + """The ID of the item to delete.""" + + type: Required[Literal["conversation.item.delete"]] + """The event type, must be `conversation.item.delete`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_deleted_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_deleted_event.py new file mode 100644 index 00000000..cfe6fe85 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_deleted_event.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ConversationItemDeletedEvent"] + + +class ConversationItemDeletedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item that was deleted.""" + + type: Literal["conversation.item.deleted"] + """The event type, must be `conversation.item.deleted`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_done.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_done.py new file mode 100644 index 00000000..a4c9b8a8 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_done.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ConversationItemDone"] + + +class ConversationItemDone(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item: ConversationItem + """A single item within a Realtime conversation.""" + + type: Literal["conversation.item.done"] + """The event type, must be `conversation.item.done`.""" + + previous_item_id: Optional[str] = None + """The ID of the item that precedes this one, if any. + + This is used to maintain ordering when items are inserted. 
+ """ diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py new file mode 100644 index 00000000..09b20aa1 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py @@ -0,0 +1,79 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel +from .log_prob_properties import LogProbProperties + +__all__ = [ + "ConversationItemInputAudioTranscriptionCompletedEvent", + "Usage", + "UsageTranscriptTextUsageTokens", + "UsageTranscriptTextUsageTokensInputTokenDetails", + "UsageTranscriptTextUsageDuration", +] + + +class UsageTranscriptTextUsageTokensInputTokenDetails(BaseModel): + audio_tokens: Optional[int] = None + """Number of audio tokens billed for this request.""" + + text_tokens: Optional[int] = None + """Number of text tokens billed for this request.""" + + +class UsageTranscriptTextUsageTokens(BaseModel): + input_tokens: int + """Number of input tokens billed for this request.""" + + output_tokens: int + """Number of output tokens generated.""" + + total_tokens: int + """Total number of tokens used (input + output).""" + + type: Literal["tokens"] + """The type of the usage object. Always `tokens` for this variant.""" + + input_token_details: Optional[UsageTranscriptTextUsageTokensInputTokenDetails] = None + """Details about the input tokens billed for this request.""" + + +class UsageTranscriptTextUsageDuration(BaseModel): + seconds: float + """Duration of the input audio in seconds.""" + + type: Literal["duration"] + """The type of the usage object. Always `duration` for this variant.""" + + +Usage: TypeAlias = Union[UsageTranscriptTextUsageTokens, UsageTranscriptTextUsageDuration] + + +class ConversationItemInputAudioTranscriptionCompletedEvent(BaseModel): + content_index: int + """The index of the content part containing the audio.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item containing the audio that is being transcribed.""" + + transcript: str + """The transcribed text.""" + + type: Literal["conversation.item.input_audio_transcription.completed"] + """ + The event type, must be `conversation.item.input_audio_transcription.completed`. + """ + + usage: Usage + """ + Usage statistics for the transcription, this is billed according to the ASR + model's pricing rather than the realtime model's pricing. + """ + + logprobs: Optional[List[LogProbProperties]] = None + """The log probabilities of the transcription.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py new file mode 100644 index 00000000..f49e6f63 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .log_prob_properties import LogProbProperties
+
+__all__ = ["ConversationItemInputAudioTranscriptionDeltaEvent"]
+
+
+class ConversationItemInputAudioTranscriptionDeltaEvent(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the item containing the audio that is being transcribed."""
+
+    type: Literal["conversation.item.input_audio_transcription.delta"]
+    """The event type, must be `conversation.item.input_audio_transcription.delta`."""
+
+    content_index: Optional[int] = None
+    """The index of the content part in the item's content array."""
+
+    delta: Optional[str] = None
+    """The text delta."""
+
+    logprobs: Optional[List[LogProbProperties]] = None
+    """The log probabilities of the transcription.
+
+    These can be enabled by configuring the session with
+    `"include": ["item.input_audio_transcription.logprobs"]`. Each entry in the
+    array corresponds to a log probability of which token would be selected for this
+    chunk of transcription. This can help identify whether there were
+    multiple valid options for a given chunk of transcription.
+    """
diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py
new file mode 100644
index 00000000..edb97bbf
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ConversationItemInputAudioTranscriptionFailedEvent", "Error"]
+
+
+class Error(BaseModel):
+    code: Optional[str] = None
+    """Error code, if any."""
+
+    message: Optional[str] = None
+    """A human-readable error message."""
+
+    param: Optional[str] = None
+    """Parameter related to the error, if any."""
+
+    type: Optional[str] = None
+    """The type of error."""
+
+
+class ConversationItemInputAudioTranscriptionFailedEvent(BaseModel):
+    content_index: int
+    """The index of the content part containing the audio."""
+
+    error: Error
+    """Details of the transcription error."""
+
+    event_id: str
+    """The unique ID of the server event."""
+
+    item_id: str
+    """The ID of the user message item."""
+
+    type: Literal["conversation.item.input_audio_transcription.failed"]
+    """The event type, must be `conversation.item.input_audio_transcription.failed`."""
diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_segment.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_segment.py
new file mode 100644
index 00000000..e2cbc9d2
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_input_audio_transcription_segment.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
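Delta events carry optional incremental text per item, so a typical consumer accumulates them keyed by `item_id` until the corresponding `completed` event supplies the final transcript. A minimal sketch:

```python
from typing import Dict

from portkey_ai._vendor.openai.types.realtime.conversation_item_input_audio_transcription_delta_event import (
    ConversationItemInputAudioTranscriptionDeltaEvent,
)

partial_transcripts: Dict[str, str] = {}


def on_transcription_delta(
    event: ConversationItemInputAudioTranscriptionDeltaEvent,
) -> None:
    # `delta` is optional, so guard before concatenating.
    if event.delta is not None:
        partial_transcripts[event.item_id] = (
            partial_transcripts.get(event.item_id, "") + event.delta
        )
```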
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ConversationItemInputAudioTranscriptionSegment"] + + +class ConversationItemInputAudioTranscriptionSegment(BaseModel): + id: str + """The segment identifier.""" + + content_index: int + """The index of the input audio content part within the item.""" + + end: float + """End time of the segment in seconds.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item containing the input audio content.""" + + speaker: str + """The detected speaker label for this segment.""" + + start: float + """Start time of the segment in seconds.""" + + text: str + """The text for this segment.""" + + type: Literal["conversation.item.input_audio_transcription.segment"] + """The event type, must be `conversation.item.input_audio_transcription.segment`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_param.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_param.py new file mode 100644 index 00000000..c8b442ec --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_param.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .realtime_mcp_tool_call_param import RealtimeMcpToolCallParam +from .realtime_mcp_list_tools_param import RealtimeMcpListToolsParam +from .realtime_mcp_approval_request_param import RealtimeMcpApprovalRequestParam +from .realtime_mcp_approval_response_param import RealtimeMcpApprovalResponseParam +from .realtime_conversation_item_user_message_param import RealtimeConversationItemUserMessageParam +from .realtime_conversation_item_function_call_param import RealtimeConversationItemFunctionCallParam +from .realtime_conversation_item_system_message_param import RealtimeConversationItemSystemMessageParam +from .realtime_conversation_item_assistant_message_param import RealtimeConversationItemAssistantMessageParam +from .realtime_conversation_item_function_call_output_param import RealtimeConversationItemFunctionCallOutputParam + +__all__ = ["ConversationItemParam"] + +ConversationItemParam: TypeAlias = Union[ + RealtimeConversationItemSystemMessageParam, + RealtimeConversationItemUserMessageParam, + RealtimeConversationItemAssistantMessageParam, + RealtimeConversationItemFunctionCallParam, + RealtimeConversationItemFunctionCallOutputParam, + RealtimeMcpApprovalResponseParam, + RealtimeMcpListToolsParam, + RealtimeMcpToolCallParam, + RealtimeMcpApprovalRequestParam, +] diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event.py new file mode 100644 index 00000000..018c2ccc --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
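Each segment event above carries a speaker label alongside start/end offsets, so rendering a labelled transcript line is a one-liner. A minimal sketch:

```python
from portkey_ai._vendor.openai.types.realtime.conversation_item_input_audio_transcription_segment import (
    ConversationItemInputAudioTranscriptionSegment,
)


def format_segment(seg: ConversationItemInputAudioTranscriptionSegment) -> str:
    # e.g. "[0.00-1.84] speaker_1: hello there"
    return f"[{seg.start:.2f}-{seg.end:.2f}] {seg.speaker}: {seg.text}"
```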
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ConversationItemRetrieveEvent"] + + +class ConversationItemRetrieveEvent(BaseModel): + item_id: str + """The ID of the item to retrieve.""" + + type: Literal["conversation.item.retrieve"] + """The event type, must be `conversation.item.retrieve`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event_param.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event_param.py new file mode 100644 index 00000000..71b3ffa4 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_retrieve_event_param.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ConversationItemRetrieveEventParam"] + + +class ConversationItemRetrieveEventParam(TypedDict, total=False): + item_id: Required[str] + """The ID of the item to retrieve.""" + + type: Required[Literal["conversation.item.retrieve"]] + """The event type, must be `conversation.item.retrieve`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncate_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncate_event.py new file mode 100644 index 00000000..d6c6779c --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncate_event.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ConversationItemTruncateEvent"] + + +class ConversationItemTruncateEvent(BaseModel): + audio_end_ms: int + """Inclusive duration up to which audio is truncated, in milliseconds. + + If the audio_end_ms is greater than the actual audio duration, the server will + respond with an error. + """ + + content_index: int + """The index of the content part to truncate. Set this to `0`.""" + + item_id: str + """The ID of the assistant message item to truncate. + + Only assistant message items can be truncated. + """ + + type: Literal["conversation.item.truncate"] + """The event type, must be `conversation.item.truncate`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncate_event_param.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncate_event_param.py new file mode 100644 index 00000000..f5ab13a4 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncate_event_param.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ConversationItemTruncateEventParam"] + + +class ConversationItemTruncateEventParam(TypedDict, total=False): + audio_end_ms: Required[int] + """Inclusive duration up to which audio is truncated, in milliseconds. + + If the audio_end_ms is greater than the actual audio duration, the server will + respond with an error. 
+ """ + + content_index: Required[int] + """The index of the content part to truncate. Set this to `0`.""" + + item_id: Required[str] + """The ID of the assistant message item to truncate. + + Only assistant message items can be truncated. + """ + + type: Required[Literal["conversation.item.truncate"]] + """The event type, must be `conversation.item.truncate`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncated_event.py b/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncated_event.py new file mode 100644 index 00000000..f56cabc3 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/conversation_item_truncated_event.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ConversationItemTruncatedEvent"] + + +class ConversationItemTruncatedEvent(BaseModel): + audio_end_ms: int + """The duration up to which the audio was truncated, in milliseconds.""" + + content_index: int + """The index of the content part that was truncated.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the assistant message item that was truncated.""" + + type: Literal["conversation.item.truncated"] + """The event type, must be `conversation.item.truncated`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_append_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_append_event.py new file mode 100644 index 00000000..8562cf0a --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_append_event.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputAudioBufferAppendEvent"] + + +class InputAudioBufferAppendEvent(BaseModel): + audio: str + """Base64-encoded audio bytes. + + This must be in the format specified by the `input_audio_format` field in the + session configuration. + """ + + type: Literal["input_audio_buffer.append"] + """The event type, must be `input_audio_buffer.append`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_append_event_param.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_append_event_param.py new file mode 100644 index 00000000..3ad0bc73 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_append_event_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["InputAudioBufferAppendEventParam"] + + +class InputAudioBufferAppendEventParam(TypedDict, total=False): + audio: Required[str] + """Base64-encoded audio bytes. + + This must be in the format specified by the `input_audio_format` field in the + session configuration. 
+ """ + + type: Required[Literal["input_audio_buffer.append"]] + """The event type, must be `input_audio_buffer.append`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event.py new file mode 100644 index 00000000..9922ff3b --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputAudioBufferClearEvent"] + + +class InputAudioBufferClearEvent(BaseModel): + type: Literal["input_audio_buffer.clear"] + """The event type, must be `input_audio_buffer.clear`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event_param.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event_param.py new file mode 100644 index 00000000..2bd6bc5a --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_clear_event_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["InputAudioBufferClearEventParam"] + + +class InputAudioBufferClearEventParam(TypedDict, total=False): + type: Required[Literal["input_audio_buffer.clear"]] + """The event type, must be `input_audio_buffer.clear`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_cleared_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_cleared_event.py new file mode 100644 index 00000000..af71844f --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_cleared_event.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputAudioBufferClearedEvent"] + + +class InputAudioBufferClearedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + type: Literal["input_audio_buffer.cleared"] + """The event type, must be `input_audio_buffer.cleared`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event.py new file mode 100644 index 00000000..125c3ba1 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputAudioBufferCommitEvent"] + + +class InputAudioBufferCommitEvent(BaseModel): + type: Literal["input_audio_buffer.commit"] + """The event type, must be `input_audio_buffer.commit`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event_param.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event_param.py new file mode 100644 index 00000000..c9c927ab --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_commit_event_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["InputAudioBufferCommitEventParam"] + + +class InputAudioBufferCommitEventParam(TypedDict, total=False): + type: Required[Literal["input_audio_buffer.commit"]] + """The event type, must be `input_audio_buffer.commit`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_committed_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_committed_event.py new file mode 100644 index 00000000..5ed1b4cc --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_committed_event.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputAudioBufferCommittedEvent"] + + +class InputAudioBufferCommittedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the user message item that will be created.""" + + type: Literal["input_audio_buffer.committed"] + """The event type, must be `input_audio_buffer.committed`.""" + + previous_item_id: Optional[str] = None + """ + The ID of the preceding item after which the new item will be inserted. Can be + `null` if the item has no predecessor. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_speech_started_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_speech_started_event.py new file mode 100644 index 00000000..865205d7 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_speech_started_event.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputAudioBufferSpeechStartedEvent"] + + +class InputAudioBufferSpeechStartedEvent(BaseModel): + audio_start_ms: int + """ + Milliseconds from the start of all audio written to the buffer during the + session when speech was first detected. This will correspond to the beginning of + audio sent to the model, and thus includes the `prefix_padding_ms` configured in + the Session. 
+ """ + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the user message item that will be created when speech stops.""" + + type: Literal["input_audio_buffer.speech_started"] + """The event type, must be `input_audio_buffer.speech_started`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_speech_stopped_event.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_speech_stopped_event.py new file mode 100644 index 00000000..6cb7845f --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_speech_stopped_event.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputAudioBufferSpeechStoppedEvent"] + + +class InputAudioBufferSpeechStoppedEvent(BaseModel): + audio_end_ms: int + """Milliseconds since the session started when speech stopped. + + This will correspond to the end of audio sent to the model, and thus includes + the `min_silence_duration_ms` configured in the Session. + """ + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the user message item that will be created.""" + + type: Literal["input_audio_buffer.speech_stopped"] + """The event type, must be `input_audio_buffer.speech_stopped`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_timeout_triggered.py b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_timeout_triggered.py new file mode 100644 index 00000000..5c5dc5cf --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/input_audio_buffer_timeout_triggered.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputAudioBufferTimeoutTriggered"] + + +class InputAudioBufferTimeoutTriggered(BaseModel): + audio_end_ms: int + """ + Millisecond offset of audio written to the input audio buffer at the time the + timeout was triggered. + """ + + audio_start_ms: int + """ + Millisecond offset of audio written to the input audio buffer that was after the + playback time of the last model response. + """ + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item associated with this segment.""" + + type: Literal["input_audio_buffer.timeout_triggered"] + """The event type, must be `input_audio_buffer.timeout_triggered`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/log_prob_properties.py b/portkey_ai/_vendor/openai/types/realtime/log_prob_properties.py new file mode 100644 index 00000000..92477d67 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/log_prob_properties.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List + +from ..._models import BaseModel + +__all__ = ["LogProbProperties"] + + +class LogProbProperties(BaseModel): + token: str + """The token that was used to generate the log probability.""" + + bytes: List[int] + """The bytes that were used to generate the log probability.""" + + logprob: float + """The log probability of the token.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_completed.py b/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_completed.py new file mode 100644 index 00000000..941280f0 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_completed.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["McpListToolsCompleted"] + + +class McpListToolsCompleted(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the MCP list tools item.""" + + type: Literal["mcp_list_tools.completed"] + """The event type, must be `mcp_list_tools.completed`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_failed.py b/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_failed.py new file mode 100644 index 00000000..892eda21 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_failed.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["McpListToolsFailed"] + + +class McpListToolsFailed(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the MCP list tools item.""" + + type: Literal["mcp_list_tools.failed"] + """The event type, must be `mcp_list_tools.failed`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_in_progress.py b/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_in_progress.py new file mode 100644 index 00000000..4254b5fd --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/mcp_list_tools_in_progress.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["McpListToolsInProgress"] + + +class McpListToolsInProgress(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the MCP list tools item.""" + + type: Literal["mcp_list_tools.in_progress"] + """The event type, must be `mcp_list_tools.in_progress`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/noise_reduction_type.py b/portkey_ai/_vendor/openai/types/realtime/noise_reduction_type.py new file mode 100644 index 00000000..f4338991 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/noise_reduction_type.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal, TypeAlias + +__all__ = ["NoiseReductionType"] + +NoiseReductionType: TypeAlias = Literal["near_field", "far_field"] diff --git a/portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event.py b/portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event.py new file mode 100644 index 00000000..b4c95039 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["OutputAudioBufferClearEvent"] + + +class OutputAudioBufferClearEvent(BaseModel): + type: Literal["output_audio_buffer.clear"] + """The event type, must be `output_audio_buffer.clear`.""" + + event_id: Optional[str] = None + """The unique ID of the client event used for error handling.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event_param.py b/portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event_param.py new file mode 100644 index 00000000..a3205ebc --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/output_audio_buffer_clear_event_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["OutputAudioBufferClearEventParam"] + + +class OutputAudioBufferClearEventParam(TypedDict, total=False): + type: Required[Literal["output_audio_buffer.clear"]] + """The event type, must be `output_audio_buffer.clear`.""" + + event_id: str + """The unique ID of the client event used for error handling.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/rate_limits_updated_event.py b/portkey_ai/_vendor/openai/types/realtime/rate_limits_updated_event.py new file mode 100644 index 00000000..048a4028 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/rate_limits_updated_event.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RateLimitsUpdatedEvent", "RateLimit"] + + +class RateLimit(BaseModel): + limit: Optional[int] = None + """The maximum allowed value for the rate limit.""" + + name: Optional[Literal["requests", "tokens"]] = None + """The name of the rate limit (`requests`, `tokens`).""" + + remaining: Optional[int] = None + """The remaining value before the limit is reached.""" + + reset_seconds: Optional[float] = None + """Seconds until the rate limit resets.""" + + +class RateLimitsUpdatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + rate_limits: List[RateLimit] + """List of rate limit information.""" + + type: Literal["rate_limits.updated"] + """The event type, must be `rate_limits.updated`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config.py new file mode 100644 index 00000000..72d7cc59 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
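Every field on `RateLimit` above is optional, so consumers should guard before using the values, for example to pace sends. A minimal sketch:

```python
from typing import Optional

from portkey_ai._vendor.openai.types.realtime.rate_limits_updated_event import (
    RateLimitsUpdatedEvent,
)


def tokens_reset_in(event: RateLimitsUpdatedEvent) -> Optional[float]:
    # Returns seconds until the token limit resets, if the server reported it.
    for limit in event.rate_limits:
        if limit.name == "tokens":
            return limit.reset_seconds
    return None
```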
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .realtime_audio_config_input import RealtimeAudioConfigInput
+from .realtime_audio_config_output import RealtimeAudioConfigOutput
+
+__all__ = ["RealtimeAudioConfig"]
+
+
+class RealtimeAudioConfig(BaseModel):
+    input: Optional[RealtimeAudioConfigInput] = None
+
+    output: Optional[RealtimeAudioConfigOutput] = None
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_input.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_input.py
new file mode 100644
index 00000000..cfcb7f22
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_input.py
@@ -0,0 +1,63 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .audio_transcription import AudioTranscription
+from .noise_reduction_type import NoiseReductionType
+from .realtime_audio_formats import RealtimeAudioFormats
+from .realtime_audio_input_turn_detection import RealtimeAudioInputTurnDetection
+
+__all__ = ["RealtimeAudioConfigInput", "NoiseReduction"]
+
+
+class NoiseReduction(BaseModel):
+    type: Optional[NoiseReductionType] = None
+    """Type of noise reduction.
+
+    `near_field` is for close-talking microphones such as headphones, `far_field` is
+    for far-field microphones such as laptop or conference room microphones.
+    """
+
+
+class RealtimeAudioConfigInput(BaseModel):
+    format: Optional[RealtimeAudioFormats] = None
+    """The format of the input audio."""
+
+    noise_reduction: Optional[NoiseReduction] = None
+    """Configuration for input audio noise reduction.
+
+    This can be set to `null` to turn off. Noise reduction filters audio added to
+    the input audio buffer before it is sent to VAD and the model. Filtering the
+    audio can improve VAD and turn detection accuracy (reducing false positives) and
+    model performance by improving perception of the input audio.
+    """
+
+    transcription: Optional[AudioTranscription] = None
+    """
+    Configuration for input audio transcription, defaults to off and can be set to
+    `null` to turn off once on. Input audio transcription is not native to the
+    model, since the model consumes audio directly. Transcription runs
+    asynchronously through
+    [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+    and should be treated as guidance of input audio content rather than precisely
+    what the model heard. The client can optionally set the language and prompt for
+    transcription, these offer additional guidance to the transcription service.
+    """
+
+    turn_detection: Optional[RealtimeAudioInputTurnDetection] = None
+    """Configuration for turn detection, either Server VAD or Semantic VAD.
+
+    This can be set to `null` to turn off, in which case the client must manually
+    trigger a model response.
+
+    Server VAD means that the model will detect the start and end of speech based on
+    audio volume and respond at the end of user speech.
+
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
+    with VAD) to semantically estimate whether the user has finished speaking, then
+    dynamically sets a timeout based on this probability. For example, if user audio
+    trails off with "uhhm", the model will score a low probability of turn end and
+    wait longer for the user to continue speaking. This can be useful for more
+    natural conversations, but may have a higher latency.
+    """
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_input_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_input_param.py
new file mode 100644
index 00000000..730f46cf
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_input_param.py
@@ -0,0 +1,65 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import TypedDict
+
+from .noise_reduction_type import NoiseReductionType
+from .audio_transcription_param import AudioTranscriptionParam
+from .realtime_audio_formats_param import RealtimeAudioFormatsParam
+from .realtime_audio_input_turn_detection_param import RealtimeAudioInputTurnDetectionParam
+
+__all__ = ["RealtimeAudioConfigInputParam", "NoiseReduction"]
+
+
+class NoiseReduction(TypedDict, total=False):
+    type: NoiseReductionType
+    """Type of noise reduction.
+
+    `near_field` is for close-talking microphones such as headphones, `far_field` is
+    for far-field microphones such as laptop or conference room microphones.
+    """
+
+
+class RealtimeAudioConfigInputParam(TypedDict, total=False):
+    format: RealtimeAudioFormatsParam
+    """The format of the input audio."""
+
+    noise_reduction: NoiseReduction
+    """Configuration for input audio noise reduction.
+
+    This can be set to `null` to turn off. Noise reduction filters audio added to
+    the input audio buffer before it is sent to VAD and the model. Filtering the
+    audio can improve VAD and turn detection accuracy (reducing false positives) and
+    model performance by improving perception of the input audio.
+    """
+
+    transcription: AudioTranscriptionParam
+    """
+    Configuration for input audio transcription, defaults to off and can be set to
+    `null` to turn off once on. Input audio transcription is not native to the
+    model, since the model consumes audio directly. Transcription runs
+    asynchronously through
+    [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+    and should be treated as guidance of input audio content rather than precisely
+    what the model heard. The client can optionally set the language and prompt for
+    transcription, these offer additional guidance to the transcription service.
+    """
+
+    turn_detection: Optional[RealtimeAudioInputTurnDetectionParam]
+    """Configuration for turn detection, either Server VAD or Semantic VAD.
+
+    This can be set to `null` to turn off, in which case the client must manually
+    trigger a model response.
+
+    Server VAD means that the model will detect the start and end of speech based on
+    audio volume and respond at the end of user speech.
+
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
+    with VAD) to semantically estimate whether the user has finished speaking, then
+    dynamically sets a timeout based on this probability. For example, if user audio
+    trails off with "uhhm", the model will score a low probability of turn end and
+    wait longer for the user to continue speaking. This can be useful for more
+    natural conversations, but may have a higher latency.
+ """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output.py new file mode 100644 index 00000000..a8af237c --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union, Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .realtime_audio_formats import RealtimeAudioFormats + +__all__ = ["RealtimeAudioConfigOutput"] + + +class RealtimeAudioConfigOutput(BaseModel): + format: Optional[RealtimeAudioFormats] = None + """The format of the output audio.""" + + speed: Optional[float] = None + """ + The speed of the model's spoken response as a multiple of the original speed. + 1.0 is the default speed. 0.25 is the minimum speed. 1.5 is the maximum speed. + This value can only be changed in between model turns, not while a response is + in progress. + + This parameter is a post-processing adjustment to the audio after it is + generated, it's also possible to prompt the model to speak faster or slower. + """ + + voice: Union[ + str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], None + ] = None + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. We recommend + `marin` and `cedar` for best quality. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output_param.py new file mode 100644 index 00000000..8e887d34 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_output_param.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypedDict + +from .realtime_audio_formats_param import RealtimeAudioFormatsParam + +__all__ = ["RealtimeAudioConfigOutputParam"] + + +class RealtimeAudioConfigOutputParam(TypedDict, total=False): + format: RealtimeAudioFormatsParam + """The format of the output audio.""" + + speed: float + """ + The speed of the model's spoken response as a multiple of the original speed. + 1.0 is the default speed. 0.25 is the minimum speed. 1.5 is the maximum speed. + This value can only be changed in between model turns, not while a response is + in progress. + + This parameter is a post-processing adjustment to the audio after it is + generated, it's also possible to prompt the model to speak faster or slower. + """ + + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]] + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. We recommend + `marin` and `cedar` for best quality. 
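The input and output halves combine into a single `RealtimeAudioConfigParam` (the wrapper TypedDict appears just below, and the audio format shapes follow it). A sketch of a complete session audio configuration; the concrete values are illustrative, and the `transcription` keys follow the `language`/`prompt` guidance in the docstring above rather than a field list shown here:

```python
from openai.types.realtime.realtime_audio_config_param import RealtimeAudioConfigParam

audio_config: RealtimeAudioConfigParam = {
    "input": {
        "format": {"type": "audio/pcm", "rate": 24000},
        "noise_reduction": {"type": "near_field"},  # headset-style microphone
        "transcription": {"language": "en"},  # optional language/prompt hints (assumed keys)
        "turn_detection": {"type": "semantic_vad", "eagerness": "medium"},
    },
    "output": {
        "format": {"type": "audio/pcm", "rate": 24000},
        "voice": "marin",  # one of the recommended voices
        "speed": 1.0,      # allowed range is 0.25 to 1.5
    },
}
```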
+ """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_param.py new file mode 100644 index 00000000..2c41de35 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_config_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +from .realtime_audio_config_input_param import RealtimeAudioConfigInputParam +from .realtime_audio_config_output_param import RealtimeAudioConfigOutputParam + +__all__ = ["RealtimeAudioConfigParam"] + + +class RealtimeAudioConfigParam(TypedDict, total=False): + input: RealtimeAudioConfigInputParam + + output: RealtimeAudioConfigOutputParam diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_formats.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_formats.py new file mode 100644 index 00000000..10f91883 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_formats.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel + +__all__ = ["RealtimeAudioFormats", "AudioPCM", "AudioPCMU", "AudioPCMA"] + + +class AudioPCM(BaseModel): + rate: Optional[Literal[24000]] = None + """The sample rate of the audio. Always `24000`.""" + + type: Optional[Literal["audio/pcm"]] = None + """The audio format. Always `audio/pcm`.""" + + +class AudioPCMU(BaseModel): + type: Optional[Literal["audio/pcmu"]] = None + """The audio format. Always `audio/pcmu`.""" + + +class AudioPCMA(BaseModel): + type: Optional[Literal["audio/pcma"]] = None + """The audio format. Always `audio/pcma`.""" + + +RealtimeAudioFormats: TypeAlias = Annotated[Union[AudioPCM, AudioPCMU, AudioPCMA], PropertyInfo(discriminator="type")] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_formats_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_formats_param.py new file mode 100644 index 00000000..cf58577f --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_formats_param.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypeAlias, TypedDict + +__all__ = ["RealtimeAudioFormatsParam", "AudioPCM", "AudioPCMU", "AudioPCMA"] + + +class AudioPCM(TypedDict, total=False): + rate: Literal[24000] + """The sample rate of the audio. Always `24000`.""" + + type: Literal["audio/pcm"] + """The audio format. Always `audio/pcm`.""" + + +class AudioPCMU(TypedDict, total=False): + type: Literal["audio/pcmu"] + """The audio format. Always `audio/pcmu`.""" + + +class AudioPCMA(TypedDict, total=False): + type: Literal["audio/pcma"] + """The audio format. 
Always `audio/pcma`.""" + + +RealtimeAudioFormatsParam: TypeAlias = Union[AudioPCM, AudioPCMU, AudioPCMA] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_input_turn_detection.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_input_turn_detection.py new file mode 100644 index 00000000..d3f4e003 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_input_turn_detection.py @@ -0,0 +1,98 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel + +__all__ = ["RealtimeAudioInputTurnDetection", "ServerVad", "SemanticVad"] + + +class ServerVad(BaseModel): + type: Literal["server_vad"] + """Type of turn detection, `server_vad` to turn on simple Server VAD.""" + + create_response: Optional[bool] = None + """ + Whether or not to automatically generate a response when a VAD stop event + occurs. + """ + + idle_timeout_ms: Optional[int] = None + """Optional timeout after which a model response will be triggered automatically. + + This is useful for situations in which a long pause from the user is unexpected, + such as a phone call. The model will effectively prompt the user to continue the + conversation based on the current context. + + The timeout value will be applied after the last model response's audio has + finished playing, i.e. it's set to the `response.done` time plus audio playback + duration. + + An `input_audio_buffer.timeout_triggered` event (plus events associated with the + Response) will be emitted when the timeout is reached. Idle timeout is currently + only supported for `server_vad` mode. + """ + + interrupt_response: Optional[bool] = None + """ + Whether or not to automatically interrupt any ongoing response with output to + the default conversation (i.e. `conversation` of `auto`) when a VAD start event + occurs. + """ + + prefix_padding_ms: Optional[int] = None + """Used only for `server_vad` mode. + + Amount of audio to include before the VAD detected speech (in milliseconds). + Defaults to 300ms. + """ + + silence_duration_ms: Optional[int] = None + """Used only for `server_vad` mode. + + Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + With shorter values the model will respond more quickly, but may jump in on + short pauses from the user. + """ + + threshold: Optional[float] = None + """Used only for `server_vad` mode. + + Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + threshold will require louder audio to activate the model, and thus might + perform better in noisy environments. + """ + + +class SemanticVad(BaseModel): + type: Literal["semantic_vad"] + """Type of turn detection, `semantic_vad` to turn on Semantic VAD.""" + + create_response: Optional[bool] = None + """ + Whether or not to automatically generate a response when a VAD stop event + occurs. + """ + + eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None + """Used only for `semantic_vad` mode. + + The eagerness of the model to respond. `low` will wait longer for the user to + continue speaking, `high` will respond more quickly. `auto` is the default and + is equivalent to `medium`. `low`, `medium`, and `high` have max timeouts of 8s, + 4s, and 2s respectively. 
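For the simpler `server_vad` mode, the knobs above trade responsiveness against false triggers. A sketch of a tuned configuration as a plain dict matching the `ServerVad` shape (the TypedDict form follows below); the numbers are illustrative starting points, not recommended defaults:

```python
turn_detection = {
    "type": "server_vad",
    "threshold": 0.6,            # above the 0.5 default, for noisier rooms
    "prefix_padding_ms": 300,    # audio kept from before detected speech
    "silence_duration_ms": 700,  # above the 500ms default, fewer interruptions
    "idle_timeout_ms": 10_000,   # nudge the user after 10s of post-response silence
    "create_response": True,
    "interrupt_response": True,
}
```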
+ """ + + interrupt_response: Optional[bool] = None + """ + Whether or not to automatically interrupt any ongoing response with output to + the default conversation (i.e. `conversation` of `auto`) when a VAD start event + occurs. + """ + + +RealtimeAudioInputTurnDetection: TypeAlias = Annotated[ + Union[ServerVad, SemanticVad, None], PropertyInfo(discriminator="type") +] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_audio_input_turn_detection_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_input_turn_detection_param.py new file mode 100644 index 00000000..09b8cfd1 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_audio_input_turn_detection_param.py @@ -0,0 +1,95 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["RealtimeAudioInputTurnDetectionParam", "ServerVad", "SemanticVad"] + + +class ServerVad(TypedDict, total=False): + type: Required[Literal["server_vad"]] + """Type of turn detection, `server_vad` to turn on simple Server VAD.""" + + create_response: bool + """ + Whether or not to automatically generate a response when a VAD stop event + occurs. + """ + + idle_timeout_ms: Optional[int] + """Optional timeout after which a model response will be triggered automatically. + + This is useful for situations in which a long pause from the user is unexpected, + such as a phone call. The model will effectively prompt the user to continue the + conversation based on the current context. + + The timeout value will be applied after the last model response's audio has + finished playing, i.e. it's set to the `response.done` time plus audio playback + duration. + + An `input_audio_buffer.timeout_triggered` event (plus events associated with the + Response) will be emitted when the timeout is reached. Idle timeout is currently + only supported for `server_vad` mode. + """ + + interrupt_response: bool + """ + Whether or not to automatically interrupt any ongoing response with output to + the default conversation (i.e. `conversation` of `auto`) when a VAD start event + occurs. + """ + + prefix_padding_ms: int + """Used only for `server_vad` mode. + + Amount of audio to include before the VAD detected speech (in milliseconds). + Defaults to 300ms. + """ + + silence_duration_ms: int + """Used only for `server_vad` mode. + + Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + With shorter values the model will respond more quickly, but may jump in on + short pauses from the user. + """ + + threshold: float + """Used only for `server_vad` mode. + + Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + threshold will require louder audio to activate the model, and thus might + perform better in noisy environments. + """ + + +class SemanticVad(TypedDict, total=False): + type: Required[Literal["semantic_vad"]] + """Type of turn detection, `semantic_vad` to turn on Semantic VAD.""" + + create_response: bool + """ + Whether or not to automatically generate a response when a VAD stop event + occurs. + """ + + eagerness: Literal["low", "medium", "high", "auto"] + """Used only for `semantic_vad` mode. + + The eagerness of the model to respond. `low` will wait longer for the user to + continue speaking, `high` will respond more quickly. `auto` is the default and + is equivalent to `medium`. 
`low`, `medium`, and `high` have max timeouts of 8s, + 4s, and 2s respectively. + """ + + interrupt_response: bool + """ + Whether or not to automatically interrupt any ongoing response with output to + the default conversation (i.e. `conversation` of `auto`) when a VAD start event + occurs. + """ + + +RealtimeAudioInputTurnDetectionParam: TypeAlias = Union[ServerVad, SemanticVad] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_client_event.py b/portkey_ai/_vendor/openai/types/realtime/realtime_client_event.py new file mode 100644 index 00000000..3b1c348d --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_client_event.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ..._utils import PropertyInfo +from .session_update_event import SessionUpdateEvent +from .response_cancel_event import ResponseCancelEvent +from .response_create_event import ResponseCreateEvent +from .conversation_item_create_event import ConversationItemCreateEvent +from .conversation_item_delete_event import ConversationItemDeleteEvent +from .input_audio_buffer_clear_event import InputAudioBufferClearEvent +from .input_audio_buffer_append_event import InputAudioBufferAppendEvent +from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent +from .output_audio_buffer_clear_event import OutputAudioBufferClearEvent +from .conversation_item_retrieve_event import ConversationItemRetrieveEvent +from .conversation_item_truncate_event import ConversationItemTruncateEvent + +__all__ = ["RealtimeClientEvent"] + +RealtimeClientEvent: TypeAlias = Annotated[ + Union[ + ConversationItemCreateEvent, + ConversationItemDeleteEvent, + ConversationItemRetrieveEvent, + ConversationItemTruncateEvent, + InputAudioBufferAppendEvent, + InputAudioBufferClearEvent, + OutputAudioBufferClearEvent, + InputAudioBufferCommitEvent, + ResponseCancelEvent, + ResponseCreateEvent, + SessionUpdateEvent, + ], + PropertyInfo(discriminator="type"), +] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_client_event_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_client_event_param.py new file mode 100644 index 00000000..cda5766e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_client_event_param.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
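Because every variant in this client-event union is discriminated by its `type` literal, events can be written as plain dicts and checked against the corresponding param types. A sketch using the fully-typed output-audio-buffer clear event from earlier; `connection.send(...)` is assumed to be the SDK's realtime send method:

```python
from openai.types.realtime.output_audio_buffer_clear_event_param import (
    OutputAudioBufferClearEventParam,
)

clear_event: OutputAudioBufferClearEventParam = {
    "type": "output_audio_buffer.clear",
    "event_id": "evt_client_001",  # optional client-chosen ID, echoed back on errors
}
# e.g. inside an open realtime connection:
#     await connection.send(clear_event)
```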
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .session_update_event_param import SessionUpdateEventParam +from .response_cancel_event_param import ResponseCancelEventParam +from .response_create_event_param import ResponseCreateEventParam +from .conversation_item_create_event_param import ConversationItemCreateEventParam +from .conversation_item_delete_event_param import ConversationItemDeleteEventParam +from .input_audio_buffer_clear_event_param import InputAudioBufferClearEventParam +from .input_audio_buffer_append_event_param import InputAudioBufferAppendEventParam +from .input_audio_buffer_commit_event_param import InputAudioBufferCommitEventParam +from .output_audio_buffer_clear_event_param import OutputAudioBufferClearEventParam +from .conversation_item_retrieve_event_param import ConversationItemRetrieveEventParam +from .conversation_item_truncate_event_param import ConversationItemTruncateEventParam + +__all__ = ["RealtimeClientEventParam"] + +RealtimeClientEventParam: TypeAlias = Union[ + ConversationItemCreateEventParam, + ConversationItemDeleteEventParam, + ConversationItemRetrieveEventParam, + ConversationItemTruncateEventParam, + InputAudioBufferAppendEventParam, + InputAudioBufferClearEventParam, + OutputAudioBufferClearEventParam, + InputAudioBufferCommitEventParam, + ResponseCancelEventParam, + ResponseCreateEventParam, + SessionUpdateEventParam, +] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_connect_params.py b/portkey_ai/_vendor/openai/types/realtime/realtime_connect_params.py new file mode 100644 index 00000000..76474f3d --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_connect_params.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["RealtimeConnectParams"] + + +class RealtimeConnectParams(TypedDict, total=False): + model: Required[str] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_assistant_message.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_assistant_message.py new file mode 100644 index 00000000..6b0f86ee --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_assistant_message.py @@ -0,0 +1,58 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RealtimeConversationItemAssistantMessage", "Content"] + + +class Content(BaseModel): + audio: Optional[str] = None + """ + Base64-encoded audio bytes, these will be parsed as the format specified in the + session output audio type configuration. This defaults to PCM 16-bit 24kHz mono + if not specified. + """ + + text: Optional[str] = None + """The text content.""" + + transcript: Optional[str] = None + """ + The transcript of the audio content, this will always be present if the output + type is `audio`. + """ + + type: Optional[Literal["output_text", "output_audio"]] = None + """ + The content type, `output_text` or `output_audio` depending on the session + `output_modalities` configuration. + """ + + +class RealtimeConversationItemAssistantMessage(BaseModel): + content: List[Content] + """The content of the message.""" + + role: Literal["assistant"] + """The role of the message sender. 
Always `assistant`.""" + + type: Literal["message"] + """The type of the item. Always `message`.""" + + id: Optional[str] = None + """The unique ID of the item. + + This may be provided by the client or generated by the server. + """ + + object: Optional[Literal["realtime.item"]] = None + """Identifier for the API object being returned - always `realtime.item`. + + Optional when creating a new item. + """ + + status: Optional[Literal["completed", "incomplete", "in_progress"]] = None + """The status of the item. Has no effect on the conversation.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_assistant_message_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_assistant_message_param.py new file mode 100644 index 00000000..93699afb --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_assistant_message_param.py @@ -0,0 +1,58 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["RealtimeConversationItemAssistantMessageParam", "Content"] + + +class Content(TypedDict, total=False): + audio: str + """ + Base64-encoded audio bytes, these will be parsed as the format specified in the + session output audio type configuration. This defaults to PCM 16-bit 24kHz mono + if not specified. + """ + + text: str + """The text content.""" + + transcript: str + """ + The transcript of the audio content, this will always be present if the output + type is `audio`. + """ + + type: Literal["output_text", "output_audio"] + """ + The content type, `output_text` or `output_audio` depending on the session + `output_modalities` configuration. + """ + + +class RealtimeConversationItemAssistantMessageParam(TypedDict, total=False): + content: Required[Iterable[Content]] + """The content of the message.""" + + role: Required[Literal["assistant"]] + """The role of the message sender. Always `assistant`.""" + + type: Required[Literal["message"]] + """The type of the item. Always `message`.""" + + id: str + """The unique ID of the item. + + This may be provided by the client or generated by the server. + """ + + object: Literal["realtime.item"] + """Identifier for the API object being returned - always `realtime.item`. + + Optional when creating a new item. + """ + + status: Literal["completed", "incomplete", "in_progress"] + """The status of the item. Has no effect on the conversation.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call.py new file mode 100644 index 00000000..279a2fcd --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call.py @@ -0,0 +1,41 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RealtimeConversationItemFunctionCall"] + + +class RealtimeConversationItemFunctionCall(BaseModel): + arguments: str + """The arguments of the function call. + + This is a JSON-encoded string representing the arguments passed to the function, + for example `{"arg1": "value1", "arg2": 42}`. 
+ """ + + name: str + """The name of the function being called.""" + + type: Literal["function_call"] + """The type of the item. Always `function_call`.""" + + id: Optional[str] = None + """The unique ID of the item. + + This may be provided by the client or generated by the server. + """ + + call_id: Optional[str] = None + """The ID of the function call.""" + + object: Optional[Literal["realtime.item"]] = None + """Identifier for the API object being returned - always `realtime.item`. + + Optional when creating a new item. + """ + + status: Optional[Literal["completed", "incomplete", "in_progress"]] = None + """The status of the item. Has no effect on the conversation.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_output.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_output.py new file mode 100644 index 00000000..4b6b15d0 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_output.py @@ -0,0 +1,37 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RealtimeConversationItemFunctionCallOutput"] + + +class RealtimeConversationItemFunctionCallOutput(BaseModel): + call_id: str + """The ID of the function call this output is for.""" + + output: str + """ + The output of the function call, this is free text and can contain any + information or simply be empty. + """ + + type: Literal["function_call_output"] + """The type of the item. Always `function_call_output`.""" + + id: Optional[str] = None + """The unique ID of the item. + + This may be provided by the client or generated by the server. + """ + + object: Optional[Literal["realtime.item"]] = None + """Identifier for the API object being returned - always `realtime.item`. + + Optional when creating a new item. + """ + + status: Optional[Literal["completed", "incomplete", "in_progress"]] = None + """The status of the item. Has no effect on the conversation.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_output_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_output_param.py new file mode 100644 index 00000000..56d62da5 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_output_param.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["RealtimeConversationItemFunctionCallOutputParam"] + + +class RealtimeConversationItemFunctionCallOutputParam(TypedDict, total=False): + call_id: Required[str] + """The ID of the function call this output is for.""" + + output: Required[str] + """ + The output of the function call, this is free text and can contain any + information or simply be empty. + """ + + type: Required[Literal["function_call_output"]] + """The type of the item. Always `function_call_output`.""" + + id: str + """The unique ID of the item. + + This may be provided by the client or generated by the server. + """ + + object: Literal["realtime.item"] + """Identifier for the API object being returned - always `realtime.item`. + + Optional when creating a new item. 
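A function-calling round trip pairs the two items above: read the JSON-encoded `arguments` off the `function_call` item, run the tool locally, and answer with a `function_call_output` item that reuses the same `call_id`. A minimal sketch, with the tool logic left as a placeholder:

```python
import json

from openai.types.realtime.realtime_conversation_item_function_call import (
    RealtimeConversationItemFunctionCall,
)
from openai.types.realtime.realtime_conversation_item_function_call_output_param import (
    RealtimeConversationItemFunctionCallOutputParam,
)


def answer_function_call(
    item: RealtimeConversationItemFunctionCall,
) -> RealtimeConversationItemFunctionCallOutputParam:
    args = json.loads(item.arguments)  # e.g. {"arg1": "value1", "arg2": 42}
    result = {"echo": args}  # placeholder for a real tool implementation
    return {
        "type": "function_call_output",
        "call_id": item.call_id or "",  # call_id is optional on the parsed item
        "output": json.dumps(result),
    }
```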
+ """ + + status: Literal["completed", "incomplete", "in_progress"] + """The status of the item. Has no effect on the conversation.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_param.py new file mode 100644 index 00000000..36a16a27 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_function_call_param.py @@ -0,0 +1,40 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["RealtimeConversationItemFunctionCallParam"] + + +class RealtimeConversationItemFunctionCallParam(TypedDict, total=False): + arguments: Required[str] + """The arguments of the function call. + + This is a JSON-encoded string representing the arguments passed to the function, + for example `{"arg1": "value1", "arg2": 42}`. + """ + + name: Required[str] + """The name of the function being called.""" + + type: Required[Literal["function_call"]] + """The type of the item. Always `function_call`.""" + + id: str + """The unique ID of the item. + + This may be provided by the client or generated by the server. + """ + + call_id: str + """The ID of the function call.""" + + object: Literal["realtime.item"] + """Identifier for the API object being returned - always `realtime.item`. + + Optional when creating a new item. + """ + + status: Literal["completed", "incomplete", "in_progress"] + """The status of the item. Has no effect on the conversation.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_system_message.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_system_message.py new file mode 100644 index 00000000..7dac5c9f --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_system_message.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RealtimeConversationItemSystemMessage", "Content"] + + +class Content(BaseModel): + text: Optional[str] = None + """The text content.""" + + type: Optional[Literal["input_text"]] = None + """The content type. Always `input_text` for system messages.""" + + +class RealtimeConversationItemSystemMessage(BaseModel): + content: List[Content] + """The content of the message.""" + + role: Literal["system"] + """The role of the message sender. Always `system`.""" + + type: Literal["message"] + """The type of the item. Always `message`.""" + + id: Optional[str] = None + """The unique ID of the item. + + This may be provided by the client or generated by the server. + """ + + object: Optional[Literal["realtime.item"]] = None + """Identifier for the API object being returned - always `realtime.item`. + + Optional when creating a new item. + """ + + status: Optional[Literal["completed", "incomplete", "in_progress"]] = None + """The status of the item. 
Has no effect on the conversation.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_system_message_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_system_message_param.py new file mode 100644 index 00000000..a2790fcf --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_system_message_param.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["RealtimeConversationItemSystemMessageParam", "Content"] + + +class Content(TypedDict, total=False): + text: str + """The text content.""" + + type: Literal["input_text"] + """The content type. Always `input_text` for system messages.""" + + +class RealtimeConversationItemSystemMessageParam(TypedDict, total=False): + content: Required[Iterable[Content]] + """The content of the message.""" + + role: Required[Literal["system"]] + """The role of the message sender. Always `system`.""" + + type: Required[Literal["message"]] + """The type of the item. Always `message`.""" + + id: str + """The unique ID of the item. + + This may be provided by the client or generated by the server. + """ + + object: Literal["realtime.item"] + """Identifier for the API object being returned - always `realtime.item`. + + Optional when creating a new item. + """ + + status: Literal["completed", "incomplete", "in_progress"] + """The status of the item. Has no effect on the conversation.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_user_message.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_user_message.py new file mode 100644 index 00000000..30d9bb10 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_user_message.py @@ -0,0 +1,69 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RealtimeConversationItemUserMessage", "Content"] + + +class Content(BaseModel): + audio: Optional[str] = None + """ + Base64-encoded audio bytes (for `input_audio`), these will be parsed as the + format specified in the session input audio type configuration. This defaults to + PCM 16-bit 24kHz mono if not specified. + """ + + detail: Optional[Literal["auto", "low", "high"]] = None + """The detail level of the image (for `input_image`). + + `auto` will default to `high`. + """ + + image_url: Optional[str] = None + """Base64-encoded image bytes (for `input_image`) as a data URI. + + For example `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA...`. Supported + formats are PNG and JPEG. + """ + + text: Optional[str] = None + """The text content (for `input_text`).""" + + transcript: Optional[str] = None + """Transcript of the audio (for `input_audio`). + + This is not sent to the model, but will be attached to the message item for + reference. + """ + + type: Optional[Literal["input_text", "input_audio", "input_image"]] = None + """The content type (`input_text`, `input_audio`, or `input_image`).""" + + +class RealtimeConversationItemUserMessage(BaseModel): + content: List[Content] + """The content of the message.""" + + role: Literal["user"] + """The role of the message sender. Always `user`.""" + + type: Literal["message"] + """The type of the item. 
Always `message`.""" + + id: Optional[str] = None + """The unique ID of the item. + + This may be provided by the client or generated by the server. + """ + + object: Optional[Literal["realtime.item"]] = None + """Identifier for the API object being returned - always `realtime.item`. + + Optional when creating a new item. + """ + + status: Optional[Literal["completed", "incomplete", "in_progress"]] = None + """The status of the item. Has no effect on the conversation.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_user_message_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_user_message_param.py new file mode 100644 index 00000000..7d3b9bc1 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_conversation_item_user_message_param.py @@ -0,0 +1,69 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["RealtimeConversationItemUserMessageParam", "Content"] + + +class Content(TypedDict, total=False): + audio: str + """ + Base64-encoded audio bytes (for `input_audio`), these will be parsed as the + format specified in the session input audio type configuration. This defaults to + PCM 16-bit 24kHz mono if not specified. + """ + + detail: Literal["auto", "low", "high"] + """The detail level of the image (for `input_image`). + + `auto` will default to `high`. + """ + + image_url: str + """Base64-encoded image bytes (for `input_image`) as a data URI. + + For example `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA...`. Supported + formats are PNG and JPEG. + """ + + text: str + """The text content (for `input_text`).""" + + transcript: str + """Transcript of the audio (for `input_audio`). + + This is not sent to the model, but will be attached to the message item for + reference. + """ + + type: Literal["input_text", "input_audio", "input_image"] + """The content type (`input_text`, `input_audio`, or `input_image`).""" + + +class RealtimeConversationItemUserMessageParam(TypedDict, total=False): + content: Required[Iterable[Content]] + """The content of the message.""" + + role: Required[Literal["user"]] + """The role of the message sender. Always `user`.""" + + type: Required[Literal["message"]] + """The type of the item. Always `message`.""" + + id: str + """The unique ID of the item. + + This may be provided by the client or generated by the server. + """ + + object: Literal["realtime.item"] + """Identifier for the API object being returned - always `realtime.item`. + + Optional when creating a new item. + """ + + status: Literal["completed", "incomplete", "in_progress"] + """The status of the item. Has no effect on the conversation.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_error.py b/portkey_ai/_vendor/openai/types/realtime/realtime_error.py new file mode 100644 index 00000000..f1017d09 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_error.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
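The user message content types above compose freely within one item. A sketch of a mixed text-and-image user message built with the param type; the base64 payload is elided:

```python
from openai.types.realtime.realtime_conversation_item_user_message_param import (
    RealtimeConversationItemUserMessageParam,
)

user_item: RealtimeConversationItemUserMessageParam = {
    "type": "message",
    "role": "user",
    "content": [
        {"type": "input_text", "text": "What is in this picture?"},
        {
            "type": "input_image",
            "image_url": "data:image/png;base64,...",  # PNG or JPEG data URI
            "detail": "auto",  # resolves to `high` by default
        },
    ],
}
```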
+ +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["RealtimeError"] + + +class RealtimeError(BaseModel): + message: str + """A human-readable error message.""" + + type: str + """The type of error (e.g., "invalid_request_error", "server_error").""" + + code: Optional[str] = None + """Error code, if any.""" + + event_id: Optional[str] = None + """The event_id of the client event that caused the error, if applicable.""" + + param: Optional[str] = None + """Parameter related to the error, if any.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_error_event.py b/portkey_ai/_vendor/openai/types/realtime/realtime_error_event.py new file mode 100644 index 00000000..8b501d6b --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_error_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel +from .realtime_error import RealtimeError + +__all__ = ["RealtimeErrorEvent"] + + +class RealtimeErrorEvent(BaseModel): + error: RealtimeError + """Details of the error.""" + + event_id: str + """The unique ID of the server event.""" + + type: Literal["error"] + """The event type, must be `error`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_function_tool.py b/portkey_ai/_vendor/openai/types/realtime/realtime_function_tool.py new file mode 100644 index 00000000..48dbf992 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_function_tool.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RealtimeFunctionTool"] + + +class RealtimeFunctionTool(BaseModel): + description: Optional[str] = None + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: Optional[str] = None + """The name of the function.""" + + parameters: Optional[object] = None + """Parameters of the function in JSON Schema.""" + + type: Optional[Literal["function"]] = None + """The type of the tool, i.e. `function`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_function_tool_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_function_tool_param.py new file mode 100644 index 00000000..f42e3e49 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_function_tool_param.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["RealtimeFunctionToolParam"] + + +class RealtimeFunctionToolParam(TypedDict, total=False): + description: str + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: str + """The name of the function.""" + + parameters: object + """Parameters of the function in JSON Schema.""" + + type: Literal["function"] + """The type of the tool, i.e. 
`function`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_request.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_request.py new file mode 100644 index 00000000..bafc8d89 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_request.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RealtimeMcpApprovalRequest"] + + +class RealtimeMcpApprovalRequest(BaseModel): + id: str + """The unique ID of the approval request.""" + + arguments: str + """A JSON string of arguments for the tool.""" + + name: str + """The name of the tool to run.""" + + server_label: str + """The label of the MCP server making the request.""" + + type: Literal["mcp_approval_request"] + """The type of the item. Always `mcp_approval_request`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_request_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_request_param.py new file mode 100644 index 00000000..57c21a48 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_request_param.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["RealtimeMcpApprovalRequestParam"] + + +class RealtimeMcpApprovalRequestParam(TypedDict, total=False): + id: Required[str] + """The unique ID of the approval request.""" + + arguments: Required[str] + """A JSON string of arguments for the tool.""" + + name: Required[str] + """The name of the tool to run.""" + + server_label: Required[str] + """The label of the MCP server making the request.""" + + type: Required[Literal["mcp_approval_request"]] + """The type of the item. Always `mcp_approval_request`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_response.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_response.py new file mode 100644 index 00000000..2cb03bc6 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_response.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RealtimeMcpApprovalResponse"] + + +class RealtimeMcpApprovalResponse(BaseModel): + id: str + """The unique ID of the approval response.""" + + approval_request_id: str + """The ID of the approval request being answered.""" + + approve: bool + """Whether the request was approved.""" + + type: Literal["mcp_approval_response"] + """The type of the item. Always `mcp_approval_response`.""" + + reason: Optional[str] = None + """Optional reason for the decision.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_response_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_response_param.py new file mode 100644 index 00000000..19b63370 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_approval_response_param.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
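An `mcp_approval_response` item answers a prior `mcp_approval_request` by echoing its ID. A hedged sketch of a trivial approval policy; the `get_`-prefix rule and the item ID are invented for illustration:

```python
from openai.types.realtime.realtime_mcp_approval_request import RealtimeMcpApprovalRequest
from openai.types.realtime.realtime_mcp_approval_response_param import (
    RealtimeMcpApprovalResponseParam,
)


def decide(request: RealtimeMcpApprovalRequest) -> RealtimeMcpApprovalResponseParam:
    approved = request.name.startswith("get_")  # illustrative read-only heuristic
    return {
        "type": "mcp_approval_response",
        "id": "mcpr_001",  # illustrative item ID
        "approval_request_id": request.id,
        "approve": approved,
        "reason": None if approved else "tool is not on the read-only allowlist",
    }
```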
+ +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["RealtimeMcpApprovalResponseParam"] + + +class RealtimeMcpApprovalResponseParam(TypedDict, total=False): + id: Required[str] + """The unique ID of the approval response.""" + + approval_request_id: Required[str] + """The ID of the approval request being answered.""" + + approve: Required[bool] + """Whether the request was approved.""" + + type: Required[Literal["mcp_approval_response"]] + """The type of the item. Always `mcp_approval_response`.""" + + reason: Optional[str] + """Optional reason for the decision.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_list_tools.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_list_tools.py new file mode 100644 index 00000000..aeb58a1f --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_list_tools.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RealtimeMcpListTools", "Tool"] + + +class Tool(BaseModel): + input_schema: object + """The JSON schema describing the tool's input.""" + + name: str + """The name of the tool.""" + + annotations: Optional[object] = None + """Additional annotations about the tool.""" + + description: Optional[str] = None + """The description of the tool.""" + + +class RealtimeMcpListTools(BaseModel): + server_label: str + """The label of the MCP server.""" + + tools: List[Tool] + """The tools available on the server.""" + + type: Literal["mcp_list_tools"] + """The type of the item. Always `mcp_list_tools`.""" + + id: Optional[str] = None + """The unique ID of the list.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_list_tools_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_list_tools_param.py new file mode 100644 index 00000000..eb8605a0 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_list_tools_param.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable, Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["RealtimeMcpListToolsParam", "Tool"] + + +class Tool(TypedDict, total=False): + input_schema: Required[object] + """The JSON schema describing the tool's input.""" + + name: Required[str] + """The name of the tool.""" + + annotations: Optional[object] + """Additional annotations about the tool.""" + + description: Optional[str] + """The description of the tool.""" + + +class RealtimeMcpListToolsParam(TypedDict, total=False): + server_label: Required[str] + """The label of the MCP server.""" + + tools: Required[Iterable[Tool]] + """The tools available on the server.""" + + type: Required[Literal["mcp_list_tools"]] + """The type of the item. Always `mcp_list_tools`.""" + + id: str + """The unique ID of the list.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_protocol_error.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_protocol_error.py new file mode 100644 index 00000000..2e7cfdff --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_protocol_error.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RealtimeMcpProtocolError"] + + +class RealtimeMcpProtocolError(BaseModel): + code: int + + message: str + + type: Literal["protocol_error"] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_protocol_error_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_protocol_error_param.py new file mode 100644 index 00000000..bebe3d37 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_protocol_error_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["RealtimeMcpProtocolErrorParam"] + + +class RealtimeMcpProtocolErrorParam(TypedDict, total=False): + code: Required[int] + + message: Required[str] + + type: Required[Literal["protocol_error"]] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_call.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_call.py new file mode 100644 index 00000000..533175e5 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_call.py @@ -0,0 +1,43 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .realtime_mcphttp_error import RealtimeMcphttpError +from .realtime_mcp_protocol_error import RealtimeMcpProtocolError +from .realtime_mcp_tool_execution_error import RealtimeMcpToolExecutionError + +__all__ = ["RealtimeMcpToolCall", "Error"] + +Error: TypeAlias = Annotated[ + Union[RealtimeMcpProtocolError, RealtimeMcpToolExecutionError, RealtimeMcphttpError, None], + PropertyInfo(discriminator="type"), +] + + +class RealtimeMcpToolCall(BaseModel): + id: str + """The unique ID of the tool call.""" + + arguments: str + """A JSON string of the arguments passed to the tool.""" + + name: str + """The name of the tool that was run.""" + + server_label: str + """The label of the MCP server running the tool.""" + + type: Literal["mcp_tool_call"] + """The type of the item. Always `mcp_tool_call`.""" + + approval_request_id: Optional[str] = None + """The ID of an associated approval request, if any.""" + + error: Optional[Error] = None + """The error from the tool call, if any.""" + + output: Optional[str] = None + """The output from the tool call.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_call_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_call_param.py new file mode 100644 index 00000000..afdc9d1d --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_call_param.py @@ -0,0 +1,40 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
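The `error` union on a finished tool call is discriminated by `type` across the three error models in this module (the `http_error` variant follows below). A sketch of folding a tool-call result into a single display string:

```python
from openai.types.realtime.realtime_mcp_tool_call import RealtimeMcpToolCall


def describe(call: RealtimeMcpToolCall) -> str:
    if call.error is None:
        return call.output or "(no output)"
    if call.error.type == "protocol_error":
        return f"MCP protocol error {call.error.code}: {call.error.message}"
    if call.error.type == "tool_execution_error":
        return f"tool execution failed: {call.error.message}"
    # remaining variant is the HTTP error, which carries a status code
    return f"HTTP error {call.error.code}: {call.error.message}"
```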
+ +from __future__ import annotations + +from typing import Union, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .realtime_mcphttp_error_param import RealtimeMcphttpErrorParam +from .realtime_mcp_protocol_error_param import RealtimeMcpProtocolErrorParam +from .realtime_mcp_tool_execution_error_param import RealtimeMcpToolExecutionErrorParam + +__all__ = ["RealtimeMcpToolCallParam", "Error"] + +Error: TypeAlias = Union[RealtimeMcpProtocolErrorParam, RealtimeMcpToolExecutionErrorParam, RealtimeMcphttpErrorParam] + + +class RealtimeMcpToolCallParam(TypedDict, total=False): + id: Required[str] + """The unique ID of the tool call.""" + + arguments: Required[str] + """A JSON string of the arguments passed to the tool.""" + + name: Required[str] + """The name of the tool that was run.""" + + server_label: Required[str] + """The label of the MCP server running the tool.""" + + type: Required[Literal["mcp_tool_call"]] + """The type of the item. Always `mcp_tool_call`.""" + + approval_request_id: Optional[str] + """The ID of an associated approval request, if any.""" + + error: Optional[Error] + """The error from the tool call, if any.""" + + output: Optional[str] + """The output from the tool call.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_execution_error.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_execution_error.py new file mode 100644 index 00000000..a2ed0631 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_execution_error.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RealtimeMcpToolExecutionError"] + + +class RealtimeMcpToolExecutionError(BaseModel): + message: str + + type: Literal["tool_execution_error"] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_execution_error_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_execution_error_param.py new file mode 100644 index 00000000..619e11c3 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcp_tool_execution_error_param.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["RealtimeMcpToolExecutionErrorParam"] + + +class RealtimeMcpToolExecutionErrorParam(TypedDict, total=False): + message: Required[str] + + type: Required[Literal["tool_execution_error"]] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcphttp_error.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcphttp_error.py new file mode 100644 index 00000000..53cff91e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcphttp_error.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RealtimeMcphttpError"] + + +class RealtimeMcphttpError(BaseModel): + code: int + + message: str + + type: Literal["http_error"] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_mcphttp_error_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_mcphttp_error_param.py new file mode 100644 index 00000000..2b80a6f0 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_mcphttp_error_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["RealtimeMcphttpErrorParam"] + + +class RealtimeMcphttpErrorParam(TypedDict, total=False): + code: Required[int] + + message: Required[str] + + type: Required[Literal["http_error"]] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response.py new file mode 100644 index 00000000..92d75491 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response.py @@ -0,0 +1,98 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.metadata import Metadata +from .conversation_item import ConversationItem +from .realtime_audio_formats import RealtimeAudioFormats +from .realtime_response_usage import RealtimeResponseUsage +from .realtime_response_status import RealtimeResponseStatus + +__all__ = ["RealtimeResponse", "Audio", "AudioOutput"] + + +class AudioOutput(BaseModel): + format: Optional[RealtimeAudioFormats] = None + """The format of the output audio.""" + + voice: Union[ + str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], None + ] = None + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. We recommend + `marin` and `cedar` for best quality. + """ + + +class Audio(BaseModel): + output: Optional[AudioOutput] = None + + +class RealtimeResponse(BaseModel): + id: Optional[str] = None + """The unique ID of the response, will look like `resp_1234`.""" + + audio: Optional[Audio] = None + """Configuration for audio output.""" + + conversation_id: Optional[str] = None + """ + Which conversation the response is added to, determined by the `conversation` + field in the `response.create` event. If `auto`, the response will be added to + the default conversation and the value of `conversation_id` will be an id like + `conv_1234`. If `none`, the response will not be added to any conversation and + the value of `conversation_id` will be `null`. If responses are being triggered + automatically by VAD the response will be added to the default conversation + """ + + max_output_tokens: Union[int, Literal["inf"], None] = None + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls, that was used in this response. + """ + + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. 
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    object: Optional[Literal["realtime.response"]] = None
+    """The object type, must be `realtime.response`."""
+
+    output: Optional[List[ConversationItem]] = None
+    """The list of output items generated by the response."""
+
+    output_modalities: Optional[List[Literal["text", "audio"]]] = None
+    """
+    The set of modalities the model used to respond, currently the only possible
+    values are `[\"audio\"]`, `[\"text\"]`. Audio output always includes a text
+    transcript. Setting the output to mode `text` will disable audio output from the
+    model.
+    """
+
+    status: Optional[Literal["completed", "cancelled", "failed", "incomplete", "in_progress"]] = None
+    """
+    The final status of the response (`completed`, `cancelled`, `failed`,
+    `incomplete`, or `in_progress`).
+    """
+
+    status_details: Optional[RealtimeResponseStatus] = None
+    """Additional details about the status."""
+
+    usage: Optional[RealtimeResponseUsage] = None
+    """Usage statistics for the Response; this will correspond to billing.
+
+    A Realtime API session will maintain a conversation context and append new Items
+    to the Conversation; thus output from previous turns (text and audio tokens)
+    will become the input for later turns.
+    """
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output.py
new file mode 100644
index 00000000..48a5d67e
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .realtime_audio_formats import RealtimeAudioFormats
+
+__all__ = ["RealtimeResponseCreateAudioOutput", "Output"]
+
+
+class Output(BaseModel):
+    format: Optional[RealtimeAudioFormats] = None
+    """The format of the output audio."""
+
+    voice: Union[
+        str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], None
+    ] = None
+    """The voice the model uses to respond.
+
+    Voice cannot be changed during the session once the model has responded with
+    audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
+    `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. We recommend
+    `marin` and `cedar` for best quality.
+    """
+
+
+class RealtimeResponseCreateAudioOutput(BaseModel):
+    output: Optional[Output] = None
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output_param.py
new file mode 100644
index 00000000..9aa6d288
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_audio_output_param.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
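Since every field on `RealtimeResponse` is optional, downstream code guards before reading. A small illustrative helper (the function is an assumption for this sketch, not part of the diff):

```python
# Hypothetical helper that reads optional RealtimeResponse fields defensively,
# e.g. for a response delivered via a `response.done` server event.
from openai.types.realtime.realtime_response import RealtimeResponse

def summarize_response(resp: RealtimeResponse) -> str:
    total = resp.usage.total_tokens if resp.usage else None
    return f"response {resp.id or '?'}: status={resp.status or 'unknown'}, total_tokens={total}"
```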
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypedDict + +from .realtime_audio_formats_param import RealtimeAudioFormatsParam + +__all__ = ["RealtimeResponseCreateAudioOutputParam", "Output"] + + +class Output(TypedDict, total=False): + format: RealtimeAudioFormatsParam + """The format of the output audio.""" + + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]] + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. We recommend + `marin` and `cedar` for best quality. + """ + + +class RealtimeResponseCreateAudioOutputParam(TypedDict, total=False): + output: Output diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool.py new file mode 100644 index 00000000..119b4a45 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool.py @@ -0,0 +1,135 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel + +__all__ = [ + "RealtimeResponseCreateMcpTool", + "AllowedTools", + "AllowedToolsMcpToolFilter", + "RequireApproval", + "RequireApprovalMcpToolApprovalFilter", + "RequireApprovalMcpToolApprovalFilterAlways", + "RequireApprovalMcpToolApprovalFilterNever", +] + + +class AllowedToolsMcpToolFilter(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: Optional[List[str]] = None + """List of allowed tool names.""" + + +AllowedTools: TypeAlias = Union[List[str], AllowedToolsMcpToolFilter, None] + + +class RequireApprovalMcpToolApprovalFilterAlways(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: Optional[List[str]] = None + """List of allowed tool names.""" + + +class RequireApprovalMcpToolApprovalFilterNever(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. 
+ """ + + tool_names: Optional[List[str]] = None + """List of allowed tool names.""" + + +class RequireApprovalMcpToolApprovalFilter(BaseModel): + always: Optional[RequireApprovalMcpToolApprovalFilterAlways] = None + """A filter object to specify which tools are allowed.""" + + never: Optional[RequireApprovalMcpToolApprovalFilterNever] = None + """A filter object to specify which tools are allowed.""" + + +RequireApproval: TypeAlias = Union[RequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None] + + +class RealtimeResponseCreateMcpTool(BaseModel): + server_label: str + """A label for this MCP server, used to identify it in tool calls.""" + + type: Literal["mcp"] + """The type of the MCP tool. Always `mcp`.""" + + allowed_tools: Optional[AllowedTools] = None + """List of allowed tool names or a filter object.""" + + authorization: Optional[str] = None + """ + An OAuth access token that can be used with a remote MCP server, either with a + custom MCP server URL or a service connector. Your application must handle the + OAuth authorization flow and provide the token here. + """ + + connector_id: Optional[ + Literal[ + "connector_dropbox", + "connector_gmail", + "connector_googlecalendar", + "connector_googledrive", + "connector_microsoftteams", + "connector_outlookcalendar", + "connector_outlookemail", + "connector_sharepoint", + ] + ] = None + """Identifier for service connectors, like those available in ChatGPT. + + One of `server_url` or `connector_id` must be provided. Learn more about service + connectors + [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). + + Currently supported `connector_id` values are: + + - Dropbox: `connector_dropbox` + - Gmail: `connector_gmail` + - Google Calendar: `connector_googlecalendar` + - Google Drive: `connector_googledrive` + - Microsoft Teams: `connector_microsoftteams` + - Outlook Calendar: `connector_outlookcalendar` + - Outlook Email: `connector_outlookemail` + - SharePoint: `connector_sharepoint` + """ + + headers: Optional[Dict[str, str]] = None + """Optional HTTP headers to send to the MCP server. + + Use for authentication or other purposes. + """ + + require_approval: Optional[RequireApproval] = None + """Specify which of the MCP server's tools require approval.""" + + server_description: Optional[str] = None + """Optional description of the MCP server, used to provide more context.""" + + server_url: Optional[str] = None + """The URL for the MCP server. + + One of `server_url` or `connector_id` must be provided. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool_param.py new file mode 100644 index 00000000..3b9cf047 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_mcp_tool_param.py @@ -0,0 +1,135 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
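As the docstrings above describe, `allowed_tools` and `require_approval` accept either shorthand values or filter objects. A sketch of a locked-down MCP tool entry (the server label and URL are invented):

```python
# Hypothetical sketch: an MCP tool restricted to read-only tools, with every
# call requiring approval. pydantic coerces the dict into the filter model.
from openai.types.realtime.realtime_response_create_mcp_tool import RealtimeResponseCreateMcpTool

tool = RealtimeResponseCreateMcpTool(
    type="mcp",
    server_label="docs",
    server_url="https://mcp.example.com",
    allowed_tools={"read_only": True},
    require_approval="always",
)
```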
+ +from __future__ import annotations + +from typing import Dict, Union, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..._types import SequenceNotStr + +__all__ = [ + "RealtimeResponseCreateMcpToolParam", + "AllowedTools", + "AllowedToolsMcpToolFilter", + "RequireApproval", + "RequireApprovalMcpToolApprovalFilter", + "RequireApprovalMcpToolApprovalFilterAlways", + "RequireApprovalMcpToolApprovalFilterNever", +] + + +class AllowedToolsMcpToolFilter(TypedDict, total=False): + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: SequenceNotStr[str] + """List of allowed tool names.""" + + +AllowedTools: TypeAlias = Union[SequenceNotStr[str], AllowedToolsMcpToolFilter] + + +class RequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: SequenceNotStr[str] + """List of allowed tool names.""" + + +class RequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: SequenceNotStr[str] + """List of allowed tool names.""" + + +class RequireApprovalMcpToolApprovalFilter(TypedDict, total=False): + always: RequireApprovalMcpToolApprovalFilterAlways + """A filter object to specify which tools are allowed.""" + + never: RequireApprovalMcpToolApprovalFilterNever + """A filter object to specify which tools are allowed.""" + + +RequireApproval: TypeAlias = Union[RequireApprovalMcpToolApprovalFilter, Literal["always", "never"]] + + +class RealtimeResponseCreateMcpToolParam(TypedDict, total=False): + server_label: Required[str] + """A label for this MCP server, used to identify it in tool calls.""" + + type: Required[Literal["mcp"]] + """The type of the MCP tool. Always `mcp`.""" + + allowed_tools: Optional[AllowedTools] + """List of allowed tool names or a filter object.""" + + authorization: str + """ + An OAuth access token that can be used with a remote MCP server, either with a + custom MCP server URL or a service connector. Your application must handle the + OAuth authorization flow and provide the token here. + """ + + connector_id: Literal[ + "connector_dropbox", + "connector_gmail", + "connector_googlecalendar", + "connector_googledrive", + "connector_microsoftteams", + "connector_outlookcalendar", + "connector_outlookemail", + "connector_sharepoint", + ] + """Identifier for service connectors, like those available in ChatGPT. + + One of `server_url` or `connector_id` must be provided. Learn more about service + connectors + [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). 
+ + Currently supported `connector_id` values are: + + - Dropbox: `connector_dropbox` + - Gmail: `connector_gmail` + - Google Calendar: `connector_googlecalendar` + - Google Drive: `connector_googledrive` + - Microsoft Teams: `connector_microsoftteams` + - Outlook Calendar: `connector_outlookcalendar` + - Outlook Email: `connector_outlookemail` + - SharePoint: `connector_sharepoint` + """ + + headers: Optional[Dict[str, str]] + """Optional HTTP headers to send to the MCP server. + + Use for authentication or other purposes. + """ + + require_approval: Optional[RequireApproval] + """Specify which of the MCP server's tools require approval.""" + + server_description: str + """Optional description of the MCP server, used to provide more context.""" + + server_url: str + """The URL for the MCP server. + + One of `server_url` or `connector_id` must be provided. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_params.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_params.py new file mode 100644 index 00000000..4dfd1fd3 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_params.py @@ -0,0 +1,98 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel +from ..shared.metadata import Metadata +from .conversation_item import ConversationItem +from .realtime_function_tool import RealtimeFunctionTool +from ..responses.response_prompt import ResponsePrompt +from ..responses.tool_choice_mcp import ToolChoiceMcp +from ..responses.tool_choice_options import ToolChoiceOptions +from ..responses.tool_choice_function import ToolChoiceFunction +from .realtime_response_create_mcp_tool import RealtimeResponseCreateMcpTool +from .realtime_response_create_audio_output import RealtimeResponseCreateAudioOutput + +__all__ = ["RealtimeResponseCreateParams", "ToolChoice", "Tool"] + +ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceFunction, ToolChoiceMcp] + +Tool: TypeAlias = Union[RealtimeFunctionTool, RealtimeResponseCreateMcpTool] + + +class RealtimeResponseCreateParams(BaseModel): + audio: Optional[RealtimeResponseCreateAudioOutput] = None + """Configuration for audio input and output.""" + + conversation: Union[str, Literal["auto", "none"], None] = None + """Controls which conversation the response is added to. + + Currently supports `auto` and `none`, with `auto` as the default value. The + `auto` value means that the contents of the response will be added to the + default conversation. Set this to `none` to create an out-of-band response which + will not add items to default conversation. + """ + + input: Optional[List[ConversationItem]] = None + """Input items to include in the prompt for the model. + + Using this field creates a new context for this Response instead of using the + default conversation. An empty array `[]` will clear the context for this + Response. Note that this can include references to items that previously + appeared in the session using their id. + """ + + instructions: Optional[str] = None + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. 
"talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. Note that the server sets default instructions which will be used if + this field is not set and are visible in the `session.created` event at the + start of the session. + """ + + max_output_tokens: Union[int, Literal["inf"], None] = None + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + output_modalities: Optional[List[Literal["text", "audio"]]] = None + """ + The set of modalities the model used to respond, currently the only possible + values are `[\"audio\"]`, `[\"text\"]`. Audio output always include a text + transcript. Setting the output to mode `text` will disable audio output from the + model. + """ + + prompt: Optional[ResponsePrompt] = None + """Reference to a prompt template and its variables. + + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + """ + + tool_choice: Optional[ToolChoice] = None + """How the model chooses tools. + + Provide one of the string modes or force a specific function/MCP tool. + """ + + tools: Optional[List[Tool]] = None + """Tools available to the model.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_params_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_params_param.py new file mode 100644 index 00000000..eceffccc --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_create_params_param.py @@ -0,0 +1,99 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Union, Iterable, Optional +from typing_extensions import Literal, TypeAlias, TypedDict + +from ..shared_params.metadata import Metadata +from .conversation_item_param import ConversationItemParam +from .realtime_function_tool_param import RealtimeFunctionToolParam +from ..responses.tool_choice_options import ToolChoiceOptions +from ..responses.response_prompt_param import ResponsePromptParam +from ..responses.tool_choice_mcp_param import ToolChoiceMcpParam +from ..responses.tool_choice_function_param import ToolChoiceFunctionParam +from .realtime_response_create_mcp_tool_param import RealtimeResponseCreateMcpToolParam +from .realtime_response_create_audio_output_param import RealtimeResponseCreateAudioOutputParam + +__all__ = ["RealtimeResponseCreateParamsParam", "ToolChoice", "Tool"] + +ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceFunctionParam, ToolChoiceMcpParam] + +Tool: TypeAlias = Union[RealtimeFunctionToolParam, RealtimeResponseCreateMcpToolParam] + + +class RealtimeResponseCreateParamsParam(TypedDict, total=False): + audio: RealtimeResponseCreateAudioOutputParam + """Configuration for audio input and output.""" + + conversation: Union[str, Literal["auto", "none"]] + """Controls which conversation the response is added to. + + Currently supports `auto` and `none`, with `auto` as the default value. The + `auto` value means that the contents of the response will be added to the + default conversation. Set this to `none` to create an out-of-band response which + will not add items to default conversation. + """ + + input: Iterable[ConversationItemParam] + """Input items to include in the prompt for the model. + + Using this field creates a new context for this Response instead of using the + default conversation. An empty array `[]` will clear the context for this + Response. Note that this can include references to items that previously + appeared in the session using their id. + """ + + instructions: str + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. Note that the server sets default instructions which will be used if + this field is not set and are visible in the `session.created` event at the + start of the session. + """ + + max_output_tokens: Union[int, Literal["inf"]] + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
+    """
+
+    output_modalities: List[Literal["text", "audio"]]
+    """
+    The set of modalities the model used to respond, currently the only possible
+    values are `[\"audio\"]`, `[\"text\"]`. Audio output always includes a text
+    transcript. Setting the output to mode `text` will disable audio output from the
+    model.
+    """
+
+    prompt: Optional[ResponsePromptParam]
+    """Reference to a prompt template and its variables.
+
+    [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+    """
+
+    tool_choice: ToolChoice
+    """How the model chooses tools.
+
+    Provide one of the string modes or force a specific function/MCP tool.
+    """
+
+    tools: Iterable[Tool]
+    """Tools available to the model."""
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_status.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_status.py
new file mode 100644
index 00000000..12999f61
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_status.py
@@ -0,0 +1,39 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["RealtimeResponseStatus", "Error"]
+
+
+class Error(BaseModel):
+    code: Optional[str] = None
+    """Error code, if any."""
+
+    type: Optional[str] = None
+    """The type of error."""
+
+
+class RealtimeResponseStatus(BaseModel):
+    error: Optional[Error] = None
+    """
+    A description of the error that caused the response to fail, populated when the
+    `status` is `failed`.
+    """
+
+    reason: Optional[Literal["turn_detected", "client_cancelled", "max_output_tokens", "content_filter"]] = None
+    """The reason the Response did not complete.
+
+    For a `cancelled` Response, one of `turn_detected` (the server VAD detected a
+    new start of speech) or `client_cancelled` (the client sent a cancel event). For
+    an `incomplete` Response, one of `max_output_tokens` or `content_filter` (the
+    server-side safety filter activated and cut off the response).
+    """
+
+    type: Optional[Literal["completed", "cancelled", "incomplete", "failed"]] = None
+    """
+    The type of error that caused the response to fail, corresponding with the
+    `status` field (`completed`, `cancelled`, `incomplete`, `failed`).
+    """
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage.py
new file mode 100644
index 00000000..fb8893b3
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage.py
@@ -0,0 +1,41 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .realtime_response_usage_input_token_details import RealtimeResponseUsageInputTokenDetails
+from .realtime_response_usage_output_token_details import RealtimeResponseUsageOutputTokenDetails
+
+__all__ = ["RealtimeResponseUsage"]
+
+
+class RealtimeResponseUsage(BaseModel):
+    input_token_details: Optional[RealtimeResponseUsageInputTokenDetails] = None
+    """Details about the input tokens used in the Response.
+
+    Cached tokens are tokens from previous turns in the conversation that are
+    included as context for the current response. Cached tokens here are counted as
+    a subset of input tokens, meaning input tokens will include cached and uncached
+    tokens.
+ """ + + input_tokens: Optional[int] = None + """ + The number of input tokens used in the Response, including text and audio + tokens. + """ + + output_token_details: Optional[RealtimeResponseUsageOutputTokenDetails] = None + """Details about the output tokens used in the Response.""" + + output_tokens: Optional[int] = None + """ + The number of output tokens sent in the Response, including text and audio + tokens. + """ + + total_tokens: Optional[int] = None + """ + The total number of tokens in the Response including input and output text and + audio tokens. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage_input_token_details.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage_input_token_details.py new file mode 100644 index 00000000..e14a74a8 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage_input_token_details.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["RealtimeResponseUsageInputTokenDetails", "CachedTokensDetails"] + + +class CachedTokensDetails(BaseModel): + audio_tokens: Optional[int] = None + """The number of cached audio tokens used as input for the Response.""" + + image_tokens: Optional[int] = None + """The number of cached image tokens used as input for the Response.""" + + text_tokens: Optional[int] = None + """The number of cached text tokens used as input for the Response.""" + + +class RealtimeResponseUsageInputTokenDetails(BaseModel): + audio_tokens: Optional[int] = None + """The number of audio tokens used as input for the Response.""" + + cached_tokens: Optional[int] = None + """The number of cached tokens used as input for the Response.""" + + cached_tokens_details: Optional[CachedTokensDetails] = None + """Details about the cached tokens used as input for the Response.""" + + image_tokens: Optional[int] = None + """The number of image tokens used as input for the Response.""" + + text_tokens: Optional[int] = None + """The number of text tokens used as input for the Response.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage_output_token_details.py b/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage_output_token_details.py new file mode 100644 index 00000000..dfa97a1f --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_response_usage_output_token_details.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["RealtimeResponseUsageOutputTokenDetails"] + + +class RealtimeResponseUsageOutputTokenDetails(BaseModel): + audio_tokens: Optional[int] = None + """The number of audio tokens used in the Response.""" + + text_tokens: Optional[int] = None + """The number of text tokens used in the Response.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_server_event.py b/portkey_ai/_vendor/openai/types/realtime/realtime_server_event.py new file mode 100644 index 00000000..1605b81a --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_server_event.py @@ -0,0 +1,155 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
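Because cached tokens are documented above as a subset of `input_tokens`, the uncached portion can be derived with a simple difference. A small illustrative helper (the function name is invented for this sketch):

```python
# Hypothetical helper: cached tokens are a subset of input tokens, so the
# uncached portion is total minus cached, guarding the optional fields.
from openai.types.realtime.realtime_response_usage import RealtimeResponseUsage

def uncached_input_tokens(usage: RealtimeResponseUsage) -> int:
    total = usage.input_tokens or 0
    details = usage.input_token_details
    cached = details.cached_tokens if details else None
    return total - (cached or 0)
```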
+ +from typing import Union +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .conversation_item import ConversationItem +from .response_done_event import ResponseDoneEvent +from .realtime_error_event import RealtimeErrorEvent +from .mcp_list_tools_failed import McpListToolsFailed +from .session_created_event import SessionCreatedEvent +from .session_updated_event import SessionUpdatedEvent +from .conversation_item_done import ConversationItemDone +from .response_created_event import ResponseCreatedEvent +from .conversation_item_added import ConversationItemAdded +from .mcp_list_tools_completed import McpListToolsCompleted +from .response_mcp_call_failed import ResponseMcpCallFailed +from .response_text_done_event import ResponseTextDoneEvent +from .rate_limits_updated_event import RateLimitsUpdatedEvent +from .response_audio_done_event import ResponseAudioDoneEvent +from .response_text_delta_event import ResponseTextDeltaEvent +from .conversation_created_event import ConversationCreatedEvent +from .mcp_list_tools_in_progress import McpListToolsInProgress +from .response_audio_delta_event import ResponseAudioDeltaEvent +from .response_mcp_call_completed import ResponseMcpCallCompleted +from .response_mcp_call_in_progress import ResponseMcpCallInProgress +from .conversation_item_created_event import ConversationItemCreatedEvent +from .conversation_item_deleted_event import ConversationItemDeletedEvent +from .response_output_item_done_event import ResponseOutputItemDoneEvent +from .input_audio_buffer_cleared_event import InputAudioBufferClearedEvent +from .response_content_part_done_event import ResponseContentPartDoneEvent +from .response_mcp_call_arguments_done import ResponseMcpCallArgumentsDone +from .response_output_item_added_event import ResponseOutputItemAddedEvent +from .conversation_item_truncated_event import ConversationItemTruncatedEvent +from .response_content_part_added_event import ResponseContentPartAddedEvent +from .response_mcp_call_arguments_delta import ResponseMcpCallArgumentsDelta +from .input_audio_buffer_committed_event import InputAudioBufferCommittedEvent +from .input_audio_buffer_timeout_triggered import InputAudioBufferTimeoutTriggered +from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent +from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent +from .input_audio_buffer_speech_started_event import InputAudioBufferSpeechStartedEvent +from .input_audio_buffer_speech_stopped_event import InputAudioBufferSpeechStoppedEvent +from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent +from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent +from .conversation_item_input_audio_transcription_segment import ConversationItemInputAudioTranscriptionSegment +from .conversation_item_input_audio_transcription_delta_event import ConversationItemInputAudioTranscriptionDeltaEvent +from .conversation_item_input_audio_transcription_failed_event import ConversationItemInputAudioTranscriptionFailedEvent +from .conversation_item_input_audio_transcription_completed_event import ( + ConversationItemInputAudioTranscriptionCompletedEvent, +) + +__all__ = [ + "RealtimeServerEvent", + "ConversationItemRetrieved", + "OutputAudioBufferStarted", + "OutputAudioBufferStopped", + "OutputAudioBufferCleared", +] + + +class ConversationItemRetrieved(BaseModel): + event_id: str 
+ """The unique ID of the server event.""" + + item: ConversationItem + """A single item within a Realtime conversation.""" + + type: Literal["conversation.item.retrieved"] + """The event type, must be `conversation.item.retrieved`.""" + + +class OutputAudioBufferStarted(BaseModel): + event_id: str + """The unique ID of the server event.""" + + response_id: str + """The unique ID of the response that produced the audio.""" + + type: Literal["output_audio_buffer.started"] + """The event type, must be `output_audio_buffer.started`.""" + + +class OutputAudioBufferStopped(BaseModel): + event_id: str + """The unique ID of the server event.""" + + response_id: str + """The unique ID of the response that produced the audio.""" + + type: Literal["output_audio_buffer.stopped"] + """The event type, must be `output_audio_buffer.stopped`.""" + + +class OutputAudioBufferCleared(BaseModel): + event_id: str + """The unique ID of the server event.""" + + response_id: str + """The unique ID of the response that produced the audio.""" + + type: Literal["output_audio_buffer.cleared"] + """The event type, must be `output_audio_buffer.cleared`.""" + + +RealtimeServerEvent: TypeAlias = Annotated[ + Union[ + ConversationCreatedEvent, + ConversationItemCreatedEvent, + ConversationItemDeletedEvent, + ConversationItemInputAudioTranscriptionCompletedEvent, + ConversationItemInputAudioTranscriptionDeltaEvent, + ConversationItemInputAudioTranscriptionFailedEvent, + ConversationItemRetrieved, + ConversationItemTruncatedEvent, + RealtimeErrorEvent, + InputAudioBufferClearedEvent, + InputAudioBufferCommittedEvent, + InputAudioBufferSpeechStartedEvent, + InputAudioBufferSpeechStoppedEvent, + RateLimitsUpdatedEvent, + ResponseAudioDeltaEvent, + ResponseAudioDoneEvent, + ResponseAudioTranscriptDeltaEvent, + ResponseAudioTranscriptDoneEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseCreatedEvent, + ResponseDoneEvent, + ResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionCallArgumentsDoneEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseTextDeltaEvent, + ResponseTextDoneEvent, + SessionCreatedEvent, + SessionUpdatedEvent, + OutputAudioBufferStarted, + OutputAudioBufferStopped, + OutputAudioBufferCleared, + ConversationItemAdded, + ConversationItemDone, + InputAudioBufferTimeoutTriggered, + ConversationItemInputAudioTranscriptionSegment, + McpListToolsInProgress, + McpListToolsCompleted, + McpListToolsFailed, + ResponseMcpCallArgumentsDelta, + ResponseMcpCallArgumentsDone, + ResponseMcpCallInProgress, + ResponseMcpCallCompleted, + ResponseMcpCallFailed, + ], + PropertyInfo(discriminator="type"), +] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_session_client_secret.py b/portkey_ai/_vendor/openai/types/realtime/realtime_session_client_secret.py new file mode 100644 index 00000000..a4998802 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_session_client_secret.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from ..._models import BaseModel + +__all__ = ["RealtimeSessionClientSecret"] + + +class RealtimeSessionClientSecret(BaseModel): + expires_at: int + """Timestamp for when the token expires. + + Currently, all tokens expire after one minute. + """ + + value: str + """ + Ephemeral key usable in client environments to authenticate connections to the + Realtime API. 
Use this in client-side environments rather than a standard API + token, which should only be used server-side. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request.py b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request.py new file mode 100644 index 00000000..578bc438 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request.py @@ -0,0 +1,107 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .realtime_truncation import RealtimeTruncation +from .realtime_audio_config import RealtimeAudioConfig +from .realtime_tools_config import RealtimeToolsConfig +from .realtime_tracing_config import RealtimeTracingConfig +from ..responses.response_prompt import ResponsePrompt +from .realtime_tool_choice_config import RealtimeToolChoiceConfig + +__all__ = ["RealtimeSessionCreateRequest"] + + +class RealtimeSessionCreateRequest(BaseModel): + type: Literal["realtime"] + """The type of session to create. Always `realtime` for the Realtime API.""" + + audio: Optional[RealtimeAudioConfig] = None + """Configuration for input and output audio.""" + + include: Optional[List[Literal["item.input_audio_transcription.logprobs"]]] = None + """Additional fields to include in server outputs. + + `item.input_audio_transcription.logprobs`: Include logprobs for input audio + transcription. + """ + + instructions: Optional[str] = None + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_output_tokens: Union[int, Literal["inf"], None] = None + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + model: Union[ + str, + Literal[ + "gpt-realtime", + "gpt-realtime-2025-08-28", + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-realtime-preview-2025-06-03", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ], + None, + ] = None + """The Realtime model used for this session.""" + + output_modalities: Optional[List[Literal["text", "audio"]]] = None + """The set of modalities the model can respond with. + + It defaults to `["audio"]`, indicating that the model will respond with audio + plus a transcript. `["text"]` can be used to make the model respond with text + only. It is not possible to request both `text` and `audio` at the same time. + """ + + prompt: Optional[ResponsePrompt] = None + """Reference to a prompt template and its variables. 
+ + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + """ + + tool_choice: Optional[RealtimeToolChoiceConfig] = None + """How the model chooses tools. + + Provide one of the string modes or force a specific function/MCP tool. + """ + + tools: Optional[RealtimeToolsConfig] = None + """Tools available to the model.""" + + tracing: Optional[RealtimeTracingConfig] = None + """ + Realtime API can write session traces to the + [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once + tracing is enabled for a session, the configuration cannot be modified. + + `auto` will create a trace for the session with default values for the workflow + name, group id, and metadata. + """ + + truncation: Optional[RealtimeTruncation] = None + """ + Controls how the realtime conversation is truncated prior to model inference. + The default is `auto`. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request_param.py new file mode 100644 index 00000000..5f7819fa --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_request_param.py @@ -0,0 +1,107 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import Literal, Required, TypedDict + +from .realtime_truncation_param import RealtimeTruncationParam +from .realtime_audio_config_param import RealtimeAudioConfigParam +from .realtime_tools_config_param import RealtimeToolsConfigParam +from .realtime_tracing_config_param import RealtimeTracingConfigParam +from ..responses.response_prompt_param import ResponsePromptParam +from .realtime_tool_choice_config_param import RealtimeToolChoiceConfigParam + +__all__ = ["RealtimeSessionCreateRequestParam"] + + +class RealtimeSessionCreateRequestParam(TypedDict, total=False): + type: Required[Literal["realtime"]] + """The type of session to create. Always `realtime` for the Realtime API.""" + + audio: RealtimeAudioConfigParam + """Configuration for input and output audio.""" + + include: List[Literal["item.input_audio_transcription.logprobs"]] + """Additional fields to include in server outputs. + + `item.input_audio_transcription.logprobs`: Include logprobs for input audio + transcription. + """ + + instructions: str + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_output_tokens: Union[int, Literal["inf"]] + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. 
+ """ + + model: Union[ + str, + Literal[ + "gpt-realtime", + "gpt-realtime-2025-08-28", + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-realtime-preview-2025-06-03", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ], + ] + """The Realtime model used for this session.""" + + output_modalities: List[Literal["text", "audio"]] + """The set of modalities the model can respond with. + + It defaults to `["audio"]`, indicating that the model will respond with audio + plus a transcript. `["text"]` can be used to make the model respond with text + only. It is not possible to request both `text` and `audio` at the same time. + """ + + prompt: Optional[ResponsePromptParam] + """Reference to a prompt template and its variables. + + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + """ + + tool_choice: RealtimeToolChoiceConfigParam + """How the model chooses tools. + + Provide one of the string modes or force a specific function/MCP tool. + """ + + tools: RealtimeToolsConfigParam + """Tools available to the model.""" + + tracing: Optional[RealtimeTracingConfigParam] + """ + Realtime API can write session traces to the + [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once + tracing is enabled for a session, the configuration cannot be modified. + + `auto` will create a trace for the session with default values for the workflow + name, group id, and metadata. + """ + + truncation: RealtimeTruncationParam + """ + Controls how the realtime conversation is truncated prior to model inference. + The default is `auto`. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_response.py b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_response.py new file mode 100644 index 00000000..8d7bfd6d --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_session_create_response.py @@ -0,0 +1,460 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
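A sketch of a GA-style session payload matching `RealtimeSessionCreateRequestParam`, e.g. for `connection.session.update(session=...)` as shown in the README section of this diff; the nested `audio` shape is an assumption based on the surrounding types, and all values are invented:

```python
# Hypothetical session payload; keys mirror RealtimeSessionCreateRequestParam.
session_config = {
    "type": "realtime",              # required discriminator for GA sessions
    "model": "gpt-realtime",
    "output_modalities": ["audio"],  # audio replies always include a transcript
    "instructions": "Answer briefly and cheerfully.",
    "max_output_tokens": 1024,
    "audio": {"output": {"voice": "marin"}},  # assumed RealtimeAudioConfigParam shape
}
```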
+ +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .audio_transcription import AudioTranscription +from .realtime_truncation import RealtimeTruncation +from .noise_reduction_type import NoiseReductionType +from .realtime_audio_formats import RealtimeAudioFormats +from .realtime_function_tool import RealtimeFunctionTool +from ..responses.response_prompt import ResponsePrompt +from ..responses.tool_choice_mcp import ToolChoiceMcp +from ..responses.tool_choice_options import ToolChoiceOptions +from .realtime_session_client_secret import RealtimeSessionClientSecret +from ..responses.tool_choice_function import ToolChoiceFunction + +__all__ = [ + "RealtimeSessionCreateResponse", + "Audio", + "AudioInput", + "AudioInputNoiseReduction", + "AudioInputTurnDetection", + "AudioInputTurnDetectionServerVad", + "AudioInputTurnDetectionSemanticVad", + "AudioOutput", + "ToolChoice", + "Tool", + "ToolMcpTool", + "ToolMcpToolAllowedTools", + "ToolMcpToolAllowedToolsMcpToolFilter", + "ToolMcpToolRequireApproval", + "ToolMcpToolRequireApprovalMcpToolApprovalFilter", + "ToolMcpToolRequireApprovalMcpToolApprovalFilterAlways", + "ToolMcpToolRequireApprovalMcpToolApprovalFilterNever", + "Tracing", + "TracingTracingConfiguration", +] + + +class AudioInputNoiseReduction(BaseModel): + type: Optional[NoiseReductionType] = None + """Type of noise reduction. + + `near_field` is for close-talking microphones such as headphones, `far_field` is + for far-field microphones such as laptop or conference room microphones. + """ + + +class AudioInputTurnDetectionServerVad(BaseModel): + type: Literal["server_vad"] + """Type of turn detection, `server_vad` to turn on simple Server VAD.""" + + create_response: Optional[bool] = None + """ + Whether or not to automatically generate a response when a VAD stop event + occurs. + """ + + idle_timeout_ms: Optional[int] = None + """Optional timeout after which a model response will be triggered automatically. + + This is useful for situations in which a long pause from the user is unexpected, + such as a phone call. The model will effectively prompt the user to continue the + conversation based on the current context. + + The timeout value will be applied after the last model response's audio has + finished playing, i.e. it's set to the `response.done` time plus audio playback + duration. + + An `input_audio_buffer.timeout_triggered` event (plus events associated with the + Response) will be emitted when the timeout is reached. Idle timeout is currently + only supported for `server_vad` mode. + """ + + interrupt_response: Optional[bool] = None + """ + Whether or not to automatically interrupt any ongoing response with output to + the default conversation (i.e. `conversation` of `auto`) when a VAD start event + occurs. + """ + + prefix_padding_ms: Optional[int] = None + """Used only for `server_vad` mode. + + Amount of audio to include before the VAD detected speech (in milliseconds). + Defaults to 300ms. + """ + + silence_duration_ms: Optional[int] = None + """Used only for `server_vad` mode. + + Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + With shorter values the model will respond more quickly, but may jump in on + short pauses from the user. + """ + + threshold: Optional[float] = None + """Used only for `server_vad` mode. + + Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. 
A higher
+    threshold will require louder audio to activate the model, and thus might
+    perform better in noisy environments.
+    """
+
+
+class AudioInputTurnDetectionSemanticVad(BaseModel):
+    type: Literal["semantic_vad"]
+    """Type of turn detection, `semantic_vad` to turn on Semantic VAD."""
+
+    create_response: Optional[bool] = None
+    """
+    Whether or not to automatically generate a response when a VAD stop event
+    occurs.
+    """
+
+    eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None
+    """Used only for `semantic_vad` mode.
+
+    The eagerness of the model to respond. `low` will wait longer for the user to
+    continue speaking, `high` will respond more quickly. `auto` is the default and
+    is equivalent to `medium`. `low`, `medium`, and `high` have max timeouts of 8s,
+    4s, and 2s respectively.
+    """
+
+    interrupt_response: Optional[bool] = None
+    """
+    Whether or not to automatically interrupt any ongoing response with output to
+    the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+    occurs.
+    """
+
+
+AudioInputTurnDetection: TypeAlias = Annotated[
+    Union[AudioInputTurnDetectionServerVad, AudioInputTurnDetectionSemanticVad, None],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class AudioInput(BaseModel):
+    format: Optional[RealtimeAudioFormats] = None
+    """The format of the input audio."""
+
+    noise_reduction: Optional[AudioInputNoiseReduction] = None
+    """Configuration for input audio noise reduction.
+
+    This can be set to `null` to turn off. Noise reduction filters audio added to
+    the input audio buffer before it is sent to VAD and the model. Filtering the
+    audio can improve VAD and turn detection accuracy (reducing false positives) and
+    model performance by improving perception of the input audio.
+    """
+
+    transcription: Optional[AudioTranscription] = None
+    """
+    Configuration for input audio transcription, defaults to off and can be set to
+    `null` to turn off once on. Input audio transcription is not native to the
+    model, since the model consumes audio directly. Transcription runs
+    asynchronously through
+    [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+    and should be treated as guidance of input audio content rather than precisely
+    what the model heard. The client can optionally set the language and prompt for
+    transcription; these offer additional guidance to the transcription service.
+    """
+
+    turn_detection: Optional[AudioInputTurnDetection] = None
+    """Configuration for turn detection, either Server VAD or Semantic VAD.
+
+    This can be set to `null` to turn off, in which case the client must manually
+    trigger model response.
+
+    Server VAD means that the model will detect the start and end of speech based on
+    audio volume and respond at the end of user speech.
+
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
+    with VAD) to semantically estimate whether the user has finished speaking, then
+    dynamically sets a timeout based on this probability. For example, if user audio
+    trails off with "uhhm", the model will score a low probability of turn end and
+    wait longer for the user to continue speaking. This can be useful for more
+    natural conversations, but may have a higher latency.
+    """
+
+
+class AudioOutput(BaseModel):
+    format: Optional[RealtimeAudioFormats] = None
+    """The format of the output audio."""
+
+    speed: Optional[float] = None
+    """
+    The speed of the model's spoken response as a multiple of the original speed.
+ 1.0 is the default speed. 0.25 is the minimum speed. 1.5 is the maximum speed. + This value can only be changed in between model turns, not while a response is + in progress. + + This parameter is a post-processing adjustment to the audio after it is + generated, it's also possible to prompt the model to speak faster or slower. + """ + + voice: Union[ + str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], None + ] = None + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. We recommend + `marin` and `cedar` for best quality. + """ + + +class Audio(BaseModel): + input: Optional[AudioInput] = None + + output: Optional[AudioOutput] = None + + +ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceFunction, ToolChoiceMcp] + + +class ToolMcpToolAllowedToolsMcpToolFilter(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: Optional[List[str]] = None + """List of allowed tool names.""" + + +ToolMcpToolAllowedTools: TypeAlias = Union[List[str], ToolMcpToolAllowedToolsMcpToolFilter, None] + + +class ToolMcpToolRequireApprovalMcpToolApprovalFilterAlways(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: Optional[List[str]] = None + """List of allowed tool names.""" + + +class ToolMcpToolRequireApprovalMcpToolApprovalFilterNever(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: Optional[List[str]] = None + """List of allowed tool names.""" + + +class ToolMcpToolRequireApprovalMcpToolApprovalFilter(BaseModel): + always: Optional[ToolMcpToolRequireApprovalMcpToolApprovalFilterAlways] = None + """A filter object to specify which tools are allowed.""" + + never: Optional[ToolMcpToolRequireApprovalMcpToolApprovalFilterNever] = None + """A filter object to specify which tools are allowed.""" + + +ToolMcpToolRequireApproval: TypeAlias = Union[ + ToolMcpToolRequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None +] + + +class ToolMcpTool(BaseModel): + server_label: str + """A label for this MCP server, used to identify it in tool calls.""" + + type: Literal["mcp"] + """The type of the MCP tool. Always `mcp`.""" + + allowed_tools: Optional[ToolMcpToolAllowedTools] = None + """List of allowed tool names or a filter object.""" + + authorization: Optional[str] = None + """ + An OAuth access token that can be used with a remote MCP server, either with a + custom MCP server URL or a service connector. Your application must handle the + OAuth authorization flow and provide the token here. 
+ """ + + connector_id: Optional[ + Literal[ + "connector_dropbox", + "connector_gmail", + "connector_googlecalendar", + "connector_googledrive", + "connector_microsoftteams", + "connector_outlookcalendar", + "connector_outlookemail", + "connector_sharepoint", + ] + ] = None + """Identifier for service connectors, like those available in ChatGPT. + + One of `server_url` or `connector_id` must be provided. Learn more about service + connectors + [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). + + Currently supported `connector_id` values are: + + - Dropbox: `connector_dropbox` + - Gmail: `connector_gmail` + - Google Calendar: `connector_googlecalendar` + - Google Drive: `connector_googledrive` + - Microsoft Teams: `connector_microsoftteams` + - Outlook Calendar: `connector_outlookcalendar` + - Outlook Email: `connector_outlookemail` + - SharePoint: `connector_sharepoint` + """ + + headers: Optional[Dict[str, str]] = None + """Optional HTTP headers to send to the MCP server. + + Use for authentication or other purposes. + """ + + require_approval: Optional[ToolMcpToolRequireApproval] = None + """Specify which of the MCP server's tools require approval.""" + + server_description: Optional[str] = None + """Optional description of the MCP server, used to provide more context.""" + + server_url: Optional[str] = None + """The URL for the MCP server. + + One of `server_url` or `connector_id` must be provided. + """ + + +Tool: TypeAlias = Union[RealtimeFunctionTool, ToolMcpTool] + + +class TracingTracingConfiguration(BaseModel): + group_id: Optional[str] = None + """ + The group id to attach to this trace to enable filtering and grouping in the + Traces Dashboard. + """ + + metadata: Optional[object] = None + """ + The arbitrary metadata to attach to this trace to enable filtering in the Traces + Dashboard. + """ + + workflow_name: Optional[str] = None + """The name of the workflow to attach to this trace. + + This is used to name the trace in the Traces Dashboard. + """ + + +Tracing: TypeAlias = Union[Literal["auto"], TracingTracingConfiguration, None] + + +class RealtimeSessionCreateResponse(BaseModel): + client_secret: RealtimeSessionClientSecret + """Ephemeral key returned by the API.""" + + type: Literal["realtime"] + """The type of session to create. Always `realtime` for the Realtime API.""" + + audio: Optional[Audio] = None + """Configuration for input and output audio.""" + + include: Optional[List[Literal["item.input_audio_transcription.logprobs"]]] = None + """Additional fields to include in server outputs. + + `item.input_audio_transcription.logprobs`: Include logprobs for input audio + transcription. + """ + + instructions: Optional[str] = None + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. 
+ """ + + max_output_tokens: Union[int, Literal["inf"], None] = None + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + model: Union[ + str, + Literal[ + "gpt-realtime", + "gpt-realtime-2025-08-28", + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-realtime-preview-2025-06-03", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ], + None, + ] = None + """The Realtime model used for this session.""" + + output_modalities: Optional[List[Literal["text", "audio"]]] = None + """The set of modalities the model can respond with. + + It defaults to `["audio"]`, indicating that the model will respond with audio + plus a transcript. `["text"]` can be used to make the model respond with text + only. It is not possible to request both `text` and `audio` at the same time. + """ + + prompt: Optional[ResponsePrompt] = None + """Reference to a prompt template and its variables. + + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + """ + + tool_choice: Optional[ToolChoice] = None + """How the model chooses tools. + + Provide one of the string modes or force a specific function/MCP tool. + """ + + tools: Optional[List[Tool]] = None + """Tools available to the model.""" + + tracing: Optional[Tracing] = None + """ + Realtime API can write session traces to the + [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once + tracing is enabled for a session, the configuration cannot be modified. + + `auto` will create a trace for the session with default values for the workflow + name, group id, and metadata. + """ + + truncation: Optional[RealtimeTruncation] = None + """ + Controls how the realtime conversation is truncated prior to model inference. + The default is `auto`. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tool_choice_config.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tool_choice_config.py new file mode 100644 index 00000000..f93c4900 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tool_choice_config.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import TypeAlias + +from ..responses.tool_choice_mcp import ToolChoiceMcp +from ..responses.tool_choice_options import ToolChoiceOptions +from ..responses.tool_choice_function import ToolChoiceFunction + +__all__ = ["RealtimeToolChoiceConfig"] + +RealtimeToolChoiceConfig: TypeAlias = Union[ToolChoiceOptions, ToolChoiceFunction, ToolChoiceMcp] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tool_choice_config_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tool_choice_config_param.py new file mode 100644 index 00000000..af92f243 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tool_choice_config_param.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
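For orientation, here is a minimal sketch of values that satisfy the `RealtimeToolChoiceConfig` union added above, written as plain dicts. The string modes and the exact `ToolChoiceFunction`/`ToolChoiceMcp` field names come from the vendored Responses types, which this hunk only imports, so treat them as assumptions:

```python
# Hedged sketch: dict shapes mirroring the RealtimeToolChoiceConfig union.

# String mode (ToolChoiceOptions): let the model decide when to call tools.
tool_choice_auto = "auto"

# Force one specific function tool (assumed ToolChoiceFunction shape).
tool_choice_function = {"type": "function", "name": "get_weather"}

# Force one specific tool on an MCP server (assumed ToolChoiceMcp shape).
tool_choice_mcp = {"type": "mcp", "server_label": "deepwiki", "name": "ask_question"}
```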
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from ..responses.tool_choice_options import ToolChoiceOptions +from ..responses.tool_choice_mcp_param import ToolChoiceMcpParam +from ..responses.tool_choice_function_param import ToolChoiceFunctionParam + +__all__ = ["RealtimeToolChoiceConfigParam"] + +RealtimeToolChoiceConfigParam: TypeAlias = Union[ToolChoiceOptions, ToolChoiceFunctionParam, ToolChoiceMcpParam] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config.py new file mode 100644 index 00000000..b97599ab --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import TypeAlias + +from .realtime_tools_config_union import RealtimeToolsConfigUnion + +__all__ = ["RealtimeToolsConfig"] + +RealtimeToolsConfig: TypeAlias = List[RealtimeToolsConfigUnion] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_param.py new file mode 100644 index 00000000..630fc746 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_param.py @@ -0,0 +1,143 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..._types import SequenceNotStr +from .realtime_function_tool_param import RealtimeFunctionToolParam + +__all__ = [ + "RealtimeToolsConfigParam", + "RealtimeToolsConfigUnionParam", + "Mcp", + "McpAllowedTools", + "McpAllowedToolsMcpToolFilter", + "McpRequireApproval", + "McpRequireApprovalMcpToolApprovalFilter", + "McpRequireApprovalMcpToolApprovalFilterAlways", + "McpRequireApprovalMcpToolApprovalFilterNever", +] + + +class McpAllowedToolsMcpToolFilter(TypedDict, total=False): + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: SequenceNotStr[str] + """List of allowed tool names.""" + + +McpAllowedTools: TypeAlias = Union[SequenceNotStr[str], McpAllowedToolsMcpToolFilter] + + +class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: SequenceNotStr[str] + """List of allowed tool names.""" + + +class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. 
+ """ + + tool_names: SequenceNotStr[str] + """List of allowed tool names.""" + + +class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): + always: McpRequireApprovalMcpToolApprovalFilterAlways + """A filter object to specify which tools are allowed.""" + + never: McpRequireApprovalMcpToolApprovalFilterNever + """A filter object to specify which tools are allowed.""" + + +McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"]] + + +class Mcp(TypedDict, total=False): + server_label: Required[str] + """A label for this MCP server, used to identify it in tool calls.""" + + type: Required[Literal["mcp"]] + """The type of the MCP tool. Always `mcp`.""" + + allowed_tools: Optional[McpAllowedTools] + """List of allowed tool names or a filter object.""" + + authorization: str + """ + An OAuth access token that can be used with a remote MCP server, either with a + custom MCP server URL or a service connector. Your application must handle the + OAuth authorization flow and provide the token here. + """ + + connector_id: Literal[ + "connector_dropbox", + "connector_gmail", + "connector_googlecalendar", + "connector_googledrive", + "connector_microsoftteams", + "connector_outlookcalendar", + "connector_outlookemail", + "connector_sharepoint", + ] + """Identifier for service connectors, like those available in ChatGPT. + + One of `server_url` or `connector_id` must be provided. Learn more about service + connectors + [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). + + Currently supported `connector_id` values are: + + - Dropbox: `connector_dropbox` + - Gmail: `connector_gmail` + - Google Calendar: `connector_googlecalendar` + - Google Drive: `connector_googledrive` + - Microsoft Teams: `connector_microsoftteams` + - Outlook Calendar: `connector_outlookcalendar` + - Outlook Email: `connector_outlookemail` + - SharePoint: `connector_sharepoint` + """ + + headers: Optional[Dict[str, str]] + """Optional HTTP headers to send to the MCP server. + + Use for authentication or other purposes. + """ + + require_approval: Optional[McpRequireApproval] + """Specify which of the MCP server's tools require approval.""" + + server_description: str + """Optional description of the MCP server, used to provide more context.""" + + server_url: str + """The URL for the MCP server. + + One of `server_url` or `connector_id` must be provided. + """ + + +RealtimeToolsConfigUnionParam: TypeAlias = Union[RealtimeFunctionToolParam, Mcp] + +RealtimeToolsConfigParam: TypeAlias = List[RealtimeToolsConfigUnionParam] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union.py new file mode 100644 index 00000000..e7126ed6 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union.py @@ -0,0 +1,141 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
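As a usage sketch, a `RealtimeToolsConfigParam` list can mix function tools and MCP tools. The MCP fields below match the `Mcp` TypedDict above; the function-tool shape follows `RealtimeFunctionToolParam`, which is only imported here, so it is an assumption:

```python
# Hedged sketch: a tools list matching RealtimeToolsConfigParam.
tools = [
    {
        # Assumed RealtimeFunctionToolParam shape (flat, JSON Schema parameters).
        "type": "function",
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
    {
        # Mcp TypedDict above: one of server_url or connector_id must be provided.
        "type": "mcp",
        "server_label": "deepwiki",
        "server_url": "https://mcp.example.com",  # hypothetical server URL
        "allowed_tools": {"read_only": True},     # McpAllowedToolsMcpToolFilter
        "require_approval": "never",
    },
]
```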
+ +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .realtime_function_tool import RealtimeFunctionTool + +__all__ = [ + "RealtimeToolsConfigUnion", + "Mcp", + "McpAllowedTools", + "McpAllowedToolsMcpToolFilter", + "McpRequireApproval", + "McpRequireApprovalMcpToolApprovalFilter", + "McpRequireApprovalMcpToolApprovalFilterAlways", + "McpRequireApprovalMcpToolApprovalFilterNever", +] + + +class McpAllowedToolsMcpToolFilter(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: Optional[List[str]] = None + """List of allowed tool names.""" + + +McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpToolFilter, None] + + +class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: Optional[List[str]] = None + """List of allowed tool names.""" + + +class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: Optional[List[str]] = None + """List of allowed tool names.""" + + +class McpRequireApprovalMcpToolApprovalFilter(BaseModel): + always: Optional[McpRequireApprovalMcpToolApprovalFilterAlways] = None + """A filter object to specify which tools are allowed.""" + + never: Optional[McpRequireApprovalMcpToolApprovalFilterNever] = None + """A filter object to specify which tools are allowed.""" + + +McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None] + + +class Mcp(BaseModel): + server_label: str + """A label for this MCP server, used to identify it in tool calls.""" + + type: Literal["mcp"] + """The type of the MCP tool. Always `mcp`.""" + + allowed_tools: Optional[McpAllowedTools] = None + """List of allowed tool names or a filter object.""" + + authorization: Optional[str] = None + """ + An OAuth access token that can be used with a remote MCP server, either with a + custom MCP server URL or a service connector. Your application must handle the + OAuth authorization flow and provide the token here. + """ + + connector_id: Optional[ + Literal[ + "connector_dropbox", + "connector_gmail", + "connector_googlecalendar", + "connector_googledrive", + "connector_microsoftteams", + "connector_outlookcalendar", + "connector_outlookemail", + "connector_sharepoint", + ] + ] = None + """Identifier for service connectors, like those available in ChatGPT. + + One of `server_url` or `connector_id` must be provided. Learn more about service + connectors + [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). 
+ + Currently supported `connector_id` values are: + + - Dropbox: `connector_dropbox` + - Gmail: `connector_gmail` + - Google Calendar: `connector_googlecalendar` + - Google Drive: `connector_googledrive` + - Microsoft Teams: `connector_microsoftteams` + - Outlook Calendar: `connector_outlookcalendar` + - Outlook Email: `connector_outlookemail` + - SharePoint: `connector_sharepoint` + """ + + headers: Optional[Dict[str, str]] = None + """Optional HTTP headers to send to the MCP server. + + Use for authentication or other purposes. + """ + + require_approval: Optional[McpRequireApproval] = None + """Specify which of the MCP server's tools require approval.""" + + server_description: Optional[str] = None + """Optional description of the MCP server, used to provide more context.""" + + server_url: Optional[str] = None + """The URL for the MCP server. + + One of `server_url` or `connector_id` must be provided. + """ + + +RealtimeToolsConfigUnion: TypeAlias = Annotated[Union[RealtimeFunctionTool, Mcp], PropertyInfo(discriminator="type")] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union_param.py new file mode 100644 index 00000000..9ee58fdb --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tools_config_union_param.py @@ -0,0 +1,140 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Union, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..._types import SequenceNotStr +from .realtime_function_tool_param import RealtimeFunctionToolParam + +__all__ = [ + "RealtimeToolsConfigUnionParam", + "Mcp", + "McpAllowedTools", + "McpAllowedToolsMcpToolFilter", + "McpRequireApproval", + "McpRequireApprovalMcpToolApprovalFilter", + "McpRequireApprovalMcpToolApprovalFilterAlways", + "McpRequireApprovalMcpToolApprovalFilterNever", +] + + +class McpAllowedToolsMcpToolFilter(TypedDict, total=False): + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: SequenceNotStr[str] + """List of allowed tool names.""" + + +McpAllowedTools: TypeAlias = Union[SequenceNotStr[str], McpAllowedToolsMcpToolFilter] + + +class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: SequenceNotStr[str] + """List of allowed tool names.""" + + +class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. 
+ """ + + tool_names: SequenceNotStr[str] + """List of allowed tool names.""" + + +class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): + always: McpRequireApprovalMcpToolApprovalFilterAlways + """A filter object to specify which tools are allowed.""" + + never: McpRequireApprovalMcpToolApprovalFilterNever + """A filter object to specify which tools are allowed.""" + + +McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"]] + + +class Mcp(TypedDict, total=False): + server_label: Required[str] + """A label for this MCP server, used to identify it in tool calls.""" + + type: Required[Literal["mcp"]] + """The type of the MCP tool. Always `mcp`.""" + + allowed_tools: Optional[McpAllowedTools] + """List of allowed tool names or a filter object.""" + + authorization: str + """ + An OAuth access token that can be used with a remote MCP server, either with a + custom MCP server URL or a service connector. Your application must handle the + OAuth authorization flow and provide the token here. + """ + + connector_id: Literal[ + "connector_dropbox", + "connector_gmail", + "connector_googlecalendar", + "connector_googledrive", + "connector_microsoftteams", + "connector_outlookcalendar", + "connector_outlookemail", + "connector_sharepoint", + ] + """Identifier for service connectors, like those available in ChatGPT. + + One of `server_url` or `connector_id` must be provided. Learn more about service + connectors + [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). + + Currently supported `connector_id` values are: + + - Dropbox: `connector_dropbox` + - Gmail: `connector_gmail` + - Google Calendar: `connector_googlecalendar` + - Google Drive: `connector_googledrive` + - Microsoft Teams: `connector_microsoftteams` + - Outlook Calendar: `connector_outlookcalendar` + - Outlook Email: `connector_outlookemail` + - SharePoint: `connector_sharepoint` + """ + + headers: Optional[Dict[str, str]] + """Optional HTTP headers to send to the MCP server. + + Use for authentication or other purposes. + """ + + require_approval: Optional[McpRequireApproval] + """Specify which of the MCP server's tools require approval.""" + + server_description: str + """Optional description of the MCP server, used to provide more context.""" + + server_url: str + """The URL for the MCP server. + + One of `server_url` or `connector_id` must be provided. + """ + + +RealtimeToolsConfigUnionParam: TypeAlias = Union[RealtimeFunctionToolParam, Mcp] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tracing_config.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tracing_config.py new file mode 100644 index 00000000..1c46de79 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tracing_config.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union, Optional +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel + +__all__ = ["RealtimeTracingConfig", "TracingConfiguration"] + + +class TracingConfiguration(BaseModel): + group_id: Optional[str] = None + """ + The group id to attach to this trace to enable filtering and grouping in the + Traces Dashboard. + """ + + metadata: Optional[object] = None + """ + The arbitrary metadata to attach to this trace to enable filtering in the Traces + Dashboard. + """ + + workflow_name: Optional[str] = None + """The name of the workflow to attach to this trace. 
+ + This is used to name the trace in the Traces Dashboard. + """ + + +RealtimeTracingConfig: TypeAlias = Union[Literal["auto"], TracingConfiguration, None] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_tracing_config_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_tracing_config_param.py new file mode 100644 index 00000000..fd9e2662 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_tracing_config_param.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypeAlias, TypedDict + +__all__ = ["RealtimeTracingConfigParam", "TracingConfiguration"] + + +class TracingConfiguration(TypedDict, total=False): + group_id: str + """ + The group id to attach to this trace to enable filtering and grouping in the + Traces Dashboard. + """ + + metadata: object + """ + The arbitrary metadata to attach to this trace to enable filtering in the Traces + Dashboard. + """ + + workflow_name: str + """The name of the workflow to attach to this trace. + + This is used to name the trace in the Traces Dashboard. + """ + + +RealtimeTracingConfigParam: TypeAlias = Union[Literal["auto"], TracingConfiguration] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio.py new file mode 100644 index 00000000..a5506947 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .realtime_transcription_session_audio_input import RealtimeTranscriptionSessionAudioInput + +__all__ = ["RealtimeTranscriptionSessionAudio"] + + +class RealtimeTranscriptionSessionAudio(BaseModel): + input: Optional[RealtimeTranscriptionSessionAudioInput] = None diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input.py new file mode 100644 index 00000000..efc321cb --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input.py @@ -0,0 +1,65 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .audio_transcription import AudioTranscription +from .noise_reduction_type import NoiseReductionType +from .realtime_audio_formats import RealtimeAudioFormats +from .realtime_transcription_session_audio_input_turn_detection import ( + RealtimeTranscriptionSessionAudioInputTurnDetection, +) + +__all__ = ["RealtimeTranscriptionSessionAudioInput", "NoiseReduction"] + + +class NoiseReduction(BaseModel): + type: Optional[NoiseReductionType] = None + """Type of noise reduction. + + `near_field` is for close-talking microphones such as headphones, `far_field` is + for far-field microphones such as laptop or conference room microphones. + """ + + +class RealtimeTranscriptionSessionAudioInput(BaseModel): + format: Optional[RealtimeAudioFormats] = None + """The PCM audio format. Only a 24kHz sample rate is supported.""" + + noise_reduction: Optional[NoiseReduction] = None + """Configuration for input audio noise reduction. 
+
+    This can be set to `null` to turn off. Noise reduction filters audio added to
+    the input audio buffer before it is sent to VAD and the model. Filtering the
+    audio can improve VAD and turn detection accuracy (reducing false positives) and
+    model performance by improving perception of the input audio.
+    """
+
+    transcription: Optional[AudioTranscription] = None
+    """
+    Configuration for input audio transcription, defaults to off and can be set to
+    `null` to turn off once on. Input audio transcription is not native to the
+    model, since the model consumes audio directly. Transcription runs
+    asynchronously through
+    [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+    and should be treated as guidance of input audio content rather than precisely
+    what the model heard. The client can optionally set the language and prompt for
+    transcription, these offer additional guidance to the transcription service.
+    """
+
+    turn_detection: Optional[RealtimeTranscriptionSessionAudioInputTurnDetection] = None
+    """Configuration for turn detection, either Server VAD or Semantic VAD.
+
+    This can be set to `null` to turn off, in which case the client must manually
+    trigger model response.
+
+    Server VAD means that the model will detect the start and end of speech based on
+    audio volume and respond at the end of user speech.
+
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
+    with VAD) to semantically estimate whether the user has finished speaking, then
+    dynamically sets a timeout based on this probability. For example, if user audio
+    trails off with "uhhm", the model will score a low probability of turn end and
+    wait longer for the user to continue speaking. This can be useful for more
+    natural conversations, but may have a higher latency.
+    """
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_param.py
new file mode 100644
index 00000000..c9153b68
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_param.py
@@ -0,0 +1,67 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import TypedDict
+
+from .noise_reduction_type import NoiseReductionType
+from .audio_transcription_param import AudioTranscriptionParam
+from .realtime_audio_formats_param import RealtimeAudioFormatsParam
+from .realtime_transcription_session_audio_input_turn_detection_param import (
+    RealtimeTranscriptionSessionAudioInputTurnDetectionParam,
+)
+
+__all__ = ["RealtimeTranscriptionSessionAudioInputParam", "NoiseReduction"]
+
+
+class NoiseReduction(TypedDict, total=False):
+    type: NoiseReductionType
+    """Type of noise reduction.
+
+    `near_field` is for close-talking microphones such as headphones, `far_field` is
+    for far-field microphones such as laptop or conference room microphones.
+    """
+
+
+class RealtimeTranscriptionSessionAudioInputParam(TypedDict, total=False):
+    format: RealtimeAudioFormatsParam
+    """The PCM audio format. Only a 24kHz sample rate is supported."""
+
+    noise_reduction: NoiseReduction
+    """Configuration for input audio noise reduction.
+
+    This can be set to `null` to turn off. Noise reduction filters audio added to
+    the input audio buffer before it is sent to VAD and the model. Filtering the
+    audio can improve VAD and turn detection accuracy (reducing false positives) and
+    model performance by improving perception of the input audio.
+    """
+
+    transcription: AudioTranscriptionParam
+    """
+    Configuration for input audio transcription, defaults to off and can be set to
+    `null` to turn off once on. Input audio transcription is not native to the
+    model, since the model consumes audio directly. Transcription runs
+    asynchronously through
+    [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+    and should be treated as guidance of input audio content rather than precisely
+    what the model heard. The client can optionally set the language and prompt for
+    transcription, these offer additional guidance to the transcription service.
+    """
+
+    turn_detection: Optional[RealtimeTranscriptionSessionAudioInputTurnDetectionParam]
+    """Configuration for turn detection, either Server VAD or Semantic VAD.
+
+    This can be set to `null` to turn off, in which case the client must manually
+    trigger model response.
+
+    Server VAD means that the model will detect the start and end of speech based on
+    audio volume and respond at the end of user speech.
+
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
+    with VAD) to semantically estimate whether the user has finished speaking, then
+    dynamically sets a timeout based on this probability. For example, if user audio
+    trails off with "uhhm", the model will score a low probability of turn end and
+    wait longer for the user to continue speaking. This can be useful for more
+    natural conversations, but may have a higher latency.
+    """
diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py
new file mode 100644
index 00000000..7dc7a8f3
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py
@@ -0,0 +1,98 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+
+__all__ = ["RealtimeTranscriptionSessionAudioInputTurnDetection", "ServerVad", "SemanticVad"]
+
+
+class ServerVad(BaseModel):
+    type: Literal["server_vad"]
+    """Type of turn detection, `server_vad` to turn on simple Server VAD."""
+
+    create_response: Optional[bool] = None
+    """
+    Whether or not to automatically generate a response when a VAD stop event
+    occurs.
+    """
+
+    idle_timeout_ms: Optional[int] = None
+    """Optional timeout after which a model response will be triggered automatically.
+
+    This is useful for situations in which a long pause from the user is unexpected,
+    such as a phone call. The model will effectively prompt the user to continue the
+    conversation based on the current context.
+
+    The timeout value will be applied after the last model response's audio has
+    finished playing, i.e. it's set to the `response.done` time plus audio playback
+    duration.
+
+    An `input_audio_buffer.timeout_triggered` event (plus events associated with the
+    Response) will be emitted when the timeout is reached. Idle timeout is currently
+    only supported for `server_vad` mode.
+ """ + + interrupt_response: Optional[bool] = None + """ + Whether or not to automatically interrupt any ongoing response with output to + the default conversation (i.e. `conversation` of `auto`) when a VAD start event + occurs. + """ + + prefix_padding_ms: Optional[int] = None + """Used only for `server_vad` mode. + + Amount of audio to include before the VAD detected speech (in milliseconds). + Defaults to 300ms. + """ + + silence_duration_ms: Optional[int] = None + """Used only for `server_vad` mode. + + Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + With shorter values the model will respond more quickly, but may jump in on + short pauses from the user. + """ + + threshold: Optional[float] = None + """Used only for `server_vad` mode. + + Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + threshold will require louder audio to activate the model, and thus might + perform better in noisy environments. + """ + + +class SemanticVad(BaseModel): + type: Literal["semantic_vad"] + """Type of turn detection, `semantic_vad` to turn on Semantic VAD.""" + + create_response: Optional[bool] = None + """ + Whether or not to automatically generate a response when a VAD stop event + occurs. + """ + + eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None + """Used only for `semantic_vad` mode. + + The eagerness of the model to respond. `low` will wait longer for the user to + continue speaking, `high` will respond more quickly. `auto` is the default and + is equivalent to `medium`. `low`, `medium`, and `high` have max timeouts of 8s, + 4s, and 2s respectively. + """ + + interrupt_response: Optional[bool] = None + """ + Whether or not to automatically interrupt any ongoing response with output to + the default conversation (i.e. `conversation` of `auto`) when a VAD start event + occurs. + """ + + +RealtimeTranscriptionSessionAudioInputTurnDetection: TypeAlias = Annotated[ + Union[ServerVad, SemanticVad, None], PropertyInfo(discriminator="type") +] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py new file mode 100644 index 00000000..d899b8c5 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py @@ -0,0 +1,95 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["RealtimeTranscriptionSessionAudioInputTurnDetectionParam", "ServerVad", "SemanticVad"] + + +class ServerVad(TypedDict, total=False): + type: Required[Literal["server_vad"]] + """Type of turn detection, `server_vad` to turn on simple Server VAD.""" + + create_response: bool + """ + Whether or not to automatically generate a response when a VAD stop event + occurs. + """ + + idle_timeout_ms: Optional[int] + """Optional timeout after which a model response will be triggered automatically. + + This is useful for situations in which a long pause from the user is unexpected, + such as a phone call. The model will effectively prompt the user to continue the + conversation based on the current context. + + The timeout value will be applied after the last model response's audio has + finished playing, i.e. 
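For reference, plain dicts matching the `ServerVad` and `SemanticVad` shapes defined above; the values are illustrative, and the documented defaults apply when fields are omitted:

```python
# Hedged sketch: the two turn-detection variants as wire-format dicts.
server_vad = {
    "type": "server_vad",
    "threshold": 0.6,          # above the 0.5 default, for noisy environments
    "prefix_padding_ms": 300,
    "silence_duration_ms": 500,
    "idle_timeout_ms": 8000,   # e.g. re-prompt a silent phone caller after 8s
    "create_response": True,
    "interrupt_response": True,
}

semantic_vad = {
    "type": "semantic_vad",
    "eagerness": "low",        # wait longer before treating the turn as finished
    "create_response": True,
    "interrupt_response": True,
}
```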
it's set to the `response.done` time plus audio playback + duration. + + An `input_audio_buffer.timeout_triggered` event (plus events associated with the + Response) will be emitted when the timeout is reached. Idle timeout is currently + only supported for `server_vad` mode. + """ + + interrupt_response: bool + """ + Whether or not to automatically interrupt any ongoing response with output to + the default conversation (i.e. `conversation` of `auto`) when a VAD start event + occurs. + """ + + prefix_padding_ms: int + """Used only for `server_vad` mode. + + Amount of audio to include before the VAD detected speech (in milliseconds). + Defaults to 300ms. + """ + + silence_duration_ms: int + """Used only for `server_vad` mode. + + Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + With shorter values the model will respond more quickly, but may jump in on + short pauses from the user. + """ + + threshold: float + """Used only for `server_vad` mode. + + Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + threshold will require louder audio to activate the model, and thus might + perform better in noisy environments. + """ + + +class SemanticVad(TypedDict, total=False): + type: Required[Literal["semantic_vad"]] + """Type of turn detection, `semantic_vad` to turn on Semantic VAD.""" + + create_response: bool + """ + Whether or not to automatically generate a response when a VAD stop event + occurs. + """ + + eagerness: Literal["low", "medium", "high", "auto"] + """Used only for `semantic_vad` mode. + + The eagerness of the model to respond. `low` will wait longer for the user to + continue speaking, `high` will respond more quickly. `auto` is the default and + is equivalent to `medium`. `low`, `medium`, and `high` have max timeouts of 8s, + 4s, and 2s respectively. + """ + + interrupt_response: bool + """ + Whether or not to automatically interrupt any ongoing response with output to + the default conversation (i.e. `conversation` of `auto`) when a VAD start event + occurs. + """ + + +RealtimeTranscriptionSessionAudioInputTurnDetectionParam: TypeAlias = Union[ServerVad, SemanticVad] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_param.py new file mode 100644 index 00000000..1503a606 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_audio_param.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +from .realtime_transcription_session_audio_input_param import RealtimeTranscriptionSessionAudioInputParam + +__all__ = ["RealtimeTranscriptionSessionAudioParam"] + + +class RealtimeTranscriptionSessionAudioParam(TypedDict, total=False): + input: RealtimeTranscriptionSessionAudioInputParam diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_request.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_request.py new file mode 100644 index 00000000..102f2b14 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_request.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
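Composing the wrapper just defined, a hedged sketch of a `RealtimeTranscriptionSessionAudioParam` value; the transcription model name is an assumption, not taken from this diff:

```python
# Hedged sketch: audio config per RealtimeTranscriptionSessionAudioParam.
audio = {
    "input": {  # RealtimeTranscriptionSessionAudioInputParam
        "noise_reduction": {"type": "near_field"},  # headset-style microphone
        "transcription": {
            "model": "gpt-4o-transcribe",  # assumed model name
            "language": "en",
        },
        "turn_detection": {"type": "server_vad", "silence_duration_ms": 500},
    }
}
```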
+ +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .realtime_transcription_session_audio import RealtimeTranscriptionSessionAudio + +__all__ = ["RealtimeTranscriptionSessionCreateRequest"] + + +class RealtimeTranscriptionSessionCreateRequest(BaseModel): + type: Literal["transcription"] + """The type of session to create. + + Always `transcription` for transcription sessions. + """ + + audio: Optional[RealtimeTranscriptionSessionAudio] = None + """Configuration for input and output audio.""" + + include: Optional[List[Literal["item.input_audio_transcription.logprobs"]]] = None + """Additional fields to include in server outputs. + + `item.input_audio_transcription.logprobs`: Include logprobs for input audio + transcription. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_request_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_request_param.py new file mode 100644 index 00000000..80cbe2d4 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_request_param.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +from .realtime_transcription_session_audio_param import RealtimeTranscriptionSessionAudioParam + +__all__ = ["RealtimeTranscriptionSessionCreateRequestParam"] + + +class RealtimeTranscriptionSessionCreateRequestParam(TypedDict, total=False): + type: Required[Literal["transcription"]] + """The type of session to create. + + Always `transcription` for transcription sessions. + """ + + audio: RealtimeTranscriptionSessionAudioParam + """Configuration for input and output audio.""" + + include: List[Literal["item.input_audio_transcription.logprobs"]] + """Additional fields to include in server outputs. + + `item.input_audio_transcription.logprobs`: Include logprobs for input audio + transcription. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_response.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_response.py new file mode 100644 index 00000000..301af1ac --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_create_response.py @@ -0,0 +1,68 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .audio_transcription import AudioTranscription +from .noise_reduction_type import NoiseReductionType +from .realtime_audio_formats import RealtimeAudioFormats +from .realtime_transcription_session_turn_detection import RealtimeTranscriptionSessionTurnDetection + +__all__ = ["RealtimeTranscriptionSessionCreateResponse", "Audio", "AudioInput", "AudioInputNoiseReduction"] + + +class AudioInputNoiseReduction(BaseModel): + type: Optional[NoiseReductionType] = None + """Type of noise reduction. + + `near_field` is for close-talking microphones such as headphones, `far_field` is + for far-field microphones such as laptop or conference room microphones. + """ + + +class AudioInput(BaseModel): + format: Optional[RealtimeAudioFormats] = None + """The PCM audio format. 
Only a 24kHz sample rate is supported.""" + + noise_reduction: Optional[AudioInputNoiseReduction] = None + """Configuration for input audio noise reduction.""" + + transcription: Optional[AudioTranscription] = None + """Configuration of the transcription model.""" + + turn_detection: Optional[RealtimeTranscriptionSessionTurnDetection] = None + """Configuration for turn detection. + + Can be set to `null` to turn off. Server VAD means that the model will detect + the start and end of speech based on audio volume and respond at the end of user + speech. + """ + + +class Audio(BaseModel): + input: Optional[AudioInput] = None + + +class RealtimeTranscriptionSessionCreateResponse(BaseModel): + id: str + """Unique identifier for the session that looks like `sess_1234567890abcdef`.""" + + object: str + """The object type. Always `realtime.transcription_session`.""" + + type: Literal["transcription"] + """The type of session. Always `transcription` for transcription sessions.""" + + audio: Optional[Audio] = None + """Configuration for input audio for the session.""" + + expires_at: Optional[int] = None + """Expiration timestamp for the session, in seconds since epoch.""" + + include: Optional[List[Literal["item.input_audio_transcription.logprobs"]]] = None + """Additional fields to include in server outputs. + + - `item.input_audio_transcription.logprobs`: Include logprobs for input audio + transcription. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_turn_detection.py b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_turn_detection.py new file mode 100644 index 00000000..f5da31ce --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_transcription_session_turn_detection.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["RealtimeTranscriptionSessionTurnDetection"] + + +class RealtimeTranscriptionSessionTurnDetection(BaseModel): + prefix_padding_ms: Optional[int] = None + """Amount of audio to include before the VAD detected speech (in milliseconds). + + Defaults to 300ms. + """ + + silence_duration_ms: Optional[int] = None + """Duration of silence to detect speech stop (in milliseconds). + + Defaults to 500ms. With shorter values the model will respond more quickly, but + may jump in on short pauses from the user. + """ + + threshold: Optional[float] = None + """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + + A higher threshold will require louder audio to activate the model, and thus + might perform better in noisy environments. + """ + + type: Optional[str] = None + """Type of turn detection, only `server_vad` is currently supported.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_truncation.py b/portkey_ai/_vendor/openai/types/realtime/realtime_truncation.py new file mode 100644 index 00000000..515f8690 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_truncation.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
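A request body for `RealtimeTranscriptionSessionCreateRequestParam` (defined above) could then look like this sketch; only `type` is required:

```python
# Hedged sketch: creating a transcription session per the TypedDict above.
transcription_session = {
    "type": "transcription",  # required discriminator
    "audio": {
        "input": {
            "noise_reduction": {"type": "near_field"},
            "turn_detection": {"type": "server_vad"},
        }
    },
    "include": ["item.input_audio_transcription.logprobs"],
}
```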
+ +from typing import Union +from typing_extensions import Literal, TypeAlias + +from .realtime_truncation_retention_ratio import RealtimeTruncationRetentionRatio + +__all__ = ["RealtimeTruncation"] + +RealtimeTruncation: TypeAlias = Union[Literal["auto", "disabled"], RealtimeTruncationRetentionRatio] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_param.py new file mode 100644 index 00000000..5e42b274 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypeAlias + +from .realtime_truncation_retention_ratio_param import RealtimeTruncationRetentionRatioParam + +__all__ = ["RealtimeTruncationParam"] + +RealtimeTruncationParam: TypeAlias = Union[Literal["auto", "disabled"], RealtimeTruncationRetentionRatioParam] diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio.py b/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio.py new file mode 100644 index 00000000..b4042724 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RealtimeTruncationRetentionRatio"] + + +class RealtimeTruncationRetentionRatio(BaseModel): + retention_ratio: float + """ + Fraction of post-instruction conversation tokens to retain (0.0 - 1.0) when the + conversation exceeds the input token limit. + """ + + type: Literal["retention_ratio"] + """Use retention ratio truncation.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio_param.py b/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio_param.py new file mode 100644 index 00000000..b65d6566 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/realtime_truncation_retention_ratio_param.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["RealtimeTruncationRetentionRatioParam"] + + +class RealtimeTruncationRetentionRatioParam(TypedDict, total=False): + retention_ratio: Required[float] + """ + Fraction of post-instruction conversation tokens to retain (0.0 - 1.0) when the + conversation exceeds the input token limit. + """ + + type: Required[Literal["retention_ratio"]] + """Use retention ratio truncation.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_audio_delta_event.py b/portkey_ai/_vendor/openai/types/realtime/response_audio_delta_event.py new file mode 100644 index 00000000..d92c5462 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_audio_delta_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
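The three accepted forms of `RealtimeTruncationParam` added above, side by side:

```python
# Hedged sketch: RealtimeTruncationParam is two literals or a ratio object.
truncation_auto = "auto"       # default behaviour
truncation_off = "disabled"    # never truncate; long sessions may exhaust context
truncation_ratio = {           # RealtimeTruncationRetentionRatioParam
    "type": "retention_ratio",
    "retention_ratio": 0.5,    # retain half the post-instruction tokens
}
```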
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseAudioDeltaEvent"] + + +class ResponseAudioDeltaEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + delta: str + """Base64-encoded audio data delta.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.output_audio.delta"] + """The event type, must be `response.output_audio.delta`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_audio_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_audio_done_event.py new file mode 100644 index 00000000..5ea0f07e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_audio_done_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseAudioDoneEvent"] + + +class ResponseAudioDoneEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.output_audio.done"] + """The event type, must be `response.output_audio.done`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_audio_transcript_delta_event.py b/portkey_ai/_vendor/openai/types/realtime/response_audio_transcript_delta_event.py new file mode 100644 index 00000000..4dd5feca --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_audio_transcript_delta_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseAudioTranscriptDeltaEvent"] + + +class ResponseAudioTranscriptDeltaEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + delta: str + """The transcript delta.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.output_audio_transcript.delta"] + """The event type, must be `response.output_audio_transcript.delta`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_audio_transcript_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_audio_transcript_done_event.py new file mode 100644 index 00000000..2de913d2 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_audio_transcript_done_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
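Because `response.output_audio.delta` streams base64 chunks per item, clients typically reassemble them by `item_id`; a minimal sketch, where `event` stands in for parsed instances of the models above:

```python
# Hedged sketch: reassembling streamed audio deltas per item_id.
import base64
from collections import defaultdict

audio_buffers = defaultdict(bytearray)

def on_audio_delta(event) -> None:
    # ResponseAudioDeltaEvent.delta is base64-encoded audio data.
    audio_buffers[event.item_id] += base64.b64decode(event.delta)

def on_audio_done(event) -> None:
    # ResponseAudioDoneEvent marks the item's audio stream as complete.
    pcm = bytes(audio_buffers.pop(event.item_id))
    # hand `pcm` to an audio sink of your choosing (not shown)
```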
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseAudioTranscriptDoneEvent"] + + +class ResponseAudioTranscriptDoneEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + transcript: str + """The final transcript of the audio.""" + + type: Literal["response.output_audio_transcript.done"] + """The event type, must be `response.output_audio_transcript.done`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_cancel_event.py b/portkey_ai/_vendor/openai/types/realtime/response_cancel_event.py new file mode 100644 index 00000000..15dc141c --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_cancel_event.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseCancelEvent"] + + +class ResponseCancelEvent(BaseModel): + type: Literal["response.cancel"] + """The event type, must be `response.cancel`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" + + response_id: Optional[str] = None + """ + A specific response ID to cancel - if not provided, will cancel an in-progress + response in the default conversation. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/response_cancel_event_param.py b/portkey_ai/_vendor/openai/types/realtime/response_cancel_event_param.py new file mode 100644 index 00000000..f3374073 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_cancel_event_param.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseCancelEventParam"] + + +class ResponseCancelEventParam(TypedDict, total=False): + type: Required[Literal["response.cancel"]] + """The event type, must be `response.cancel`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" + + response_id: str + """ + A specific response ID to cancel - if not provided, will cancel an in-progress + response in the default conversation. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/response_content_part_added_event.py b/portkey_ai/_vendor/openai/types/realtime/response_content_part_added_event.py new file mode 100644 index 00000000..aca965c3 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_content_part_added_event.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
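To abandon an in-flight response, the client sends the `response.cancel` event defined above; a sketch of the payload:

```python
# Hedged sketch: ResponseCancelEventParam. Omitting response_id cancels the
# in-progress response in the default conversation.
cancel_event = {
    "type": "response.cancel",
    "event_id": "evt_client_123",  # optional client-generated correlation ID
    "response_id": "resp_abc123",  # hypothetical ID of the response to cancel
}
```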
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseContentPartAddedEvent", "Part"] + + +class Part(BaseModel): + audio: Optional[str] = None + """Base64-encoded audio data (if type is "audio").""" + + text: Optional[str] = None + """The text content (if type is "text").""" + + transcript: Optional[str] = None + """The transcript of the audio (if type is "audio").""" + + type: Optional[Literal["text", "audio"]] = None + """The content type ("text", "audio").""" + + +class ResponseContentPartAddedEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item to which the content part was added.""" + + output_index: int + """The index of the output item in the response.""" + + part: Part + """The content part that was added.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.content_part.added"] + """The event type, must be `response.content_part.added`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_content_part_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_content_part_done_event.py new file mode 100644 index 00000000..59af808a --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_content_part_done_event.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseContentPartDoneEvent", "Part"] + + +class Part(BaseModel): + audio: Optional[str] = None + """Base64-encoded audio data (if type is "audio").""" + + text: Optional[str] = None + """The text content (if type is "text").""" + + transcript: Optional[str] = None + """The transcript of the audio (if type is "audio").""" + + type: Optional[Literal["text", "audio"]] = None + """The content type ("text", "audio").""" + + +class ResponseContentPartDoneEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + part: Part + """The content part that is done.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.content_part.done"] + """The event type, must be `response.content_part.done`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_create_event.py b/portkey_ai/_vendor/openai/types/realtime/response_create_event.py new file mode 100644 index 00000000..75a08ee4 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_create_event.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .realtime_response_create_params import RealtimeResponseCreateParams + +__all__ = ["ResponseCreateEvent"] + + +class ResponseCreateEvent(BaseModel): + type: Literal["response.create"] + """The event type, must be `response.create`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" + + response: Optional[RealtimeResponseCreateParams] = None + """Create a new Realtime response with these parameters""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_create_event_param.py b/portkey_ai/_vendor/openai/types/realtime/response_create_event_param.py new file mode 100644 index 00000000..e5dd46d9 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_create_event_param.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from .realtime_response_create_params_param import RealtimeResponseCreateParamsParam + +__all__ = ["ResponseCreateEventParam"] + + +class ResponseCreateEventParam(TypedDict, total=False): + type: Required[Literal["response.create"]] + """The event type, must be `response.create`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" + + response: RealtimeResponseCreateParamsParam + """Create a new Realtime response with these parameters""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_created_event.py b/portkey_ai/_vendor/openai/types/realtime/response_created_event.py new file mode 100644 index 00000000..996bf26f --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_created_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel +from .realtime_response import RealtimeResponse + +__all__ = ["ResponseCreatedEvent"] + + +class ResponseCreatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + response: RealtimeResponse + """The response resource.""" + + type: Literal["response.created"] + """The event type, must be `response.created`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_done_event.py new file mode 100644 index 00000000..ce9a4b9f --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_done_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel +from .realtime_response import RealtimeResponse + +__all__ = ["ResponseDoneEvent"] + + +class ResponseDoneEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + response: RealtimeResponse + """The response resource.""" + + type: Literal["response.done"] + """The event type, must be `response.done`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_delta_event.py b/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_delta_event.py new file mode 100644 index 00000000..6d96e78b --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_delta_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFunctionCallArgumentsDeltaEvent"] + + +class ResponseFunctionCallArgumentsDeltaEvent(BaseModel): + call_id: str + """The ID of the function call.""" + + delta: str + """The arguments delta as a JSON string.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the function call item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.function_call_arguments.delta"] + """The event type, must be `response.function_call_arguments.delta`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_done_event.py new file mode 100644 index 00000000..be7fae9a --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_function_call_arguments_done_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFunctionCallArgumentsDoneEvent"] + + +class ResponseFunctionCallArgumentsDoneEvent(BaseModel): + arguments: str + """The final arguments as a JSON string.""" + + call_id: str + """The ID of the function call.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the function call item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.function_call_arguments.done"] + """The event type, must be `response.function_call_arguments.done`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_arguments_delta.py b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_arguments_delta.py new file mode 100644 index 00000000..0a02a1a5 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_arguments_delta.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseMcpCallArgumentsDelta"] + + +class ResponseMcpCallArgumentsDelta(BaseModel): + delta: str + """The JSON-encoded arguments delta.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the MCP tool call item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.mcp_call_arguments.delta"] + """The event type, must be `response.mcp_call_arguments.delta`.""" + + obfuscation: Optional[str] = None + """If present, indicates the delta text was obfuscated.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_arguments_done.py b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_arguments_done.py new file mode 100644 index 00000000..5ec95f17 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_arguments_done.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseMcpCallArgumentsDone"] + + +class ResponseMcpCallArgumentsDone(BaseModel): + arguments: str + """The final JSON-encoded arguments string.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the MCP tool call item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.mcp_call_arguments.done"] + """The event type, must be `response.mcp_call_arguments.done`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_completed.py b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_completed.py new file mode 100644 index 00000000..e3fcec21 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_completed.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseMcpCallCompleted"] + + +class ResponseMcpCallCompleted(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the MCP tool call item.""" + + output_index: int + """The index of the output item in the response.""" + + type: Literal["response.mcp_call.completed"] + """The event type, must be `response.mcp_call.completed`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_failed.py b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_failed.py new file mode 100644 index 00000000..b7adc8c2 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_failed.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseMcpCallFailed"] + + +class ResponseMcpCallFailed(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the MCP tool call item.""" + + output_index: int + """The index of the output item in the response.""" + + type: Literal["response.mcp_call.failed"] + """The event type, must be `response.mcp_call.failed`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_in_progress.py b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_in_progress.py new file mode 100644 index 00000000..d0fcc761 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_mcp_call_in_progress.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseMcpCallInProgress"] + + +class ResponseMcpCallInProgress(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the MCP tool call item.""" + + output_index: int + """The index of the output item in the response.""" + + type: Literal["response.mcp_call.in_progress"] + """The event type, must be `response.mcp_call.in_progress`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_output_item_added_event.py b/portkey_ai/_vendor/openai/types/realtime/response_output_item_added_event.py new file mode 100644 index 00000000..509dfcae --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_output_item_added_event.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ResponseOutputItemAddedEvent"] + + +class ResponseOutputItemAddedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item: ConversationItem + """A single item within a Realtime conversation.""" + + output_index: int + """The index of the output item in the Response.""" + + response_id: str + """The ID of the Response to which the item belongs.""" + + type: Literal["response.output_item.added"] + """The event type, must be `response.output_item.added`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_output_item_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_output_item_done_event.py new file mode 100644 index 00000000..800e4ae8 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_output_item_done_event.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ResponseOutputItemDoneEvent"] + + +class ResponseOutputItemDoneEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item: ConversationItem + """A single item within a Realtime conversation.""" + + output_index: int + """The index of the output item in the Response.""" + + response_id: str + """The ID of the Response to which the item belongs.""" + + type: Literal["response.output_item.done"] + """The event type, must be `response.output_item.done`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_text_delta_event.py b/portkey_ai/_vendor/openai/types/realtime/response_text_delta_event.py new file mode 100644 index 00000000..493348aa --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_text_delta_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseTextDeltaEvent"] + + +class ResponseTextDeltaEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + delta: str + """The text delta.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.output_text.delta"] + """The event type, must be `response.output_text.delta`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/response_text_done_event.py b/portkey_ai/_vendor/openai/types/realtime/response_text_done_event.py new file mode 100644 index 00000000..83c6cf06 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/response_text_done_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseTextDoneEvent"] + + +class ResponseTextDoneEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + text: str + """The final text content.""" + + type: Literal["response.output_text.done"] + """The event type, must be `response.output_text.done`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/session_created_event.py b/portkey_ai/_vendor/openai/types/realtime/session_created_event.py new file mode 100644 index 00000000..b5caad35 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/session_created_event.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel +from .realtime_session_create_request import RealtimeSessionCreateRequest +from .realtime_transcription_session_create_request import RealtimeTranscriptionSessionCreateRequest + +__all__ = ["SessionCreatedEvent", "Session"] + +Session: TypeAlias = Union[RealtimeSessionCreateRequest, RealtimeTranscriptionSessionCreateRequest] + + +class SessionCreatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + session: Session + """The session configuration.""" + + type: Literal["session.created"] + """The event type, must be `session.created`.""" diff --git a/portkey_ai/_vendor/openai/types/realtime/session_update_event.py b/portkey_ai/_vendor/openai/types/realtime/session_update_event.py new file mode 100644 index 00000000..2e226162 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/session_update_event.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union, Optional +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel +from .realtime_session_create_request import RealtimeSessionCreateRequest +from .realtime_transcription_session_create_request import RealtimeTranscriptionSessionCreateRequest + +__all__ = ["SessionUpdateEvent", "Session"] + +Session: TypeAlias = Union[RealtimeSessionCreateRequest, RealtimeTranscriptionSessionCreateRequest] + + +class SessionUpdateEvent(BaseModel): + session: Session + """Update the Realtime session. + + Choose either a realtime session or a transcription session. + """ + + type: Literal["session.update"] + """The event type, must be `session.update`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event. + + This is an arbitrary string that a client may assign. It will be passed back if + there is an error with the event, but the corresponding `session.updated` event + will not include it. + """ diff --git a/portkey_ai/_vendor/openai/types/realtime/session_update_event_param.py b/portkey_ai/_vendor/openai/types/realtime/session_update_event_param.py new file mode 100644 index 00000000..59623614 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/session_update_event_param.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .realtime_session_create_request_param import RealtimeSessionCreateRequestParam +from .realtime_transcription_session_create_request_param import RealtimeTranscriptionSessionCreateRequestParam + +__all__ = ["SessionUpdateEventParam", "Session"] + +Session: TypeAlias = Union[RealtimeSessionCreateRequestParam, RealtimeTranscriptionSessionCreateRequestParam] + + +class SessionUpdateEventParam(TypedDict, total=False): + session: Required[Session] + """Update the Realtime session. + + Choose either a realtime session or a transcription session. + """ + + type: Required[Literal["session.update"]] + """The event type, must be `session.update`.""" + + event_id: str + """Optional client-generated ID used to identify this event. + + This is an arbitrary string that a client may assign. It will be passed back if + there is an error with the event, but the corresponding `session.updated` event + will not include it. 
+ """ diff --git a/portkey_ai/_vendor/openai/types/realtime/session_updated_event.py b/portkey_ai/_vendor/openai/types/realtime/session_updated_event.py new file mode 100644 index 00000000..eb7ee033 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/realtime/session_updated_event.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel +from .realtime_session_create_request import RealtimeSessionCreateRequest +from .realtime_transcription_session_create_request import RealtimeTranscriptionSessionCreateRequest + +__all__ = ["SessionUpdatedEvent", "Session"] + +Session: TypeAlias = Union[RealtimeSessionCreateRequest, RealtimeTranscriptionSessionCreateRequest] + + +class SessionUpdatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + session: Session + """The session configuration.""" + + type: Literal["session.updated"] + """The event type, must be `session.updated`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/__init__.py b/portkey_ai/_vendor/openai/types/responses/__init__.py index d33c26d2..d59f0a74 100644 --- a/portkey_ai/_vendor/openai/types/responses/__init__.py +++ b/portkey_ai/_vendor/openai/types/responses/__init__.py @@ -5,6 +5,7 @@ from .tool import Tool as Tool from .response import Response as Response from .tool_param import ToolParam as ToolParam +from .custom_tool import CustomTool as CustomTool from .computer_tool import ComputerTool as ComputerTool from .function_tool import FunctionTool as FunctionTool from .response_item import ResponseItem as ResponseItem @@ -18,19 +19,26 @@ ParsedResponseOutputMessage as ParsedResponseOutputMessage, ParsedResponseFunctionToolCall as ParsedResponseFunctionToolCall, ) +from .response_prompt import ResponsePrompt as ResponsePrompt from .response_status import ResponseStatus as ResponseStatus +from .tool_choice_mcp import ToolChoiceMcp as ToolChoiceMcp from .web_search_tool import WebSearchTool as WebSearchTool from .file_search_tool import FileSearchTool as FileSearchTool +from .custom_tool_param import CustomToolParam as CustomToolParam from .tool_choice_types import ToolChoiceTypes as ToolChoiceTypes from .easy_input_message import EasyInputMessage as EasyInputMessage from .response_item_list import ResponseItemList as ResponseItemList +from .tool_choice_custom import ToolChoiceCustom as ToolChoiceCustom from .computer_tool_param import ComputerToolParam as ComputerToolParam from .function_tool_param import FunctionToolParam as FunctionToolParam from .response_includable import ResponseIncludable as ResponseIncludable from .response_input_file import ResponseInputFile as ResponseInputFile +from .response_input_item import ResponseInputItem as ResponseInputItem from .response_input_text import ResponseInputText as ResponseInputText +from .tool_choice_allowed import ToolChoiceAllowed as ToolChoiceAllowed from .tool_choice_options import ToolChoiceOptions as ToolChoiceOptions from .response_error_event import ResponseErrorEvent as ResponseErrorEvent +from .response_input_audio import ResponseInputAudio as ResponseInputAudio from .response_input_image import ResponseInputImage as ResponseInputImage from .response_input_param import ResponseInputParam as ResponseInputParam from .response_output_item import ResponseOutputItem as ResponseOutputItem @@ -38,8 +46,10 @@ from .response_text_config import ResponseTextConfig as 
ResponseTextConfig from .tool_choice_function import ToolChoiceFunction as ToolChoiceFunction from .response_failed_event import ResponseFailedEvent as ResponseFailedEvent +from .response_prompt_param import ResponsePromptParam as ResponsePromptParam from .response_queued_event import ResponseQueuedEvent as ResponseQueuedEvent from .response_stream_event import ResponseStreamEvent as ResponseStreamEvent +from .tool_choice_mcp_param import ToolChoiceMcpParam as ToolChoiceMcpParam from .web_search_tool_param import WebSearchToolParam as WebSearchToolParam from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam from .input_item_list_params import InputItemListParams as InputItemListParams @@ -50,23 +60,29 @@ from .response_output_refusal import ResponseOutputRefusal as ResponseOutputRefusal from .response_reasoning_item import ResponseReasoningItem as ResponseReasoningItem from .tool_choice_types_param import ToolChoiceTypesParam as ToolChoiceTypesParam +from .web_search_preview_tool import WebSearchPreviewTool as WebSearchPreviewTool from .easy_input_message_param import EasyInputMessageParam as EasyInputMessageParam from .response_completed_event import ResponseCompletedEvent as ResponseCompletedEvent from .response_retrieve_params import ResponseRetrieveParams as ResponseRetrieveParams from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent +from .tool_choice_custom_param import ToolChoiceCustomParam as ToolChoiceCustomParam from .response_audio_done_event import ResponseAudioDoneEvent as ResponseAudioDoneEvent +from .response_custom_tool_call import ResponseCustomToolCall as ResponseCustomToolCall from .response_incomplete_event import ResponseIncompleteEvent as ResponseIncompleteEvent from .response_input_file_param import ResponseInputFileParam as ResponseInputFileParam from .response_input_item_param import ResponseInputItemParam as ResponseInputItemParam from .response_input_text_param import ResponseInputTextParam as ResponseInputTextParam from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDeltaEvent +from .tool_choice_allowed_param import ToolChoiceAllowedParam as ToolChoiceAllowedParam from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent +from .response_input_audio_param import ResponseInputAudioParam as ResponseInputAudioParam from .response_input_image_param import ResponseInputImageParam as ResponseInputImageParam from .response_output_text_param import ResponseOutputTextParam as ResponseOutputTextParam from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFunctionParam from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall +from .response_conversation_param import ResponseConversationParam as ResponseConversationParam from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig from .response_function_tool_call import ResponseFunctionToolCall as ResponseFunctionToolCall from .response_input_message_item import ResponseInputMessageItem as ResponseInputMessageItem @@ -76,13 +92,14 @@ from .response_refusal_delta_event import ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent from .response_output_message_param import ResponseOutputMessageParam as ResponseOutputMessageParam from 
.response_output_refusal_param import ResponseOutputRefusalParam as ResponseOutputRefusalParam -from .response_reasoning_done_event import ResponseReasoningDoneEvent as ResponseReasoningDoneEvent from .response_reasoning_item_param import ResponseReasoningItemParam as ResponseReasoningItemParam +from .web_search_preview_tool_param import WebSearchPreviewToolParam as WebSearchPreviewToolParam from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent as ResponseMcpCallFailedEvent -from .response_reasoning_delta_event import ResponseReasoningDeltaEvent as ResponseReasoningDeltaEvent +from .response_custom_tool_call_param import ResponseCustomToolCallParam as ResponseCustomToolCallParam from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent +from .response_custom_tool_call_output import ResponseCustomToolCallOutput as ResponseCustomToolCallOutput from .response_function_tool_call_item import ResponseFunctionToolCallItem as ResponseFunctionToolCallItem from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent from .response_computer_tool_call_param import ResponseComputerToolCallParam as ResponseComputerToolCallParam @@ -91,24 +108,23 @@ from .response_function_tool_call_param import ResponseFunctionToolCallParam as ResponseFunctionToolCallParam from .response_mcp_call_completed_event import ResponseMcpCallCompletedEvent as ResponseMcpCallCompletedEvent from .response_function_web_search_param import ResponseFunctionWebSearchParam as ResponseFunctionWebSearchParam +from .response_reasoning_text_done_event import ResponseReasoningTextDoneEvent as ResponseReasoningTextDoneEvent from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall from .response_input_message_content_list import ResponseInputMessageContentList as ResponseInputMessageContentList from .response_mcp_call_in_progress_event import ResponseMcpCallInProgressEvent as ResponseMcpCallInProgressEvent +from .response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent as ResponseReasoningTextDeltaEvent from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam as ResponseFileSearchToolCallParam from .response_mcp_list_tools_failed_event import ResponseMcpListToolsFailedEvent as ResponseMcpListToolsFailedEvent from .response_audio_transcript_delta_event import ( ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, ) -from .response_reasoning_summary_done_event import ( - ResponseReasoningSummaryDoneEvent as ResponseReasoningSummaryDoneEvent, +from .response_custom_tool_call_output_param import ( + ResponseCustomToolCallOutputParam as ResponseCustomToolCallOutputParam, ) from .response_mcp_call_arguments_done_event import ( ResponseMcpCallArgumentsDoneEvent as ResponseMcpCallArgumentsDoneEvent, ) -from .response_reasoning_summary_delta_event import ( - ResponseReasoningSummaryDeltaEvent as ResponseReasoningSummaryDeltaEvent, -) from .response_computer_tool_call_output_item import ( ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem, ) @@ -154,6 +170,9 @@ from 
.response_mcp_list_tools_in_progress_event import ( ResponseMcpListToolsInProgressEvent as ResponseMcpListToolsInProgressEvent, ) +from .response_custom_tool_call_input_done_event import ( + ResponseCustomToolCallInputDoneEvent as ResponseCustomToolCallInputDoneEvent, +) from .response_reasoning_summary_part_done_event import ( ResponseReasoningSummaryPartDoneEvent as ResponseReasoningSummaryPartDoneEvent, ) @@ -163,6 +182,9 @@ from .response_web_search_call_in_progress_event import ( ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent, ) +from .response_custom_tool_call_input_delta_event import ( + ResponseCustomToolCallInputDeltaEvent as ResponseCustomToolCallInputDeltaEvent, +) from .response_file_search_call_in_progress_event import ( ResponseFileSearchCallInProgressEvent as ResponseFileSearchCallInProgressEvent, ) diff --git a/portkey_ai/_vendor/openai/types/responses/custom_tool.py b/portkey_ai/_vendor/openai/types/responses/custom_tool.py new file mode 100644 index 00000000..c16ae715 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/custom_tool.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.custom_tool_input_format import CustomToolInputFormat + +__all__ = ["CustomTool"] + + +class CustomTool(BaseModel): + name: str + """The name of the custom tool, used to identify it in tool calls.""" + + type: Literal["custom"] + """The type of the custom tool. Always `custom`.""" + + description: Optional[str] = None + """Optional description of the custom tool, used to provide more context.""" + + format: Optional[CustomToolInputFormat] = None + """The input format for the custom tool. Default is unconstrained text.""" diff --git a/portkey_ai/_vendor/openai/types/responses/custom_tool_param.py b/portkey_ai/_vendor/openai/types/responses/custom_tool_param.py new file mode 100644 index 00000000..2afc8b19 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/custom_tool_param.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from ..shared_params.custom_tool_input_format import CustomToolInputFormat + +__all__ = ["CustomToolParam"] + + +class CustomToolParam(TypedDict, total=False): + name: Required[str] + """The name of the custom tool, used to identify it in tool calls.""" + + type: Required[Literal["custom"]] + """The type of the custom tool. Always `custom`.""" + + description: str + """Optional description of the custom tool, used to provide more context.""" + + format: CustomToolInputFormat + """The input format for the custom tool. 
Default is unconstrained text.""" diff --git a/portkey_ai/_vendor/openai/types/responses/file_search_tool_param.py b/portkey_ai/_vendor/openai/types/responses/file_search_tool_param.py index 2851fae4..c7641c1b 100644 --- a/portkey_ai/_vendor/openai/types/responses/file_search_tool_param.py +++ b/portkey_ai/_vendor/openai/types/responses/file_search_tool_param.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Union, Optional +from typing import Union, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..._types import SequenceNotStr from ..shared_params.compound_filter import CompoundFilter from ..shared_params.comparison_filter import ComparisonFilter @@ -29,7 +30,7 @@ class FileSearchToolParam(TypedDict, total=False): type: Required[Literal["file_search"]] """The type of the file search tool. Always `file_search`.""" - vector_store_ids: Required[List[str]] + vector_store_ids: Required[SequenceNotStr[str]] """The IDs of the vector stores to search.""" filters: Optional[Filters] diff --git a/portkey_ai/_vendor/openai/types/responses/input_item_list_params.py b/portkey_ai/_vendor/openai/types/responses/input_item_list_params.py index 6a18d920..44a8dc5d 100644 --- a/portkey_ai/_vendor/openai/types/responses/input_item_list_params.py +++ b/portkey_ai/_vendor/openai/types/responses/input_item_list_params.py @@ -14,9 +14,6 @@ class InputItemListParams(TypedDict, total=False): after: str """An item ID to list items after, used in pagination.""" - before: str - """An item ID to list items before, used in pagination.""" - include: List[ResponseIncludable] """Additional fields to include in the response. diff --git a/portkey_ai/_vendor/openai/types/responses/parsed_response.py b/portkey_ai/_vendor/openai/types/responses/parsed_response.py index e59e86d2..1d9db361 100644 --- a/portkey_ai/_vendor/openai/types/responses/parsed_response.py +++ b/portkey_ai/_vendor/openai/types/responses/parsed_response.py @@ -19,6 +19,7 @@ from .response_output_message import ResponseOutputMessage from .response_output_refusal import ResponseOutputRefusal from .response_reasoning_item import ResponseReasoningItem +from .response_custom_tool_call import ResponseCustomToolCall from .response_computer_tool_call import ResponseComputerToolCall from .response_function_tool_call import ResponseFunctionToolCall from .response_function_web_search import ResponseFunctionWebSearch @@ -73,6 +74,7 @@ class ParsedResponseFunctionToolCall(ResponseFunctionToolCall): LocalShellCallAction, McpListTools, ResponseCodeInterpreterToolCall, + ResponseCustomToolCall, ], PropertyInfo(discriminator="type"), ] diff --git a/portkey_ai/_vendor/openai/types/responses/response.py b/portkey_ai/_vendor/openai/types/responses/response.py index 441b3454..423b6f20 100644 --- a/portkey_ai/_vendor/openai/types/responses/response.py +++ b/portkey_ai/_vendor/openai/types/responses/response.py @@ -7,17 +7,22 @@ from ..._models import BaseModel from .response_error import ResponseError from .response_usage import ResponseUsage +from .response_prompt import ResponsePrompt from .response_status import ResponseStatus +from .tool_choice_mcp import ToolChoiceMcp from ..shared.metadata import Metadata from ..shared.reasoning import Reasoning from .tool_choice_types import ToolChoiceTypes +from .tool_choice_custom import ToolChoiceCustom +from .response_input_item import ResponseInputItem +from .tool_choice_allowed import ToolChoiceAllowed from .tool_choice_options import ToolChoiceOptions from 
.response_output_item import ResponseOutputItem from .response_text_config import ResponseTextConfig from .tool_choice_function import ToolChoiceFunction from ..shared.responses_model import ResponsesModel -__all__ = ["Response", "IncompleteDetails", "ToolChoice"] +__all__ = ["Response", "IncompleteDetails", "ToolChoice", "Conversation"] class IncompleteDetails(BaseModel): @@ -25,7 +30,14 @@ class IncompleteDetails(BaseModel): """The reason why the response is incomplete.""" -ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceTypes, ToolChoiceFunction] +ToolChoice: TypeAlias = Union[ + ToolChoiceOptions, ToolChoiceAllowed, ToolChoiceTypes, ToolChoiceFunction, ToolChoiceMcp, ToolChoiceCustom +] + + +class Conversation(BaseModel): + id: str + """The unique ID of the conversation.""" class Response(BaseModel): @@ -41,10 +53,8 @@ class Response(BaseModel): incomplete_details: Optional[IncompleteDetails] = None """Details about why the response is incomplete.""" - instructions: Optional[str] = None - """ - Inserts a system (or developer) message as the first item in the model's - context. + instructions: Union[str, List[ResponseInputItem], None] = None + """A system (or developer) message inserted into the model's context. When using along with `previous_response_id`, the instructions from a previous response will not be carried over to the next response. This makes it simple to @@ -106,7 +116,7 @@ class Response(BaseModel): You can specify which tool to use by setting the `tool_choice` parameter. - The two categories of tools you can provide the model are: + We support the following categories of tools: - **Built-in tools**: Tools that are provided by OpenAI that extend the model's capabilities, like @@ -114,9 +124,14 @@ class Response(BaseModel): [file search](https://platform.openai.com/docs/guides/tools-file-search). Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools). + - **MCP Tools**: Integrations with third-party systems via custom MCP servers or + predefined connectors such as Google Drive and SharePoint. Learn more about + [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp). - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about + the model to call your own code with strongly typed arguments and outputs. + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. """ top_p: Optional[float] = None @@ -129,11 +144,18 @@ class Response(BaseModel): """ background: Optional[bool] = None - """Whether to run the model response in the background. - + """ + Whether to run the model response in the background. [Learn more](https://platform.openai.com/docs/guides/background). """ + conversation: Optional[Conversation] = None + """The conversation that this response belongs to. + + Input items and output items from this response are automatically added to this + conversation. + """ + max_output_tokens: Optional[int] = None """ An upper bound for the number of tokens that can be generated for a response, @@ -141,39 +163,68 @@ class Response(BaseModel): [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). """ + max_tool_calls: Optional[int] = None + """ + The maximum number of total calls to built-in tools that can be processed in a + response. This maximum number applies across all built-in tool calls, not per + individual tool. 
Any further attempts to call a tool by the model will be + ignored. + """ + previous_response_id: Optional[str] = None """The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. + """ + + prompt: Optional[ResponsePrompt] = None + """Reference to a prompt template and its variables. + + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + """ + + prompt_cache_key: Optional[str] = None + """ + Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). """ reasoning: Optional[Reasoning] = None - """**o-series models only** + """**gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). """ - service_tier: Optional[Literal["auto", "default", "flex"]] = None - """Specifies the latency tier to use for processing the request. - - This parameter is relevant for customers subscribed to the scale tier service: + safety_identifier: Optional[str] = None + """ + A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + """ - - If set to 'auto', and the Project is Scale tier enabled, the system will - utilize scale tier credits until they are exhausted. - - If set to 'auto', and the Project is not Scale tier enabled, the request will - be processed using the default service tier with a lower uptime SLA and no - latency guarantee. - - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarantee. - - If set to 'flex', the request will be processed with the Flex Processing - service tier. - [Learn more](https://platform.openai.com/docs/guides/flex-processing). + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None + """Specifies the processing type used for serving the request. + + - If set to 'auto', then the request will be processed with the service tier + configured in the Project settings. Unless otherwise configured, the Project + will use 'default'. + - If set to 'default', then the request will be processed with the standard + pricing and performance for the selected model. + - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. + When the `service_tier` parameter is set, the response body will include the + `service_tier` value based on the processing mode actually used to serve the + request. This response value may be different from the value set in the + parameter. 
""" status: Optional[ResponseStatus] = None @@ -192,13 +243,19 @@ class Response(BaseModel): - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) """ + top_logprobs: Optional[int] = None + """ + An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + """ + truncation: Optional[Literal["auto", "disabled"]] = None """The truncation strategy to use for the model response. - - `auto`: If the context of this response and previous ones exceeds the model's - context window size, the model will truncate the response to fit the context - window by dropping input items in the middle of the conversation. - - `disabled` (default): If a model response will exceed the context window size + - `auto`: If the input to this Response exceeds the model's context window size, + the model will truncate the response to fit the context window by dropping + items from the beginning of the conversation. + - `disabled` (default): If the input size will exceed the context window size for a model, the request will fail with a 400 error. """ @@ -209,17 +266,17 @@ class Response(BaseModel): """ user: Optional[str] = None - """A stable identifier for your end-users. + """This field is being replaced by `safety_identifier` and `prompt_cache_key`. - Used to boost cache hit rates by better bucketing similar requests and to help - OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + Use `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). """ @property def output_text(self) -> str: - """Convenience property that aggregates all `output_text` items from the `output` - list. + """Convenience property that aggregates all `output_text` items from the `output` list. If no `output_text` content blocks exist, then an empty string is returned. """ diff --git a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_delta_event.py index d2224315..c5fef939 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_delta_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_delta_event.py @@ -9,13 +9,19 @@ class ResponseCodeInterpreterCallCodeDeltaEvent(BaseModel): delta: str - """The partial code snippet added by the code interpreter.""" + """The partial code snippet being streamed by the code interpreter.""" + + item_id: str + """The unique identifier of the code interpreter tool call item.""" output_index: int - """The index of the output item that the code interpreter call is in progress.""" + """ + The index of the output item in the response for which the code is being + streamed. + """ sequence_number: int - """The sequence number of this event.""" + """The sequence number of this event, used to order streaming events.""" type: Literal["response.code_interpreter_call_code.delta"] """The type of the event. 
Always `response.code_interpreter_call_code.delta`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_done_event.py index 1ce6796a..5201a02d 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_code_done_event.py @@ -11,11 +11,14 @@ class ResponseCodeInterpreterCallCodeDoneEvent(BaseModel): code: str """The final code snippet output by the code interpreter.""" + item_id: str + """The unique identifier of the code interpreter tool call item.""" + output_index: int - """The index of the output item that the code interpreter call is in progress.""" + """The index of the output item in the response for which the code is finalized.""" sequence_number: int - """The sequence number of this event.""" + """The sequence number of this event, used to order streaming events.""" type: Literal["response.code_interpreter_call_code.done"] """The type of the event. Always `response.code_interpreter_call_code.done`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_completed_event.py b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_completed_event.py index 3a3a7189..bb9563a1 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_completed_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_completed_event.py @@ -3,20 +3,22 @@ from typing_extensions import Literal from ..._models import BaseModel -from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall __all__ = ["ResponseCodeInterpreterCallCompletedEvent"] class ResponseCodeInterpreterCallCompletedEvent(BaseModel): - code_interpreter_call: ResponseCodeInterpreterToolCall - """A tool call to run code.""" + item_id: str + """The unique identifier of the code interpreter tool call item.""" output_index: int - """The index of the output item that the code interpreter call is in progress.""" + """ + The index of the output item in the response for which the code interpreter call + is completed. + """ sequence_number: int - """The sequence number of this event.""" + """The sequence number of this event, used to order streaming events.""" type: Literal["response.code_interpreter_call.completed"] """The type of the event. 
Always `response.code_interpreter_call.completed`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_in_progress_event.py b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_in_progress_event.py index d1c82309..9c6b2210 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_in_progress_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_in_progress_event.py @@ -3,20 +3,22 @@ from typing_extensions import Literal from ..._models import BaseModel -from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall __all__ = ["ResponseCodeInterpreterCallInProgressEvent"] class ResponseCodeInterpreterCallInProgressEvent(BaseModel): - code_interpreter_call: ResponseCodeInterpreterToolCall - """A tool call to run code.""" + item_id: str + """The unique identifier of the code interpreter tool call item.""" output_index: int - """The index of the output item that the code interpreter call is in progress.""" + """ + The index of the output item in the response for which the code interpreter call + is in progress. + """ sequence_number: int - """The sequence number of this event.""" + """The sequence number of this event, used to order streaming events.""" type: Literal["response.code_interpreter_call.in_progress"] """The type of the event. Always `response.code_interpreter_call.in_progress`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_interpreting_event.py b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_interpreting_event.py index 7f4d294f..f6191e41 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_interpreting_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_call_interpreting_event.py @@ -3,20 +3,22 @@ from typing_extensions import Literal from ..._models import BaseModel -from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall __all__ = ["ResponseCodeInterpreterCallInterpretingEvent"] class ResponseCodeInterpreterCallInterpretingEvent(BaseModel): - code_interpreter_call: ResponseCodeInterpreterToolCall - """A tool call to run code.""" + item_id: str + """The unique identifier of the code interpreter tool call item.""" output_index: int - """The index of the output item that the code interpreter call is in progress.""" + """ + The index of the output item in the response for which the code interpreter is + interpreting code. + """ sequence_number: int - """The sequence number of this event.""" + """The sequence number of this event, used to order streaming events.""" type: Literal["response.code_interpreter_call.interpreting"] """The type of the event. 
Always `response.code_interpreter_call.interpreting`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call.py b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call.py index 762542f3..25793711 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call.py +++ b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call.py @@ -6,50 +6,50 @@ from ..._utils import PropertyInfo from ..._models import BaseModel -__all__ = ["ResponseCodeInterpreterToolCall", "Result", "ResultLogs", "ResultFiles", "ResultFilesFile"] +__all__ = ["ResponseCodeInterpreterToolCall", "Output", "OutputLogs", "OutputImage"] -class ResultLogs(BaseModel): +class OutputLogs(BaseModel): logs: str - """The logs of the code interpreter tool call.""" + """The logs output from the code interpreter.""" type: Literal["logs"] - """The type of the code interpreter text output. Always `logs`.""" + """The type of the output. Always 'logs'.""" -class ResultFilesFile(BaseModel): - file_id: str - """The ID of the file.""" +class OutputImage(BaseModel): + type: Literal["image"] + """The type of the output. Always 'image'.""" - mime_type: str - """The MIME type of the file.""" + url: str + """The URL of the image output from the code interpreter.""" -class ResultFiles(BaseModel): - files: List[ResultFilesFile] - - type: Literal["files"] - """The type of the code interpreter file output. Always `files`.""" - - -Result: TypeAlias = Annotated[Union[ResultLogs, ResultFiles], PropertyInfo(discriminator="type")] +Output: TypeAlias = Annotated[Union[OutputLogs, OutputImage], PropertyInfo(discriminator="type")] class ResponseCodeInterpreterToolCall(BaseModel): id: str """The unique ID of the code interpreter tool call.""" - code: str - """The code to run.""" + code: Optional[str] = None + """The code to run, or null if not available.""" + + container_id: str + """The ID of the container used to run the code.""" - results: List[Result] - """The results of the code interpreter tool call.""" + outputs: Optional[List[Output]] = None + """The outputs generated by the code interpreter, such as logs or images. - status: Literal["in_progress", "interpreting", "completed"] - """The status of the code interpreter tool call.""" + Can be null if no outputs are available. + """ + + status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"] + """The status of the code interpreter tool call. + + Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and + `failed`. + """ type: Literal["code_interpreter_call"] """The type of the code interpreter tool call. 
Always `code_interpreter_call`.""" - - container_id: Optional[str] = None - """The ID of the container used to run the code.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call_param.py b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call_param.py index be0f909a..43509100 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_code_interpreter_tool_call_param.py @@ -2,53 +2,53 @@ from __future__ import annotations -from typing import Union, Iterable +from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict -__all__ = ["ResponseCodeInterpreterToolCallParam", "Result", "ResultLogs", "ResultFiles", "ResultFilesFile"] +__all__ = ["ResponseCodeInterpreterToolCallParam", "Output", "OutputLogs", "OutputImage"] -class ResultLogs(TypedDict, total=False): +class OutputLogs(TypedDict, total=False): logs: Required[str] - """The logs of the code interpreter tool call.""" + """The logs output from the code interpreter.""" type: Required[Literal["logs"]] - """The type of the code interpreter text output. Always `logs`.""" + """The type of the output. Always 'logs'.""" -class ResultFilesFile(TypedDict, total=False): - file_id: Required[str] - """The ID of the file.""" +class OutputImage(TypedDict, total=False): + type: Required[Literal["image"]] + """The type of the output. Always 'image'.""" - mime_type: Required[str] - """The MIME type of the file.""" + url: Required[str] + """The URL of the image output from the code interpreter.""" -class ResultFiles(TypedDict, total=False): - files: Required[Iterable[ResultFilesFile]] - - type: Required[Literal["files"]] - """The type of the code interpreter file output. Always `files`.""" - - -Result: TypeAlias = Union[ResultLogs, ResultFiles] +Output: TypeAlias = Union[OutputLogs, OutputImage] class ResponseCodeInterpreterToolCallParam(TypedDict, total=False): id: Required[str] """The unique ID of the code interpreter tool call.""" - code: Required[str] - """The code to run.""" + code: Required[Optional[str]] + """The code to run, or null if not available.""" + + container_id: Required[str] + """The ID of the container used to run the code.""" - results: Required[Iterable[Result]] - """The results of the code interpreter tool call.""" + outputs: Required[Optional[Iterable[Output]]] + """The outputs generated by the code interpreter, such as logs or images. - status: Required[Literal["in_progress", "interpreting", "completed"]] - """The status of the code interpreter tool call.""" + Can be null if no outputs are available. + """ + + status: Required[Literal["in_progress", "completed", "incomplete", "interpreting", "failed"]] + """The status of the code interpreter tool call. + + Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and + `failed`. + """ type: Required[Literal["code_interpreter_call"]] """The type of the code interpreter tool call. 
 
     Always `code_interpreter_call`."""
-
-    container_id: str
-    """The ID of the container used to run the code."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_param.py b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_param.py
index d4ef56ab..0be63db2 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_computer_tool_call_param.py
@@ -2,9 +2,11 @@
 
 from __future__ import annotations
 
-from typing import List, Union, Iterable
+from typing import Union, Iterable
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
+from ..._types import SequenceNotStr
+
 __all__ = [
     "ResponseComputerToolCallParam",
     "Action",
@@ -86,7 +88,7 @@ class ActionDrag(TypedDict, total=False):
 
 
 class ActionKeypress(TypedDict, total=False):
-    keys: Required[List[str]]
+    keys: Required[SequenceNotStr[str]]
     """The combination of keys the model is requesting to be pressed.
 
     This is an array of strings, each representing a key.
diff --git a/portkey_ai/_vendor/openai/types/responses/response_conversation_param.py b/portkey_ai/_vendor/openai/types/responses/response_conversation_param.py
new file mode 100644
index 00000000..067bdc7a
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_conversation_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ResponseConversationParam"]
+
+
+class ResponseConversationParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the conversation."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_create_params.py b/portkey_ai/_vendor/openai/types/responses/response_create_params.py
index 1abc2ccb..af0d5e74 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_create_params.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_create_params.py
@@ -9,15 +9,22 @@
 from .response_includable import ResponseIncludable
 from .tool_choice_options import ToolChoiceOptions
 from .response_input_param import ResponseInputParam
+from .response_prompt_param import ResponsePromptParam
+from .tool_choice_mcp_param import ToolChoiceMcpParam
 from ..shared_params.metadata import Metadata
 from .tool_choice_types_param import ToolChoiceTypesParam
 from ..shared_params.reasoning import Reasoning
+from .tool_choice_custom_param import ToolChoiceCustomParam
+from .tool_choice_allowed_param import ToolChoiceAllowedParam
 from .response_text_config_param import ResponseTextConfigParam
 from .tool_choice_function_param import ToolChoiceFunctionParam
+from .response_conversation_param import ResponseConversationParam
 from ..shared_params.responses_model import ResponsesModel
 
 __all__ = [
     "ResponseCreateParamsBase",
+    "Conversation",
+    "StreamOptions",
     "ToolChoice",
     "ResponseCreateParamsNonStreaming",
     "ResponseCreateParamsStreaming",
@@ -25,31 +32,18 @@ class ResponseCreateParamsBase(TypedDict, total=False):
-    input: Required[Union[str, ResponseInputParam]]
-    """Text, image, or file inputs to the model, used to generate a response.
-
-    Learn more:
-
-    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-    - [Image inputs](https://platform.openai.com/docs/guides/images)
-    - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-    - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-    - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+    background: Optional[bool]
     """
-
-    model: Required[ResponsesModel]
-    """Model ID used to generate the response, like `gpt-4o` or `o3`.
-
-    OpenAI offers a wide range of models with different capabilities, performance
-    characteristics, and price points. Refer to the
-    [model guide](https://platform.openai.com/docs/models) to browse and compare
-    available models.
+    Whether to run the model response in the background.
+    [Learn more](https://platform.openai.com/docs/guides/background).
     """
 
-    background: Optional[bool]
-    """Whether to run the model response in the background.
+    conversation: Optional[Conversation]
+    """The conversation that this response belongs to.
 
-    [Learn more](https://platform.openai.com/docs/guides/background).
+    Items from this conversation are prepended to `input_items` for this response
+    request. Input items and output items from this response are automatically added
+    to this conversation after this response completes.
     """
 
     include: Optional[List[ResponseIncludable]]
@@ -57,24 +51,37 @@ class ResponseCreateParamsBase(TypedDict, total=False):
 
     Currently supported values are:
 
-    - `file_search_call.results`: Include the search results of the file search tool
+    - `web_search_call.action.sources`: Include the sources of the web search tool
       call.
-    - `message.input_image.image_url`: Include image urls from the input message.
+    - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+      in code interpreter tool call items.
     - `computer_call_output.output.image_url`: Include image urls from the computer
       call output.
+    - `file_search_call.results`: Include the search results of the file search tool
+      call.
+    - `message.input_image.image_url`: Include image urls from the input message.
+    - `message.output_text.logprobs`: Include logprobs with assistant messages.
     - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
       tokens in reasoning item outputs. This enables reasoning items to be used in
       multi-turn conversations when using the Responses API statelessly (like when
      the `store` parameter is set to `false`, or when an organization is enrolled
      in the zero data retention program).
-    - `code_interpreter_call.outputs`: Includes the outputs of python code execution
-      in code interpreter tool call items.
     """
 
-    instructions: Optional[str]
+    input: Union[str, ResponseInputParam]
+    """Text, image, or file inputs to the model, used to generate a response.
+
+    Learn more:
+
+    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+    - [Image inputs](https://platform.openai.com/docs/guides/images)
+    - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+    - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+    - [Function calling](https://platform.openai.com/docs/guides/function-calling)
     """
-    Inserts a system (or developer) message as the first item in the model's
-    context.
+
+    instructions: Optional[str]
+    """A system (or developer) message inserted into the model's context.
 
     When using along with `previous_response_id`, the instructions from a previous
     response will not be carried over to the next response. This makes it simple to
@@ -88,6 +95,14 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
     """
 
+    max_tool_calls: Optional[int]
+    """
+    The maximum number of total calls to built-in tools that can be processed in a
+    response. This maximum number applies across all built-in tool calls, not per
+    individual tool. Any further attempts to call a tool by the model will be
+    ignored.
+    """
+
     metadata: Optional[Metadata]
     """Set of 16 key-value pairs that can be attached to an object.
 
@@ -98,6 +113,15 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     a maximum length of 512 characters.
     """
 
+    model: ResponsesModel
+    """Model ID used to generate the response, like `gpt-4o` or `o3`.
+
+    OpenAI offers a wide range of models with different capabilities, performance
+    characteristics, and price points. Refer to the
+    [model guide](https://platform.openai.com/docs/models) to browse and compare
+    available models.
+    """
+
     parallel_tool_calls: Optional[bool]
     """Whether to allow the model to run tool calls in parallel."""
 
@@ -106,39 +130,63 @@ class ResponseCreateParamsBase(TypedDict, total=False):
 
     Use this to create multi-turn conversations. Learn more about
     [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+    Cannot be used in conjunction with `conversation`.
+    """
+
+    prompt: Optional[ResponsePromptParam]
+    """Reference to a prompt template and its variables.
+
+    [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+    """
+
+    prompt_cache_key: str
+    """
+    Used by OpenAI to cache responses for similar requests to optimize your cache
+    hit rates. Replaces the `user` field.
+    [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
     """
 
     reasoning: Optional[Reasoning]
-    """**o-series models only**
+    """**gpt-5 and o-series models only**
 
     Configuration options for
     [reasoning models](https://platform.openai.com/docs/guides/reasoning).
     """
 
-    service_tier: Optional[Literal["auto", "default", "flex"]]
-    """Specifies the latency tier to use for processing the request.
-
-    This parameter is relevant for customers subscribed to the scale tier service:
+    safety_identifier: str
+    """
+    A stable identifier used to help detect users of your application that may be
+    violating OpenAI's usage policies. The IDs should be a string that uniquely
+    identifies each user. We recommend hashing their username or email address, in
+    order to avoid sending us any identifying information.
+    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+    """
 
-    - If set to 'auto', and the Project is Scale tier enabled, the system will
-      utilize scale tier credits until they are exhausted.
-    - If set to 'auto', and the Project is not Scale tier enabled, the request will
-      be processed using the default service tier with a lower uptime SLA and no
-      latency guarantee.
-    - If set to 'default', the request will be processed using the default service
-      tier with a lower uptime SLA and no latency guarantee.
-    - If set to 'flex', the request will be processed with the Flex Processing
-      service tier.
-      [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+    service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]]
+    """Specifies the processing type used for serving the request.
+
+    - If set to 'auto', then the request will be processed with the service tier
+      configured in the Project settings. Unless otherwise configured, the Project
+      will use 'default'.
+    - If set to 'default', then the request will be processed with the standard
+      pricing and performance for the selected model.
+    - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+      '[priority](https://openai.com/api-priority-processing/)', then the request
+      will be processed with the corresponding service tier.
     - When not set, the default behavior is 'auto'.
 
-    When this parameter is set, the response body will include the `service_tier`
-    utilized.
+    When the `service_tier` parameter is set, the response body will include the
+    `service_tier` value based on the processing mode actually used to serve the
+    request. This response value may be different from the value set in the
+    parameter.
     """
 
     store: Optional[bool]
     """Whether to store the generated model response for later retrieval via API."""
 
+    stream_options: Optional[StreamOptions]
+    """Options for streaming responses. Only set this when you set `stream: true`."""
+
     temperature: Optional[float]
     """What sampling temperature to use, between 0 and 2.
 
@@ -168,7 +216,7 @@ class ResponseCreateParamsBase(TypedDict, total=False):
 
     You can specify which tool to use by setting the `tool_choice` parameter.
 
-    The two categories of tools you can provide the model are:
+    We support the following categories of tools:
 
     - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
       capabilities, like
@@ -176,9 +224,20 @@ class ResponseCreateParamsBase(TypedDict, total=False):
       [file search](https://platform.openai.com/docs/guides/tools-file-search).
       Learn more about
       [built-in tools](https://platform.openai.com/docs/guides/tools).
+    - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
+      predefined connectors such as Google Drive and SharePoint. Learn more about
+      [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
     - **Function calls (custom tools)**: Functions that are defined by you, enabling
-      the model to call your own code. Learn more about
+      the model to call your own code with strongly typed arguments and outputs.
+      Learn more about
       [function calling](https://platform.openai.com/docs/guides/function-calling).
+      You can also use custom tools to call your own code.
+    """
+
+    top_logprobs: Optional[int]
+    """
+    An integer between 0 and 20 specifying the number of most likely tokens to
+    return at each token position, each with an associated log probability.
     """
 
     top_p: Optional[float]
@@ -193,23 +252,47 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     truncation: Optional[Literal["auto", "disabled"]]
     """The truncation strategy to use for the model response.
 
-    - `auto`: If the context of this response and previous ones exceeds the model's
-      context window size, the model will truncate the response to fit the context
-      window by dropping input items in the middle of the conversation.
-    - `disabled` (default): If a model response will exceed the context window size
+    - `auto`: If the input to this Response exceeds the model's context window size,
+      the model will truncate the response to fit the context window by dropping
+      items from the beginning of the conversation.
+    - `disabled` (default): If the input size will exceed the context window size
       for a model, the request will fail with a 400 error.
     """
 
     user: str
-    """A stable identifier for your end-users.
+    """This field is being replaced by `safety_identifier` and `prompt_cache_key`.
 
-    Used to boost cache hit rates by better bucketing similar requests and to help
-    OpenAI detect and prevent abuse.
-    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+    Use `prompt_cache_key` instead to maintain caching optimizations. A stable
+    identifier for your end-users. Used to boost cache hit rates by better bucketing
+    similar requests and to help OpenAI detect and prevent abuse.
+    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
     """
 
 
-ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceTypesParam, ToolChoiceFunctionParam]
+Conversation: TypeAlias = Union[str, ResponseConversationParam]
+
+
+class StreamOptions(TypedDict, total=False):
+    include_obfuscation: bool
+    """When true, stream obfuscation will be enabled.
+
+    Stream obfuscation adds random characters to an `obfuscation` field on streaming
+    delta events to normalize payload sizes as a mitigation to certain side-channel
+    attacks. These obfuscation fields are included by default, but add a small
+    amount of overhead to the data stream. You can set `include_obfuscation` to
+    false to optimize for bandwidth if you trust the network links between your
+    application and the OpenAI API.
+    """
+
+
+ToolChoice: TypeAlias = Union[
+    ToolChoiceOptions,
+    ToolChoiceAllowedParam,
+    ToolChoiceTypesParam,
+    ToolChoiceFunctionParam,
+    ToolChoiceMcpParam,
+    ToolChoiceCustomParam,
+]
 
 
 class ResponseCreateParamsNonStreaming(ResponseCreateParamsBase, total=False):
diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call.py
new file mode 100644
index 00000000..38c650e6
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseCustomToolCall"]
+
+
+class ResponseCustomToolCall(BaseModel):
+    call_id: str
+    """An identifier used to map this custom tool call to a tool call output."""
+
+    input: str
+    """The input for the custom tool call generated by the model."""
+
+    name: str
+    """The name of the custom tool being called."""
+
+    type: Literal["custom_tool_call"]
+    """The type of the custom tool call. Always `custom_tool_call`."""
+
+    id: Optional[str] = None
+    """The unique ID of the custom tool call in the OpenAI platform."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_input_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_input_delta_event.py
new file mode 100644
index 00000000..6c33102d
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_input_delta_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseCustomToolCallInputDeltaEvent"]
+
+
+class ResponseCustomToolCallInputDeltaEvent(BaseModel):
+    delta: str
+    """The incremental input data (delta) for the custom tool call."""
+
+    item_id: str
+    """Unique identifier for the API item associated with this event."""
+
+    output_index: int
+    """The index of the output this delta applies to."""
+
+    sequence_number: int
+    """The sequence number of this event."""
+
+    type: Literal["response.custom_tool_call_input.delta"]
+    """The event type identifier."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_input_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_input_done_event.py
new file mode 100644
index 00000000..35a2fee2
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_input_done_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseCustomToolCallInputDoneEvent"]
+
+
+class ResponseCustomToolCallInputDoneEvent(BaseModel):
+    input: str
+    """The complete input data for the custom tool call."""
+
+    item_id: str
+    """Unique identifier for the API item associated with this event."""
+
+    output_index: int
+    """The index of the output this event applies to."""
+
+    sequence_number: int
+    """The sequence number of this event."""
+
+    type: Literal["response.custom_tool_call_input.done"]
+    """The event type identifier."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output.py
new file mode 100644
index 00000000..a2b4cc30
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseCustomToolCallOutput"]
+
+
+class ResponseCustomToolCallOutput(BaseModel):
+    call_id: str
+    """The call ID, used to map this custom tool call output to a custom tool call."""
+
+    output: str
+    """The output from the custom tool call generated by your code."""
+
+    type: Literal["custom_tool_call_output"]
+    """The type of the custom tool call output. Always `custom_tool_call_output`."""
+
+    id: Optional[str] = None
+    """The unique ID of the custom tool call output in the OpenAI platform."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output_param.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output_param.py
new file mode 100644
index 00000000..d52c5254
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_output_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseCustomToolCallOutputParam"]
+
+
+class ResponseCustomToolCallOutputParam(TypedDict, total=False):
+    call_id: Required[str]
+    """The call ID, used to map this custom tool call output to a custom tool call."""
+
+    output: Required[str]
+    """The output from the custom tool call generated by your code."""
+
+    type: Required[Literal["custom_tool_call_output"]]
+    """The type of the custom tool call output. Always `custom_tool_call_output`."""
+
+    id: str
+    """The unique ID of the custom tool call output in the OpenAI platform."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_param.py b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_param.py
new file mode 100644
index 00000000..e15beac2
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_custom_tool_call_param.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseCustomToolCallParam"]
+
+
+class ResponseCustomToolCallParam(TypedDict, total=False):
+    call_id: Required[str]
+    """An identifier used to map this custom tool call to a tool call output."""
+
+    input: Required[str]
+    """The input for the custom tool call generated by the model."""
+
+    name: Required[str]
+    """The name of the custom tool being called."""
+
+    type: Required[Literal["custom_tool_call"]]
+    """The type of the custom tool call. Always `custom_tool_call`."""
+
+    id: str
+    """The unique ID of the custom tool call in the OpenAI platform."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_file_search_tool_call_param.py b/portkey_ai/_vendor/openai/types/responses/response_file_search_tool_call_param.py
index 9a4177cf..4903dca4 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_file_search_tool_call_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_file_search_tool_call_param.py
@@ -2,9 +2,11 @@
 
 from __future__ import annotations
 
-from typing import Dict, List, Union, Iterable, Optional
+from typing import Dict, Union, Iterable, Optional
 from typing_extensions import Literal, Required, TypedDict
 
+from ..._types import SequenceNotStr
+
 __all__ = ["ResponseFileSearchToolCallParam", "Result"]
 
 
@@ -35,7 +37,7 @@ class ResponseFileSearchToolCallParam(TypedDict, total=False):
     id: Required[str]
     """The unique ID of the file search tool call."""
 
-    queries: Required[List[str]]
+    queries: Required[SequenceNotStr[str]]
     """The queries used to search for files."""
 
     status: Required[Literal["in_progress", "searching", "completed", "incomplete", "failed"]]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_web_search.py b/portkey_ai/_vendor/openai/types/responses/response_function_web_search.py
index 44734b68..f3e80e6a 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_function_web_search.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_function_web_search.py
@@ -1,16 +1,65 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing_extensions import Literal
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
 
+from ..._utils import PropertyInfo
 from ..._models import BaseModel
 
-__all__ = ["ResponseFunctionWebSearch"]
+__all__ = ["ResponseFunctionWebSearch", "Action", "ActionSearch", "ActionSearchSource", "ActionOpenPage", "ActionFind"]
+
+
+class ActionSearchSource(BaseModel):
+    type: Literal["url"]
+    """The type of source. Always `url`."""
+
+    url: str
+    """The URL of the source."""
+
+
+class ActionSearch(BaseModel):
+    query: str
+    """The search query."""
+
+    type: Literal["search"]
+    """The action type."""
+
+    sources: Optional[List[ActionSearchSource]] = None
+    """The sources used in the search."""
+
+
+class ActionOpenPage(BaseModel):
+    type: Literal["open_page"]
+    """The action type."""
+
+    url: str
+    """The URL opened by the model."""
+
+
+class ActionFind(BaseModel):
+    pattern: str
+    """The pattern or text to search for within the page."""
+
+    type: Literal["find"]
+    """The action type."""
+
+    url: str
+    """The URL of the page searched for the pattern."""
+
+
+Action: TypeAlias = Annotated[Union[ActionSearch, ActionOpenPage, ActionFind], PropertyInfo(discriminator="type")]
 
 
 class ResponseFunctionWebSearch(BaseModel):
     id: str
     """The unique ID of the web search tool call."""
 
+    action: Action
+    """
+    An object describing the specific action taken in this web search call. Includes
+    details on how the model used the web (search, open_page, find).
+    """
+
     status: Literal["in_progress", "searching", "completed", "failed"]
     """The status of the web search tool call."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_function_web_search_param.py b/portkey_ai/_vendor/openai/types/responses/response_function_web_search_param.py
index d413e60b..fc019d3e 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_function_web_search_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_function_web_search_param.py
@@ -2,15 +2,70 @@
 
 from __future__ import annotations
 
-from typing_extensions import Literal, Required, TypedDict
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
-__all__ = ["ResponseFunctionWebSearchParam"]
+__all__ = [
+    "ResponseFunctionWebSearchParam",
+    "Action",
+    "ActionSearch",
+    "ActionSearchSource",
+    "ActionOpenPage",
+    "ActionFind",
+]
+
+
+class ActionSearchSource(TypedDict, total=False):
+    type: Required[Literal["url"]]
+    """The type of source. Always `url`."""
+
+    url: Required[str]
+    """The URL of the source."""
+
+
+class ActionSearch(TypedDict, total=False):
+    query: Required[str]
+    """The search query."""
+
+    type: Required[Literal["search"]]
+    """The action type."""
+
+    sources: Iterable[ActionSearchSource]
+    """The sources used in the search."""
+
+
+class ActionOpenPage(TypedDict, total=False):
+    type: Required[Literal["open_page"]]
+    """The action type."""
+
+    url: Required[str]
+    """The URL opened by the model."""
+
+
+class ActionFind(TypedDict, total=False):
+    pattern: Required[str]
+    """The pattern or text to search for within the page."""
+
+    type: Required[Literal["find"]]
+    """The action type."""
+
+    url: Required[str]
+    """The URL of the page searched for the pattern."""
+
+
+Action: TypeAlias = Union[ActionSearch, ActionOpenPage, ActionFind]
 
 
 class ResponseFunctionWebSearchParam(TypedDict, total=False):
     id: Required[str]
     """The unique ID of the web search tool call."""
 
+    action: Required[Action]
+    """
+    An object describing the specific action taken in this web search call. Includes
+    details on how the model used the web (search, open_page, find).
+    """
+
     status: Required[Literal["in_progress", "searching", "completed", "failed"]]
     """The status of the web search tool call."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_includable.py b/portkey_ai/_vendor/openai/types/responses/response_includable.py
index 28869832..c17a0256 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_includable.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_includable.py
@@ -5,9 +5,10 @@
 
 __all__ = ["ResponseIncludable"]
 
 ResponseIncludable: TypeAlias = Literal[
+    "code_interpreter_call.outputs",
+    "computer_call_output.output.image_url",
     "file_search_call.results",
     "message.input_image.image_url",
-    "computer_call_output.output.image_url",
+    "message.output_text.logprobs",
     "reasoning.encrypted_content",
-    "code_interpreter_call.outputs",
 ]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_audio.py b/portkey_ai/_vendor/openai/types/responses/response_input_audio.py
new file mode 100644
index 00000000..9fef6de0
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_audio.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseInputAudio", "InputAudio"]
+
+
+class InputAudio(BaseModel):
+    data: str
+    """Base64-encoded audio data."""
+
+    format: Literal["mp3", "wav"]
+    """The format of the audio data. Currently supported formats are `mp3` and `wav`."""
+
+
+class ResponseInputAudio(BaseModel):
+    input_audio: InputAudio
+
+    type: Literal["input_audio"]
+    """The type of the input item. Always `input_audio`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_audio_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_audio_param.py
new file mode 100644
index 00000000..f3fc913c
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_audio_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseInputAudioParam", "InputAudio"]
+
+
+class InputAudio(TypedDict, total=False):
+    data: Required[str]
+    """Base64-encoded audio data."""
+
+    format: Required[Literal["mp3", "wav"]]
+    """The format of the audio data. Currently supported formats are `mp3` and `wav`."""
+
+
+class ResponseInputAudioParam(TypedDict, total=False):
+    input_audio: Required[InputAudio]
+
+    type: Required[Literal["input_audio"]]
+    """The type of the input item. Always `input_audio`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_content.py b/portkey_ai/_vendor/openai/types/responses/response_input_content.py
index 1726909a..376b9ffc 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_content.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_content.py
@@ -6,10 +6,12 @@
 from ..._utils import PropertyInfo
 from .response_input_file import ResponseInputFile
 from .response_input_text import ResponseInputText
+from .response_input_audio import ResponseInputAudio
 from .response_input_image import ResponseInputImage
 
 __all__ = ["ResponseInputContent"]
 
 ResponseInputContent: TypeAlias = Annotated[
-    Union[ResponseInputText, ResponseInputImage, ResponseInputFile], PropertyInfo(discriminator="type")
+    Union[ResponseInputText, ResponseInputImage, ResponseInputFile, ResponseInputAudio],
+    PropertyInfo(discriminator="type"),
 ]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_content_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_content_param.py
index 7791cdfd..a95e026a 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_content_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_content_param.py
@@ -7,8 +7,11 @@
 
 from .response_input_file_param import ResponseInputFileParam
 from .response_input_text_param import ResponseInputTextParam
+from .response_input_audio_param import ResponseInputAudioParam
 from .response_input_image_param import ResponseInputImageParam
 
 __all__ = ["ResponseInputContentParam"]
 
-ResponseInputContentParam: TypeAlias = Union[ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam]
+ResponseInputContentParam: TypeAlias = Union[
+    ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam, ResponseInputAudioParam
+]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_file.py b/portkey_ai/_vendor/openai/types/responses/response_input_file.py
index 00b35dc8..1eecd6a2 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_file.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_file.py
@@ -18,5 +18,8 @@ class ResponseInputFile(BaseModel):
     file_id: Optional[str] = None
     """The ID of the file to be sent to the model."""
 
+    file_url: Optional[str] = None
+    """The URL of the file to be sent to the model."""
+
     filename: Optional[str] = None
     """The name of the file to be sent to the model."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_file_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_file_param.py
index 61ae46f0..0b5f513e 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_file_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_file_param.py
@@ -18,5 +18,8 @@ class ResponseInputFileParam(TypedDict, total=False):
     file_id: Optional[str]
     """The ID of the file to be sent to the model."""
 
+    file_url: str
"""The URL of the file to be sent to the model.""" + filename: str """The name of the file to be sent to the model.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_item.py b/portkey_ai/_vendor/openai/types/responses/response_input_item.py new file mode 100644 index 00000000..d2b454fd --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/response_input_item.py @@ -0,0 +1,309 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .easy_input_message import EasyInputMessage +from .response_output_message import ResponseOutputMessage +from .response_reasoning_item import ResponseReasoningItem +from .response_custom_tool_call import ResponseCustomToolCall +from .response_computer_tool_call import ResponseComputerToolCall +from .response_function_tool_call import ResponseFunctionToolCall +from .response_function_web_search import ResponseFunctionWebSearch +from .response_file_search_tool_call import ResponseFileSearchToolCall +from .response_custom_tool_call_output import ResponseCustomToolCallOutput +from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall +from .response_input_message_content_list import ResponseInputMessageContentList +from .response_computer_tool_call_output_screenshot import ResponseComputerToolCallOutputScreenshot + +__all__ = [ + "ResponseInputItem", + "Message", + "ComputerCallOutput", + "ComputerCallOutputAcknowledgedSafetyCheck", + "FunctionCallOutput", + "ImageGenerationCall", + "LocalShellCall", + "LocalShellCallAction", + "LocalShellCallOutput", + "McpListTools", + "McpListToolsTool", + "McpApprovalRequest", + "McpApprovalResponse", + "McpCall", + "ItemReference", +] + + +class Message(BaseModel): + content: ResponseInputMessageContentList + """ + A list of one or many input items to the model, containing different content + types. + """ + + role: Literal["user", "system", "developer"] + """The role of the message input. One of `user`, `system`, or `developer`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + type: Optional[Literal["message"]] = None + """The type of the message input. Always set to `message`.""" + + +class ComputerCallOutputAcknowledgedSafetyCheck(BaseModel): + id: str + """The ID of the pending safety check.""" + + code: Optional[str] = None + """The type of the pending safety check.""" + + message: Optional[str] = None + """Details about the pending safety check.""" + + +class ComputerCallOutput(BaseModel): + call_id: str + """The ID of the computer tool call that produced the output.""" + + output: ResponseComputerToolCallOutputScreenshot + """A computer screenshot image used with the computer use tool.""" + + type: Literal["computer_call_output"] + """The type of the computer tool call output. Always `computer_call_output`.""" + + id: Optional[str] = None + """The ID of the computer tool call output.""" + + acknowledged_safety_checks: Optional[List[ComputerCallOutputAcknowledgedSafetyCheck]] = None + """ + The safety checks reported by the API that have been acknowledged by the + developer. 
+    """
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the message input.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
+    """
+
+
+class FunctionCallOutput(BaseModel):
+    call_id: str
+    """The unique ID of the function tool call generated by the model."""
+
+    output: str
+    """A JSON string of the output of the function tool call."""
+
+    type: Literal["function_call_output"]
+    """The type of the function tool call output. Always `function_call_output`."""
+
+    id: Optional[str] = None
+    """The unique ID of the function tool call output.
+
+    Populated when this item is returned via API.
+    """
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+
+class ImageGenerationCall(BaseModel):
+    id: str
+    """The unique ID of the image generation call."""
+
+    result: Optional[str] = None
+    """The generated image encoded in base64."""
+
+    status: Literal["in_progress", "completed", "generating", "failed"]
+    """The status of the image generation call."""
+
+    type: Literal["image_generation_call"]
+    """The type of the image generation call. Always `image_generation_call`."""
+
+
+class LocalShellCallAction(BaseModel):
+    command: List[str]
+    """The command to run."""
+
+    env: Dict[str, str]
+    """Environment variables to set for the command."""
+
+    type: Literal["exec"]
+    """The type of the local shell action. Always `exec`."""
+
+    timeout_ms: Optional[int] = None
+    """Optional timeout in milliseconds for the command."""
+
+    user: Optional[str] = None
+    """Optional user to run the command as."""
+
+    working_directory: Optional[str] = None
+    """Optional working directory to run the command in."""
+
+
+class LocalShellCall(BaseModel):
+    id: str
+    """The unique ID of the local shell call."""
+
+    action: LocalShellCallAction
+    """Execute a shell command on the server."""
+
+    call_id: str
+    """The unique ID of the local shell tool call generated by the model."""
+
+    status: Literal["in_progress", "completed", "incomplete"]
+    """The status of the local shell call."""
+
+    type: Literal["local_shell_call"]
+    """The type of the local shell call. Always `local_shell_call`."""
+
+
+class LocalShellCallOutput(BaseModel):
+    id: str
+    """The unique ID of the local shell tool call generated by the model."""
+
+    output: str
+    """A JSON string of the output of the local shell tool call."""
+
+    type: Literal["local_shell_call_output"]
+    """The type of the local shell tool call output. Always `local_shell_call_output`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the item. One of `in_progress`, `completed`, or `incomplete`."""
+
+
+class McpListToolsTool(BaseModel):
+    input_schema: object
+    """The JSON schema describing the tool's input."""
+
+    name: str
+    """The name of the tool."""
+
+    annotations: Optional[object] = None
+    """Additional annotations about the tool."""
+
+    description: Optional[str] = None
+    """The description of the tool."""
+
+
+class McpListTools(BaseModel):
+    id: str
+    """The unique ID of the list."""
+
+    server_label: str
+    """The label of the MCP server."""
+
+    tools: List[McpListToolsTool]
+    """The tools available on the server."""
+
+    type: Literal["mcp_list_tools"]
+    """The type of the item. Always `mcp_list_tools`."""
+
+    error: Optional[str] = None
+    """Error message if the server could not list tools."""
+
+
+class McpApprovalRequest(BaseModel):
+    id: str
+    """The unique ID of the approval request."""
+
+    arguments: str
+    """A JSON string of arguments for the tool."""
+
+    name: str
+    """The name of the tool to run."""
+
+    server_label: str
+    """The label of the MCP server making the request."""
+
+    type: Literal["mcp_approval_request"]
+    """The type of the item. Always `mcp_approval_request`."""
+
+
+class McpApprovalResponse(BaseModel):
+    approval_request_id: str
+    """The ID of the approval request being answered."""
+
+    approve: bool
+    """Whether the request was approved."""
+
+    type: Literal["mcp_approval_response"]
+    """The type of the item. Always `mcp_approval_response`."""
+
+    id: Optional[str] = None
+    """The unique ID of the approval response"""
+
+    reason: Optional[str] = None
+    """Optional reason for the decision."""
+
+
+class McpCall(BaseModel):
+    id: str
+    """The unique ID of the tool call."""
+
+    arguments: str
+    """A JSON string of the arguments passed to the tool."""
+
+    name: str
+    """The name of the tool that was run."""
+
+    server_label: str
+    """The label of the MCP server running the tool."""
+
+    type: Literal["mcp_call"]
+    """The type of the item. Always `mcp_call`."""
+
+    error: Optional[str] = None
+    """The error from the tool call, if any."""
+
+    output: Optional[str] = None
+    """The output from the tool call."""
+
+
+class ItemReference(BaseModel):
+    id: str
+    """The ID of the item to reference."""
+
+    type: Optional[Literal["item_reference"]] = None
+    """The type of item to reference. Always `item_reference`."""
+
+
+ResponseInputItem: TypeAlias = Annotated[
+    Union[
+        EasyInputMessage,
+        Message,
+        ResponseOutputMessage,
+        ResponseFileSearchToolCall,
+        ResponseComputerToolCall,
+        ComputerCallOutput,
+        ResponseFunctionWebSearch,
+        ResponseFunctionToolCall,
+        FunctionCallOutput,
+        ResponseReasoningItem,
+        ImageGenerationCall,
+        ResponseCodeInterpreterToolCall,
+        LocalShellCall,
+        LocalShellCallOutput,
+        McpListTools,
+        McpApprovalRequest,
+        McpApprovalResponse,
+        McpCall,
+        ResponseCustomToolCallOutput,
+        ResponseCustomToolCall,
+        ItemReference,
+    ],
+    PropertyInfo(discriminator="type"),
+]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_item_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_item_param.py
index 70cd9116..5ad83fc0 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_item_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_item_param.py
@@ -2,16 +2,19 @@
 
 from __future__ import annotations
 
-from typing import Dict, List, Union, Iterable, Optional
+from typing import Dict, Union, Iterable, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
+from ..._types import SequenceNotStr
 from .easy_input_message_param import EasyInputMessageParam
 from .response_output_message_param import ResponseOutputMessageParam
 from .response_reasoning_item_param import ResponseReasoningItemParam
+from .response_custom_tool_call_param import ResponseCustomToolCallParam
 from .response_computer_tool_call_param import ResponseComputerToolCallParam
 from .response_function_tool_call_param import ResponseFunctionToolCallParam
 from .response_function_web_search_param import ResponseFunctionWebSearchParam
 from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
+from .response_custom_tool_call_output_param import ResponseCustomToolCallOutputParam
 from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam
 from .response_input_message_content_list_param import ResponseInputMessageContentListParam
 from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
@@ -133,7 +136,7 @@ class ImageGenerationCall(TypedDict, total=False):
 
 
 class LocalShellCallAction(TypedDict, total=False):
-    command: Required[List[str]]
+    command: Required[SequenceNotStr[str]]
     """The command to run."""
 
     env: Required[Dict[str, str]]
@@ -298,5 +301,7 @@ class ItemReference(TypedDict, total=False):
     McpApprovalRequest,
     McpApprovalResponse,
     McpCall,
+    ResponseCustomToolCallOutputParam,
+    ResponseCustomToolCallParam,
     ItemReference,
 ]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_message_content_list_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_message_content_list_param.py
index 080613df..8e3778d1 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_message_content_list_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_message_content_list_param.py
@@ -7,10 +7,13 @@
 
 from .response_input_file_param import ResponseInputFileParam
 from .response_input_text_param import ResponseInputTextParam
+from .response_input_audio_param import ResponseInputAudioParam
 from .response_input_image_param import ResponseInputImageParam
 
 __all__ = ["ResponseInputMessageContentListParam", "ResponseInputContentParam"]
 
-ResponseInputContentParam: TypeAlias = Union[ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam]
+ResponseInputContentParam: TypeAlias = Union[
+    ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam, ResponseInputAudioParam
+]
 
 ResponseInputMessageContentListParam: TypeAlias = List[ResponseInputContentParam]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_input_param.py b/portkey_ai/_vendor/openai/types/responses/response_input_param.py
index 02499867..73eac624 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_input_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_input_param.py
@@ -5,13 +5,16 @@
 
 from typing import Dict, List, Union, Iterable, Optional
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
+from ..._types import SequenceNotStr
 from .easy_input_message_param import EasyInputMessageParam
 from .response_output_message_param import ResponseOutputMessageParam
 from .response_reasoning_item_param import ResponseReasoningItemParam
+from .response_custom_tool_call_param import ResponseCustomToolCallParam
 from .response_computer_tool_call_param import ResponseComputerToolCallParam
 from .response_function_tool_call_param import ResponseFunctionToolCallParam
 from .response_function_web_search_param import ResponseFunctionWebSearchParam
 from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
+from .response_custom_tool_call_output_param import ResponseCustomToolCallOutputParam
 from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam
 from .response_input_message_content_list_param import ResponseInputMessageContentListParam
 from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
@@ -134,7 +137,7 @@ class ImageGenerationCall(TypedDict, total=False):
 
 
 class LocalShellCallAction(TypedDict, total=False):
-    command: Required[List[str]]
+    command: Required[SequenceNotStr[str]]
     """The command to run."""
 
     env: Required[Dict[str, str]]
@@ -299,6 +302,8 @@ class ItemReference(TypedDict, total=False):
     McpApprovalRequest,
     McpApprovalResponse,
     McpCall,
+    ResponseCustomToolCallOutputParam,
+    ResponseCustomToolCallParam,
     ItemReference,
 ]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_delta_event.py
index d6651e69..54eff383 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_delta_event.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_delta_event.py
@@ -8,8 +8,11 @@
 
 
 class ResponseMcpCallArgumentsDeltaEvent(BaseModel):
-    delta: object
-    """The partial update to the arguments for the MCP tool call."""
+    delta: str
+    """
+    A JSON string containing the partial update to the arguments for the MCP tool
+    call.
+    """
 
     item_id: str
     """The unique identifier of the MCP tool call item being processed."""
@@ -20,5 +23,5 @@
     sequence_number: int
     """The sequence number of this event."""
 
-    type: Literal["response.mcp_call.arguments_delta"]
-    """The type of the event. Always 'response.mcp_call.arguments_delta'."""
+    type: Literal["response.mcp_call_arguments.delta"]
+    """The type of the event. Always 'response.mcp_call_arguments.delta'."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_done_event.py
index a7ce46ad..59ce9bc9 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_done_event.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_arguments_done_event.py
@@ -8,8 +8,8 @@
 
 
 class ResponseMcpCallArgumentsDoneEvent(BaseModel):
-    arguments: object
-    """The finalized arguments for the MCP tool call."""
+    arguments: str
+    """A JSON string containing the finalized arguments for the MCP tool call."""
 
     item_id: str
     """The unique identifier of the MCP tool call item being processed."""
@@ -20,5 +20,5 @@
     sequence_number: int
     """The sequence number of this event."""
 
-    type: Literal["response.mcp_call.arguments_done"]
-    """The type of the event. Always 'response.mcp_call.arguments_done'."""
+    type: Literal["response.mcp_call_arguments.done"]
+    """The type of the event. Always 'response.mcp_call_arguments.done'."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_completed_event.py b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_completed_event.py
index 009fbc3c..2fee5dff 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_completed_event.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_completed_event.py
@@ -8,6 +8,12 @@
 
 
 class ResponseMcpCallCompletedEvent(BaseModel):
+    item_id: str
+    """The ID of the MCP tool call item that completed."""
+
+    output_index: int
+    """The index of the output item that completed."""
+
     sequence_number: int
     """The sequence number of this event."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_failed_event.py b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_failed_event.py
index e6edc6de..ca41ab71 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_mcp_call_failed_event.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_call_failed_event.py
@@ -8,6 +8,12 @@
 
 
 class ResponseMcpCallFailedEvent(BaseModel):
+    item_id: str
+    """The ID of the MCP tool call item that failed."""
+
+    output_index: int
+    """The index of the output item that failed."""
+
     sequence_number: int
     """The sequence number of this event."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_completed_event.py b/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_completed_event.py
index 6290c3cf..c60ad88e 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_completed_event.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_completed_event.py
@@ -8,6 +8,12 @@
 
 
 class ResponseMcpListToolsCompletedEvent(BaseModel):
+    item_id: str
+    """The ID of the MCP tool call item that produced this output."""
+
+    output_index: int
+    """The index of the output item that was processed."""
+
     sequence_number: int
     """The sequence number of this event."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_failed_event.py b/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_failed_event.py
index 1f6e325b..0c966c44 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_failed_event.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_failed_event.py
@@ -8,6 +8,12 @@
 
 
 class ResponseMcpListToolsFailedEvent(BaseModel):
+    item_id: str
+    """The ID of the MCP tool call item that failed."""
+
+    output_index: int
+    """The index of the output item that failed."""
+
     sequence_number: int
     """The sequence number of this event."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_in_progress_event.py b/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_in_progress_event.py
index 236e5fe6..f451db1e 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_in_progress_event.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_mcp_list_tools_in_progress_event.py
@@ -8,6 +8,12 @@
 
 
 class ResponseMcpListToolsInProgressEvent(BaseModel):
+    item_id: str
+    """The ID of the MCP tool call item that is being processed."""
+
+    output_index: int
+    """The index of the output item that is being processed."""
+
     sequence_number: int
     """The sequence number of this event."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_item.py b/portkey_ai/_vendor/openai/types/responses/response_output_item.py
index 62f8f6fb..2d3ee7b6 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_output_item.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_output_item.py
@@ -7,6 +7,7 @@
 from ..._models import BaseModel
 from .response_output_message import ResponseOutputMessage
 from .response_reasoning_item import ResponseReasoningItem
+from .response_custom_tool_call import ResponseCustomToolCall
 from .response_computer_tool_call import ResponseComputerToolCall
 from .response_function_tool_call import ResponseFunctionToolCall
 from .response_function_web_search import ResponseFunctionWebSearch
@@ -161,6 +162,7 @@ class McpApprovalRequest(BaseModel):
         McpCall,
         McpListTools,
         McpApprovalRequest,
+        ResponseCustomToolCall,
     ],
     PropertyInfo(discriminator="type"),
 ]
diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_refusal.py b/portkey_ai/_vendor/openai/types/responses/response_output_refusal.py
index eba58107..685c8722 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_output_refusal.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_output_refusal.py
@@ -9,7 +9,7 @@
 
 class ResponseOutputRefusal(BaseModel):
     refusal: str
-    """The refusal explanationfrom the model."""
+    """The refusal explanation from the model."""
 
     type: Literal["refusal"]
     """The type of the refusal. Always `refusal`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_refusal_param.py b/portkey_ai/_vendor/openai/types/responses/response_output_refusal_param.py
index 53140a60..54cfaf07 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_output_refusal_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_output_refusal_param.py
@@ -9,7 +9,7 @@
 
 class ResponseOutputRefusalParam(TypedDict, total=False):
     refusal: Required[str]
-    """The refusal explanationfrom the model."""
+    """The refusal explanation from the model."""
 
     type: Required[Literal["refusal"]]
     """The type of the refusal. Always `refusal`."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_text.py b/portkey_ai/_vendor/openai/types/responses/response_output_text.py
index 1ea9a4ba..aa97b629 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_output_text.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_output_text.py
@@ -22,6 +22,9 @@ class AnnotationFileCitation(BaseModel):
     file_id: str
     """The ID of the file."""
 
+    filename: str
+    """The filename of the file cited."""
+
     index: int
     """The index of the file in the list of files."""
@@ -56,6 +59,9 @@ class AnnotationContainerFileCitation(BaseModel):
     file_id: str
     """The ID of the file."""
 
+    filename: str
+    """The filename of the container file cited."""
+
     start_index: int
     """The index of the first character of the container file citation in the message."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_text_annotation_added_event.py b/portkey_ai/_vendor/openai/types/responses/response_output_text_annotation_added_event.py
index ce96790c..62d8f728 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_output_text_annotation_added_event.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_output_text_annotation_added_event.py
@@ -26,5 +26,5 @@ class ResponseOutputTextAnnotationAddedEvent(BaseModel):
     sequence_number: int
     """The sequence number of this event."""
 
-    type: Literal["response.output_text_annotation.added"]
-    """The type of the event. Always 'response.output_text_annotation.added'."""
+    type: Literal["response.output_text.annotation.added"]
+    """The type of the event. Always 'response.output_text.annotation.added'."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_output_text_param.py b/portkey_ai/_vendor/openai/types/responses/response_output_text_param.py
index 207901e8..63d2d394 100644
--- a/portkey_ai/_vendor/openai/types/responses/response_output_text_param.py
+++ b/portkey_ai/_vendor/openai/types/responses/response_output_text_param.py
@@ -21,6 +21,9 @@ class AnnotationFileCitation(TypedDict, total=False):
     file_id: Required[str]
     """The ID of the file."""
 
+    filename: Required[str]
+    """The filename of the file cited."""
+
     index: Required[int]
     """The index of the file in the list of files."""
@@ -55,6 +58,9 @@ class AnnotationContainerFileCitation(TypedDict, total=False):
     file_id: Required[str]
     """The ID of the file."""
 
+    filename: Required[str]
+    """The filename of the container file cited."""
+
     start_index: Required[int]
     """The index of the first character of the container file citation in the message."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_prompt.py b/portkey_ai/_vendor/openai/types/responses/response_prompt.py
new file mode 100644
index 00000000..537c2f8f
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_prompt.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Union, Optional
+from typing_extensions import TypeAlias
+
+from ..._models import BaseModel
+from .response_input_file import ResponseInputFile
+from .response_input_text import ResponseInputText
+from .response_input_image import ResponseInputImage
+
+__all__ = ["ResponsePrompt", "Variables"]
+
+Variables: TypeAlias = Union[str, ResponseInputText, ResponseInputImage, ResponseInputFile]
+
+
+class ResponsePrompt(BaseModel):
+    id: str
+    """The unique identifier of the prompt template to use."""
+
+    variables: Optional[Dict[str, Variables]] = None
+    """Optional map of values to substitute in for variables in your prompt.
+
+    The substitution values can either be strings, or other Response input types
+    like images or files.
+    """
+
+    version: Optional[str] = None
+    """Optional version of the prompt template."""
diff --git a/portkey_ai/_vendor/openai/types/responses/response_prompt_param.py b/portkey_ai/_vendor/openai/types/responses/response_prompt_param.py
new file mode 100644
index 00000000..d935fa51
--- /dev/null
+++ b/portkey_ai/_vendor/openai/types/responses/response_prompt_param.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Optional
+from typing_extensions import Required, TypeAlias, TypedDict
+
+from .response_input_file_param import ResponseInputFileParam
+from .response_input_text_param import ResponseInputTextParam
+from .response_input_image_param import ResponseInputImageParam
+
+__all__ = ["ResponsePromptParam", "Variables"]
+
+Variables: TypeAlias = Union[str, ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam]
+
+
+class ResponsePromptParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique identifier of the prompt template to use."""
+
+    variables: Optional[Dict[str, Variables]]
+    """Optional map of values to substitute in for variables in your prompt.
+ + The substitution values can either be strings, or other Response input types + like images or files. + """ + + version: Optional[str] + """Optional version of the prompt template.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_delta_event.py deleted file mode 100644 index f37d3d37..00000000 --- a/portkey_ai/_vendor/openai/types/responses/response_reasoning_delta_event.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseReasoningDeltaEvent"] - - -class ResponseReasoningDeltaEvent(BaseModel): - content_index: int - """The index of the reasoning content part within the output item.""" - - delta: object - """The partial update to the reasoning content.""" - - item_id: str - """The unique identifier of the item for which reasoning is being updated.""" - - output_index: int - """The index of the output item in the response's output array.""" - - sequence_number: int - """The sequence number of this event.""" - - type: Literal["response.reasoning.delta"] - """The type of the event. Always 'response.reasoning.delta'.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_done_event.py deleted file mode 100644 index 9f8b127d..00000000 --- a/portkey_ai/_vendor/openai/types/responses/response_reasoning_done_event.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseReasoningDoneEvent"] - - -class ResponseReasoningDoneEvent(BaseModel): - content_index: int - """The index of the reasoning content part within the output item.""" - - item_id: str - """The unique identifier of the item for which reasoning is finalized.""" - - output_index: int - """The index of the output item in the response's output array.""" - - sequence_number: int - """The sequence number of this event.""" - - text: str - """The finalized reasoning text.""" - - type: Literal["response.reasoning.done"] - """The type of the event. Always 'response.reasoning.done'.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_item.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_item.py index f5da7802..e5cb094e 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_reasoning_item.py +++ b/portkey_ai/_vendor/openai/types/responses/response_reasoning_item.py @@ -5,29 +5,38 @@ from ..._models import BaseModel -__all__ = ["ResponseReasoningItem", "Summary"] +__all__ = ["ResponseReasoningItem", "Summary", "Content"] class Summary(BaseModel): text: str - """ - A short summary of the reasoning used by the model when generating the response. - """ + """A summary of the reasoning output from the model so far.""" type: Literal["summary_text"] """The type of the object. Always `summary_text`.""" +class Content(BaseModel): + text: str + """Reasoning text output from the model.""" + + type: Literal["reasoning_text"] + """The type of the object. 
Always `reasoning_text`.""" + + class ResponseReasoningItem(BaseModel): id: str """The unique identifier of the reasoning content.""" summary: List[Summary] - """Reasoning text contents.""" + """Reasoning summary content.""" type: Literal["reasoning"] """The type of the object. Always `reasoning`.""" + content: Optional[List[Content]] = None + """Reasoning text content.""" + encrypted_content: Optional[str] = None """ The encrypted content of the reasoning item - populated when a response is diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_item_param.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_item_param.py index 2cfa5312..042b6c05 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_reasoning_item_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_reasoning_item_param.py @@ -5,29 +5,38 @@ from typing import Iterable, Optional from typing_extensions import Literal, Required, TypedDict -__all__ = ["ResponseReasoningItemParam", "Summary"] +__all__ = ["ResponseReasoningItemParam", "Summary", "Content"] class Summary(TypedDict, total=False): text: Required[str] - """ - A short summary of the reasoning used by the model when generating the response. - """ + """A summary of the reasoning output from the model so far.""" type: Required[Literal["summary_text"]] """The type of the object. Always `summary_text`.""" +class Content(TypedDict, total=False): + text: Required[str] + """Reasoning text output from the model.""" + + type: Required[Literal["reasoning_text"]] + """The type of the object. Always `reasoning_text`.""" + + class ResponseReasoningItemParam(TypedDict, total=False): id: Required[str] """The unique identifier of the reasoning content.""" summary: Required[Iterable[Summary]] - """Reasoning text contents.""" + """Reasoning summary content.""" type: Required[Literal["reasoning"]] """The type of the object. Always `reasoning`.""" + content: Iterable[Content] + """Reasoning text content.""" + encrypted_content: Optional[str] """ The encrypted content of the reasoning item - populated when a response is diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_delta_event.py deleted file mode 100644 index 519a4f24..00000000 --- a/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_delta_event.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseReasoningSummaryDeltaEvent"] - - -class ResponseReasoningSummaryDeltaEvent(BaseModel): - delta: object - """The partial update to the reasoning summary content.""" - - item_id: str - """ - The unique identifier of the item for which the reasoning summary is being - updated. - """ - - output_index: int - """The index of the output item in the response's output array.""" - - sequence_number: int - """The sequence number of this event.""" - - summary_index: int - """The index of the summary part within the output item.""" - - type: Literal["response.reasoning_summary.delta"] - """The type of the event. 
Always 'response.reasoning_summary.delta'.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_done_event.py deleted file mode 100644 index 98bcf9cb..00000000 --- a/portkey_ai/_vendor/openai/types/responses/response_reasoning_summary_done_event.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseReasoningSummaryDoneEvent"] - - -class ResponseReasoningSummaryDoneEvent(BaseModel): - item_id: str - """The unique identifier of the item for which the reasoning summary is finalized.""" - - output_index: int - """The index of the output item in the response's output array.""" - - sequence_number: int - """The sequence number of this event.""" - - summary_index: int - """The index of the summary part within the output item.""" - - text: str - """The finalized reasoning summary text.""" - - type: Literal["response.reasoning_summary.done"] - """The type of the event. Always 'response.reasoning_summary.done'.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_text_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_text_delta_event.py new file mode 100644 index 00000000..e1df893b --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/response_reasoning_text_delta_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseReasoningTextDeltaEvent"] + + +class ResponseReasoningTextDeltaEvent(BaseModel): + content_index: int + """The index of the reasoning content part this delta is associated with.""" + + delta: str + """The text delta that was added to the reasoning content.""" + + item_id: str + """The ID of the item this reasoning text delta is associated with.""" + + output_index: int + """The index of the output item this reasoning text delta is associated with.""" + + sequence_number: int + """The sequence number of this event.""" + + type: Literal["response.reasoning_text.delta"] + """The type of the event. Always `response.reasoning_text.delta`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_reasoning_text_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_reasoning_text_done_event.py new file mode 100644 index 00000000..d22d984e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/response_reasoning_text_done_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseReasoningTextDoneEvent"] + + +class ResponseReasoningTextDoneEvent(BaseModel): + content_index: int + """The index of the reasoning content part.""" + + item_id: str + """The ID of the item this reasoning text is associated with.""" + + output_index: int + """The index of the output item this reasoning text is associated with.""" + + sequence_number: int + """The sequence number of this event.""" + + text: str + """The full text of the completed reasoning content.""" + + type: Literal["response.reasoning_text.done"] + """The type of the event. 
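The deleted `response.reasoning.delta`/`.done` and `response.reasoning_summary.delta`/`.done` events are replaced by the `response.reasoning_text.delta`/`.done` events defined above. A sketch of consuming them from a stream — whether a given model actually emits reasoning text is an assumption:

```python
from openai import OpenAI

client = OpenAI()

with client.responses.stream(
    model="o4-mini",
    input="What is 17 * 24?",
) as stream:
    for event in stream:
        if event.type == "response.reasoning_text.delta":
            # Incremental reasoning text for one content part.
            print(event.delta, end="", flush=True)
        elif event.type == "response.reasoning_text.done":
            # The reasoning content part is finalized.
            print()
        elif event.type == "response.output_text.delta":
            print(event.delta, end="", flush=True)
```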
Always `response.reasoning_text.done`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_retrieve_params.py b/portkey_ai/_vendor/openai/types/responses/response_retrieve_params.py index a092bd7f..4013db85 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_retrieve_params.py +++ b/portkey_ai/_vendor/openai/types/responses/response_retrieve_params.py @@ -17,6 +17,17 @@ class ResponseRetrieveParamsBase(TypedDict, total=False): See the `include` parameter for Response creation above for more information. """ + include_obfuscation: bool + """When true, stream obfuscation will be enabled. + + Stream obfuscation adds random characters to an `obfuscation` field on streaming + delta events to normalize payload sizes as a mitigation to certain side-channel + attacks. These obfuscation fields are included by default, but add a small + amount of overhead to the data stream. You can set `include_obfuscation` to + false to optimize for bandwidth if you trust the network links between your + application and the OpenAI API. + """ + starting_after: int """The sequence number of the event after which to start streaming.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_stream_event.py b/portkey_ai/_vendor/openai/types/responses/response_stream_event.py index 24a83f1a..c0a317cd 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_stream_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_stream_event.py @@ -17,21 +17,19 @@ from .response_in_progress_event import ResponseInProgressEvent from .response_refusal_done_event import ResponseRefusalDoneEvent from .response_refusal_delta_event import ResponseRefusalDeltaEvent -from .response_reasoning_done_event import ResponseReasoningDoneEvent from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent -from .response_reasoning_delta_event import ResponseReasoningDeltaEvent from .response_output_item_done_event import ResponseOutputItemDoneEvent from .response_content_part_done_event import ResponseContentPartDoneEvent from .response_output_item_added_event import ResponseOutputItemAddedEvent from .response_content_part_added_event import ResponseContentPartAddedEvent from .response_mcp_call_completed_event import ResponseMcpCallCompletedEvent +from .response_reasoning_text_done_event import ResponseReasoningTextDoneEvent from .response_mcp_call_in_progress_event import ResponseMcpCallInProgressEvent +from .response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent from .response_mcp_list_tools_failed_event import ResponseMcpListToolsFailedEvent from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent -from .response_reasoning_summary_done_event import ResponseReasoningSummaryDoneEvent from .response_mcp_call_arguments_done_event import ResponseMcpCallArgumentsDoneEvent -from .response_reasoning_summary_delta_event import ResponseReasoningSummaryDeltaEvent from .response_image_gen_call_completed_event import ResponseImageGenCallCompletedEvent from .response_mcp_call_arguments_delta_event import ResponseMcpCallArgumentsDeltaEvent from .response_mcp_list_tools_completed_event import ResponseMcpListToolsCompletedEvent @@ -42,9 +40,11 @@ from .response_file_search_call_searching_event import ResponseFileSearchCallSearchingEvent from .response_image_gen_call_in_progress_event import ResponseImageGenCallInProgressEvent from 
.response_mcp_list_tools_in_progress_event import ResponseMcpListToolsInProgressEvent +from .response_custom_tool_call_input_done_event import ResponseCustomToolCallInputDoneEvent from .response_reasoning_summary_part_done_event import ResponseReasoningSummaryPartDoneEvent from .response_reasoning_summary_text_done_event import ResponseReasoningSummaryTextDoneEvent from .response_web_search_call_in_progress_event import ResponseWebSearchCallInProgressEvent +from .response_custom_tool_call_input_delta_event import ResponseCustomToolCallInputDeltaEvent from .response_file_search_call_in_progress_event import ResponseFileSearchCallInProgressEvent from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent from .response_image_gen_call_partial_image_event import ResponseImageGenCallPartialImageEvent @@ -90,6 +90,8 @@ ResponseReasoningSummaryPartDoneEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseReasoningSummaryTextDoneEvent, + ResponseReasoningTextDeltaEvent, + ResponseReasoningTextDoneEvent, ResponseRefusalDeltaEvent, ResponseRefusalDoneEvent, ResponseTextDeltaEvent, @@ -111,10 +113,8 @@ ResponseMcpListToolsInProgressEvent, ResponseOutputTextAnnotationAddedEvent, ResponseQueuedEvent, - ResponseReasoningDeltaEvent, - ResponseReasoningDoneEvent, - ResponseReasoningSummaryDeltaEvent, - ResponseReasoningSummaryDoneEvent, + ResponseCustomToolCallInputDeltaEvent, + ResponseCustomToolCallInputDoneEvent, ], PropertyInfo(discriminator="type"), ] diff --git a/portkey_ai/_vendor/openai/types/responses/response_text_config.py b/portkey_ai/_vendor/openai/types/responses/response_text_config.py index a1894a91..c53546da 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_text_config.py +++ b/portkey_ai/_vendor/openai/types/responses/response_text_config.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import Optional +from typing_extensions import Literal from ..._models import BaseModel from .response_format_text_config import ResponseFormatTextConfig @@ -24,3 +25,11 @@ class ResponseTextConfig(BaseModel): ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. """ + + verbosity: Optional[Literal["low", "medium", "high"]] = None + """Constrains the verbosity of the model's response. + + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. + """ diff --git a/portkey_ai/_vendor/openai/types/responses/response_text_config_param.py b/portkey_ai/_vendor/openai/types/responses/response_text_config_param.py index aec064bf..1229fce3 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_text_config_param.py +++ b/portkey_ai/_vendor/openai/types/responses/response_text_config_param.py @@ -2,7 +2,8 @@ from __future__ import annotations -from typing_extensions import TypedDict +from typing import Optional +from typing_extensions import Literal, TypedDict from .response_format_text_config_param import ResponseFormatTextConfigParam @@ -25,3 +26,11 @@ class ResponseTextConfigParam(TypedDict, total=False): ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. """ + + verbosity: Optional[Literal["low", "medium", "high"]] + """Constrains the verbosity of the model's response. 
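Two of the knobs documented above — `verbosity` on the text config and `include_obfuscation` on retrieve-time streaming — might be exercised together as follows. This is a sketch: it assumes a model that honors `verbosity` and a background response that can be re-streamed via `retrieve`.

```python
from openai import OpenAI

client = OpenAI()

# Ask for a terse answer via the new text.verbosity setting.
response = client.responses.create(
    model="gpt-5",
    input="Summarize the plot of Hamlet.",
    text={"verbosity": "low"},
    background=True,  # background responses can be streamed via retrieve
)

# Re-stream the stored response, skipping the obfuscation padding
# to save bandwidth on a trusted network link.
for event in client.responses.retrieve(
    response.id,
    stream=True,
    include_obfuscation=False,
):
    if event.type == "response.completed":
        print(event.response.output_text)
```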
+ + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. + """ diff --git a/portkey_ai/_vendor/openai/types/responses/response_text_delta_event.py b/portkey_ai/_vendor/openai/types/responses/response_text_delta_event.py index 7e4aec70..b5379b7a 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_text_delta_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_text_delta_event.py @@ -1,10 +1,30 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import List, Optional from typing_extensions import Literal from ..._models import BaseModel -__all__ = ["ResponseTextDeltaEvent"] +__all__ = ["ResponseTextDeltaEvent", "Logprob", "LogprobTopLogprob"] + + +class LogprobTopLogprob(BaseModel): + token: Optional[str] = None + """A possible text token.""" + + logprob: Optional[float] = None + """The log probability of this token.""" + + +class Logprob(BaseModel): + token: str + """A possible text token.""" + + logprob: float + """The log probability of this token.""" + + top_logprobs: Optional[List[LogprobTopLogprob]] = None + """The log probability of the top 20 most likely tokens.""" class ResponseTextDeltaEvent(BaseModel): @@ -17,6 +37,9 @@ class ResponseTextDeltaEvent(BaseModel): item_id: str """The ID of the output item that the text delta was added to.""" + logprobs: List[Logprob] + """The log probabilities of the tokens in the delta.""" + output_index: int """The index of the output item that the text delta was added to.""" diff --git a/portkey_ai/_vendor/openai/types/responses/response_text_done_event.py b/portkey_ai/_vendor/openai/types/responses/response_text_done_event.py index 0d5ed4dd..d9776a18 100644 --- a/portkey_ai/_vendor/openai/types/responses/response_text_done_event.py +++ b/portkey_ai/_vendor/openai/types/responses/response_text_done_event.py @@ -1,10 +1,30 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import List, Optional from typing_extensions import Literal from ..._models import BaseModel -__all__ = ["ResponseTextDoneEvent"] +__all__ = ["ResponseTextDoneEvent", "Logprob", "LogprobTopLogprob"] + + +class LogprobTopLogprob(BaseModel): + token: Optional[str] = None + """A possible text token.""" + + logprob: Optional[float] = None + """The log probability of this token.""" + + +class Logprob(BaseModel): + token: str + """A possible text token.""" + + logprob: float + """The log probability of this token.""" + + top_logprobs: Optional[List[LogprobTopLogprob]] = None + """The log probability of the top 20 most likely tokens.""" class ResponseTextDoneEvent(BaseModel): @@ -14,6 +34,9 @@ class ResponseTextDoneEvent(BaseModel): item_id: str """The ID of the output item that the text content is finalized.""" + logprobs: List[Logprob] + """The log probabilities of the tokens in the delta.""" + output_index: int """The index of the output item that the text content is finalized.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool.py b/portkey_ai/_vendor/openai/types/responses/tool.py index 904c474e..482d4e75 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool.py +++ b/portkey_ai/_vendor/openai/types/responses/tool.py @@ -3,18 +3,22 @@ from typing import Dict, List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias +from . 
import web_search_tool from ..._utils import PropertyInfo from ..._models import BaseModel +from .custom_tool import CustomTool from .computer_tool import ComputerTool from .function_tool import FunctionTool from .web_search_tool import WebSearchTool from .file_search_tool import FileSearchTool +from .web_search_preview_tool import WebSearchPreviewTool __all__ = [ "Tool", + "WebSearchTool", "Mcp", "McpAllowedTools", - "McpAllowedToolsMcpAllowedToolsFilter", + "McpAllowedToolsMcpToolFilter", "McpRequireApproval", "McpRequireApprovalMcpToolApprovalFilter", "McpRequireApprovalMcpToolApprovalFilterAlways", @@ -27,31 +31,57 @@ "LocalShell", ] +WebSearchToolFilters = web_search_tool.Filters +WebSearchToolUserLocation = web_search_tool.UserLocation + +class McpAllowedToolsMcpToolFilter(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ -class McpAllowedToolsMcpAllowedToolsFilter(BaseModel): tool_names: Optional[List[str]] = None """List of allowed tool names.""" -McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpAllowedToolsFilter, None] +McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpToolFilter, None] class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + tool_names: Optional[List[str]] = None - """List of tools that require approval.""" + """List of allowed tool names.""" class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + tool_names: Optional[List[str]] = None - """List of tools that do not require approval.""" + """List of allowed tool names.""" class McpRequireApprovalMcpToolApprovalFilter(BaseModel): always: Optional[McpRequireApprovalMcpToolApprovalFilterAlways] = None - """A list of tools that always require approval.""" + """A filter object to specify which tools are allowed.""" never: Optional[McpRequireApprovalMcpToolApprovalFilterNever] = None - """A list of tools that never require approval.""" + """A filter object to specify which tools are allowed.""" McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None] @@ -61,15 +91,49 @@ class Mcp(BaseModel): server_label: str """A label for this MCP server, used to identify it in tool calls.""" - server_url: str - """The URL for the MCP server.""" - type: Literal["mcp"] """The type of the MCP tool. Always `mcp`.""" allowed_tools: Optional[McpAllowedTools] = None """List of allowed tool names or a filter object.""" + authorization: Optional[str] = None + """ + An OAuth access token that can be used with a remote MCP server, either with a + custom MCP server URL or a service connector. Your application must handle the + OAuth authorization flow and provide the token here. 
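The `read_only` field added to the MCP tool filters above matches tools the server annotates with `readOnlyHint`. A sketch of attaching a remote MCP server while exposing only its read-only tools — the server label and URL are placeholders:

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-5",
    input="What transport protocols does the MCP spec define?",
    tools=[
        {
            "type": "mcp",
            "server_label": "deepwiki",                    # placeholder label
            "server_url": "https://mcp.deepwiki.com/mcp",  # placeholder URL
            # Only expose tools the server marks read-only.
            "allowed_tools": {"read_only": True},
            "require_approval": "never",
        }
    ],
)
```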
+ """ + + connector_id: Optional[ + Literal[ + "connector_dropbox", + "connector_gmail", + "connector_googlecalendar", + "connector_googledrive", + "connector_microsoftteams", + "connector_outlookcalendar", + "connector_outlookemail", + "connector_sharepoint", + ] + ] = None + """Identifier for service connectors, like those available in ChatGPT. + + One of `server_url` or `connector_id` must be provided. Learn more about service + connectors + [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). + + Currently supported `connector_id` values are: + + - Dropbox: `connector_dropbox` + - Gmail: `connector_gmail` + - Google Calendar: `connector_googlecalendar` + - Google Drive: `connector_googledrive` + - Microsoft Teams: `connector_microsoftteams` + - Outlook Calendar: `connector_outlookcalendar` + - Outlook Email: `connector_outlookemail` + - SharePoint: `connector_sharepoint` + """ + headers: Optional[Dict[str, str]] = None """Optional HTTP headers to send to the MCP server. @@ -79,6 +143,15 @@ class Mcp(BaseModel): require_approval: Optional[McpRequireApproval] = None """Specify which of the MCP server's tools require approval.""" + server_description: Optional[str] = None + """Optional description of the MCP server, used to provide more context.""" + + server_url: Optional[str] = None + """The URL for the MCP server. + + One of `server_url` or `connector_id` must be provided. + """ + class CodeInterpreterContainerCodeInterpreterToolAuto(BaseModel): type: Literal["auto"] @@ -121,6 +194,13 @@ class ImageGeneration(BaseModel): One of `transparent`, `opaque`, or `auto`. Default: `auto`. """ + input_fidelity: Optional[Literal["high", "low"]] = None + """ + Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + """ + input_image_mask: Optional[ImageGenerationInputImageMask] = None """Optional mask for inpainting. @@ -167,6 +247,17 @@ class LocalShell(BaseModel): Tool: TypeAlias = Annotated[ - Union[FunctionTool, FileSearchTool, WebSearchTool, ComputerTool, Mcp, CodeInterpreter, ImageGeneration, LocalShell], + Union[ + FunctionTool, + FileSearchTool, + ComputerTool, + WebSearchTool, + Mcp, + CodeInterpreter, + ImageGeneration, + LocalShell, + CustomTool, + WebSearchPreviewTool, + ], PropertyInfo(discriminator="type"), ] diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_allowed.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_allowed.py new file mode 100644 index 00000000..d7921dcb --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_allowed.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ToolChoiceAllowed"] + + +class ToolChoiceAllowed(BaseModel): + mode: Literal["auto", "required"] + """Constrains the tools available to the model to a pre-defined set. + + `auto` allows the model to pick from among the allowed tools and generate a + message. + + `required` requires the model to call one or more of the allowed tools. + """ + + tools: List[Dict[str, object]] + """A list of tool definitions that the model should be allowed to call. 
+ + For the Responses API, the list of tool definitions might look like: + + ```json + [ + { "type": "function", "name": "get_weather" }, + { "type": "mcp", "server_label": "deepwiki" }, + { "type": "image_generation" } + ] + ``` + """ + + type: Literal["allowed_tools"] + """Allowed tool configuration type. Always `allowed_tools`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_allowed_param.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_allowed_param.py new file mode 100644 index 00000000..0712cab4 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_allowed_param.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ToolChoiceAllowedParam"] + + +class ToolChoiceAllowedParam(TypedDict, total=False): + mode: Required[Literal["auto", "required"]] + """Constrains the tools available to the model to a pre-defined set. + + `auto` allows the model to pick from among the allowed tools and generate a + message. + + `required` requires the model to call one or more of the allowed tools. + """ + + tools: Required[Iterable[Dict[str, object]]] + """A list of tool definitions that the model should be allowed to call. + + For the Responses API, the list of tool definitions might look like: + + ```json + [ + { "type": "function", "name": "get_weather" }, + { "type": "mcp", "server_label": "deepwiki" }, + { "type": "image_generation" } + ] + ``` + """ + + type: Required[Literal["allowed_tools"]] + """Allowed tool configuration type. Always `allowed_tools`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_custom.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_custom.py new file mode 100644 index 00000000..d600e536 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_custom.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ToolChoiceCustom"] + + +class ToolChoiceCustom(BaseModel): + name: str + """The name of the custom tool to call.""" + + type: Literal["custom"] + """For custom tool calling, the type is always `custom`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_custom_param.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_custom_param.py new file mode 100644 index 00000000..55bc53b7 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_custom_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ToolChoiceCustomParam"] + + +class ToolChoiceCustomParam(TypedDict, total=False): + name: Required[str] + """The name of the custom tool to call.""" + + type: Required[Literal["custom"]] + """For custom tool calling, the type is always `custom`.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_mcp.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_mcp.py new file mode 100644 index 00000000..8763d816 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_mcp.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
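`ToolChoiceAllowed` constrains the model to a subset of the attached tools, and `mode="required"` additionally forces a call. A sketch mirroring the docstring's own JSON example — `get_weather` is a hypothetical function tool:

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-5",
    input="What's the weather in Paris?",
    tools=[
        {
            "type": "function",
            "name": "get_weather",  # hypothetical function tool
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
                "additionalProperties": False,
            },
            "strict": True,
        },
        {"type": "image_generation"},
    ],
    # Only get_weather may be chosen, and the model must call it.
    tool_choice={
        "type": "allowed_tools",
        "mode": "required",
        "tools": [{"type": "function", "name": "get_weather"}],
    },
)
```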
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ToolChoiceMcp"] + + +class ToolChoiceMcp(BaseModel): + server_label: str + """The label of the MCP server to use.""" + + type: Literal["mcp"] + """For MCP tools, the type is always `mcp`.""" + + name: Optional[str] = None + """The name of the tool to call on the server.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_mcp_param.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_mcp_param.py new file mode 100644 index 00000000..afcceb8c --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_mcp_param.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ToolChoiceMcpParam"] + + +class ToolChoiceMcpParam(TypedDict, total=False): + server_label: Required[str] + """The label of the MCP server to use.""" + + type: Required[Literal["mcp"]] + """For MCP tools, the type is always `mcp`.""" + + name: Optional[str] + """The name of the tool to call on the server.""" diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_types.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_types.py index b9683243..b31a8260 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool_choice_types.py +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_types.py @@ -15,7 +15,6 @@ class ToolChoiceTypes(BaseModel): "web_search_preview_2025_03_11", "image_generation", "code_interpreter", - "mcp", ] """The type of hosted tool the model should to use. @@ -28,6 +27,5 @@ class ToolChoiceTypes(BaseModel): - `web_search_preview` - `computer_use_preview` - `code_interpreter` - - `mcp` - `image_generation` """ diff --git a/portkey_ai/_vendor/openai/types/responses/tool_choice_types_param.py b/portkey_ai/_vendor/openai/types/responses/tool_choice_types_param.py index 17590075..15e03574 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool_choice_types_param.py +++ b/portkey_ai/_vendor/openai/types/responses/tool_choice_types_param.py @@ -16,7 +16,6 @@ class ToolChoiceTypesParam(TypedDict, total=False): "web_search_preview_2025_03_11", "image_generation", "code_interpreter", - "mcp", ] ] """The type of hosted tool the model should to use. @@ -30,6 +29,5 @@ class ToolChoiceTypesParam(TypedDict, total=False): - `web_search_preview` - `computer_use_preview` - `code_interpreter` - - `mcp` - `image_generation` """ diff --git a/portkey_ai/_vendor/openai/types/responses/tool_param.py b/portkey_ai/_vendor/openai/types/responses/tool_param.py index 4174560d..54bc271c 100644 --- a/portkey_ai/_vendor/openai/types/responses/tool_param.py +++ b/portkey_ai/_vendor/openai/types/responses/tool_param.py @@ -2,20 +2,24 @@ from __future__ import annotations -from typing import Dict, List, Union, Optional +from typing import Dict, Union, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from . 
import web_search_tool_param +from ..chat import ChatCompletionFunctionToolParam +from ..._types import SequenceNotStr +from .custom_tool_param import CustomToolParam from .computer_tool_param import ComputerToolParam from .function_tool_param import FunctionToolParam from .web_search_tool_param import WebSearchToolParam from .file_search_tool_param import FileSearchToolParam -from ..chat.chat_completion_tool_param import ChatCompletionToolParam +from .web_search_preview_tool_param import WebSearchPreviewToolParam __all__ = [ "ToolParam", "Mcp", "McpAllowedTools", - "McpAllowedToolsMcpAllowedToolsFilter", + "McpAllowedToolsMcpToolFilter", "McpRequireApproval", "McpRequireApprovalMcpToolApprovalFilter", "McpRequireApprovalMcpToolApprovalFilterAlways", @@ -28,31 +32,58 @@ "LocalShell", ] +WebSearchTool = web_search_tool_param.WebSearchToolParam +WebSearchToolFilters = web_search_tool_param.Filters +WebSearchToolUserLocation = web_search_tool_param.UserLocation -class McpAllowedToolsMcpAllowedToolsFilter(TypedDict, total=False): - tool_names: List[str] +class McpAllowedToolsMcpToolFilter(TypedDict, total=False): + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: SequenceNotStr[str] """List of allowed tool names.""" -McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpAllowedToolsFilter] +McpAllowedTools: TypeAlias = Union[SequenceNotStr[str], McpAllowedToolsMcpToolFilter] class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): - tool_names: List[str] - """List of tools that require approval.""" + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: SequenceNotStr[str] + """List of allowed tool names.""" class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): - tool_names: List[str] - """List of tools that do not require approval.""" + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + + tool_names: SequenceNotStr[str] + """List of allowed tool names.""" class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): always: McpRequireApprovalMcpToolApprovalFilterAlways - """A list of tools that always require approval.""" + """A filter object to specify which tools are allowed.""" never: McpRequireApprovalMcpToolApprovalFilterNever - """A list of tools that never require approval.""" + """A filter object to specify which tools are allowed.""" McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"]] @@ -62,15 +93,47 @@ class Mcp(TypedDict, total=False): server_label: Required[str] """A label for this MCP server, used to identify it in tool calls.""" - server_url: Required[str] - """The URL for the MCP server.""" - type: Required[Literal["mcp"]] """The type of the MCP tool. 
Always `mcp`.""" allowed_tools: Optional[McpAllowedTools] """List of allowed tool names or a filter object.""" + authorization: str + """ + An OAuth access token that can be used with a remote MCP server, either with a + custom MCP server URL or a service connector. Your application must handle the + OAuth authorization flow and provide the token here. + """ + + connector_id: Literal[ + "connector_dropbox", + "connector_gmail", + "connector_googlecalendar", + "connector_googledrive", + "connector_microsoftteams", + "connector_outlookcalendar", + "connector_outlookemail", + "connector_sharepoint", + ] + """Identifier for service connectors, like those available in ChatGPT. + + One of `server_url` or `connector_id` must be provided. Learn more about service + connectors + [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). + + Currently supported `connector_id` values are: + + - Dropbox: `connector_dropbox` + - Gmail: `connector_gmail` + - Google Calendar: `connector_googlecalendar` + - Google Drive: `connector_googledrive` + - Microsoft Teams: `connector_microsoftteams` + - Outlook Calendar: `connector_outlookcalendar` + - Outlook Email: `connector_outlookemail` + - SharePoint: `connector_sharepoint` + """ + headers: Optional[Dict[str, str]] """Optional HTTP headers to send to the MCP server. @@ -80,12 +143,21 @@ class Mcp(TypedDict, total=False): require_approval: Optional[McpRequireApproval] """Specify which of the MCP server's tools require approval.""" + server_description: str + """Optional description of the MCP server, used to provide more context.""" + + server_url: str + """The URL for the MCP server. + + One of `server_url` or `connector_id` must be provided. + """ + class CodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False): type: Required[Literal["auto"]] """Always `auto`.""" - file_ids: List[str] + file_ids: SequenceNotStr[str] """An optional list of uploaded files to make available to your code.""" @@ -122,6 +194,13 @@ class ImageGeneration(TypedDict, total=False): One of `transparent`, `opaque`, or `auto`. Default: `auto`. """ + input_fidelity: Optional[Literal["high", "low"]] + """ + Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + """ + input_image_mask: ImageGenerationInputImageMask """Optional mask for inpainting. @@ -170,13 +249,15 @@ class LocalShell(TypedDict, total=False): ToolParam: TypeAlias = Union[ FunctionToolParam, FileSearchToolParam, - WebSearchToolParam, ComputerToolParam, + WebSearchToolParam, Mcp, CodeInterpreter, ImageGeneration, LocalShell, + CustomToolParam, + WebSearchPreviewToolParam, ] -ParseableToolParam: TypeAlias = Union[ToolParam, ChatCompletionToolParam] +ParseableToolParam: TypeAlias = Union[ToolParam, ChatCompletionFunctionToolParam] diff --git a/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool.py b/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool.py new file mode 100644 index 00000000..66d6a246 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool.py @@ -0,0 +1,49 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
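The `input_fidelity` option documented on the `ImageGeneration` tool above controls how closely generated images preserve input features. A sketch of an edit-style request — the image URL is a placeholder, and `gpt-image-1` support is per the docstring:

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-5",
    input=[
        {
            "role": "user",
            "content": [
                {"type": "input_text", "text": "Put this person in a spacesuit."},
                {
                    "type": "input_image",
                    "image_url": "https://example.com/face.png",  # placeholder
                    "detail": "auto",
                },
            ],
        }
    ],
    tools=[
        {
            "type": "image_generation",
            # Closely match features (especially faces) of the input image.
            "input_fidelity": "high",
        }
    ],
)
```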
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["WebSearchPreviewTool", "UserLocation"] + + +class UserLocation(BaseModel): + type: Literal["approximate"] + """The type of location approximation. Always `approximate`.""" + + city: Optional[str] = None + """Free text input for the city of the user, e.g. `San Francisco`.""" + + country: Optional[str] = None + """ + The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + the user, e.g. `US`. + """ + + region: Optional[str] = None + """Free text input for the region of the user, e.g. `California`.""" + + timezone: Optional[str] = None + """ + The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + user, e.g. `America/Los_Angeles`. + """ + + +class WebSearchPreviewTool(BaseModel): + type: Literal["web_search_preview", "web_search_preview_2025_03_11"] + """The type of the web search tool. + + One of `web_search_preview` or `web_search_preview_2025_03_11`. + """ + + search_context_size: Optional[Literal["low", "medium", "high"]] = None + """High level guidance for the amount of context window space to use for the + search. + + One of `low`, `medium`, or `high`. `medium` is the default. + """ + + user_location: Optional[UserLocation] = None + """The user's location.""" diff --git a/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool_param.py b/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool_param.py new file mode 100644 index 00000000..ec2173f8 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/responses/web_search_preview_tool_param.py @@ -0,0 +1,49 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["WebSearchPreviewToolParam", "UserLocation"] + + +class UserLocation(TypedDict, total=False): + type: Required[Literal["approximate"]] + """The type of location approximation. Always `approximate`.""" + + city: Optional[str] + """Free text input for the city of the user, e.g. `San Francisco`.""" + + country: Optional[str] + """ + The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + the user, e.g. `US`. + """ + + region: Optional[str] + """Free text input for the region of the user, e.g. `California`.""" + + timezone: Optional[str] + """ + The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + user, e.g. `America/Los_Angeles`. + """ + + +class WebSearchPreviewToolParam(TypedDict, total=False): + type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]] + """The type of the web search tool. + + One of `web_search_preview` or `web_search_preview_2025_03_11`. + """ + + search_context_size: Literal["low", "medium", "high"] + """High level guidance for the amount of context window space to use for the + search. + + One of `low`, `medium`, or `high`. `medium` is the default. + """ + + user_location: Optional[UserLocation] + """The user's location.""" diff --git a/portkey_ai/_vendor/openai/types/responses/web_search_tool.py b/portkey_ai/_vendor/openai/types/responses/web_search_tool.py index a6bf9511..bde9600c 100644 --- a/portkey_ai/_vendor/openai/types/responses/web_search_tool.py +++ b/portkey_ai/_vendor/openai/types/responses/web_search_tool.py @@ -1,17 +1,25 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import Optional +from typing import List, Optional from typing_extensions import Literal from ..._models import BaseModel -__all__ = ["WebSearchTool", "UserLocation"] +__all__ = ["WebSearchTool", "Filters", "UserLocation"] -class UserLocation(BaseModel): - type: Literal["approximate"] - """The type of location approximation. Always `approximate`.""" +class Filters(BaseModel): + allowed_domains: Optional[List[str]] = None + """Allowed domains for the search. + + If not provided, all domains are allowed. Subdomains of the provided domains are + allowed as well. + + Example: `["pubmed.ncbi.nlm.nih.gov"]` + """ + +class UserLocation(BaseModel): city: Optional[str] = None """Free text input for the city of the user, e.g. `San Francisco`.""" @@ -30,14 +38,20 @@ class UserLocation(BaseModel): user, e.g. `America/Los_Angeles`. """ + type: Optional[Literal["approximate"]] = None + """The type of location approximation. Always `approximate`.""" + class WebSearchTool(BaseModel): - type: Literal["web_search_preview", "web_search_preview_2025_03_11"] + type: Literal["web_search", "web_search_2025_08_26"] """The type of the web search tool. - One of `web_search_preview` or `web_search_preview_2025_03_11`. + One of `web_search` or `web_search_2025_08_26`. """ + filters: Optional[Filters] = None + """Filters for the search.""" + search_context_size: Optional[Literal["low", "medium", "high"]] = None """High level guidance for the amount of context window space to use for the search. @@ -46,4 +60,4 @@ class WebSearchTool(BaseModel): """ user_location: Optional[UserLocation] = None - """The user's location.""" + """The approximate location of the user.""" diff --git a/portkey_ai/_vendor/openai/types/responses/web_search_tool_param.py b/portkey_ai/_vendor/openai/types/responses/web_search_tool_param.py index d0335c01..7fa19e9c 100644 --- a/portkey_ai/_vendor/openai/types/responses/web_search_tool_param.py +++ b/portkey_ai/_vendor/openai/types/responses/web_search_tool_param.py @@ -5,13 +5,23 @@ from typing import Optional from typing_extensions import Literal, Required, TypedDict -__all__ = ["WebSearchToolParam", "UserLocation"] +from ..._types import SequenceNotStr +__all__ = ["WebSearchToolParam", "Filters", "UserLocation"] -class UserLocation(TypedDict, total=False): - type: Required[Literal["approximate"]] - """The type of location approximation. Always `approximate`.""" +class Filters(TypedDict, total=False): + allowed_domains: Optional[SequenceNotStr[str]] + """Allowed domains for the search. + + If not provided, all domains are allowed. Subdomains of the provided domains are + allowed as well. + + Example: `["pubmed.ncbi.nlm.nih.gov"]` + """ + + +class UserLocation(TypedDict, total=False): city: Optional[str] """Free text input for the city of the user, e.g. `San Francisco`.""" @@ -30,14 +40,20 @@ class UserLocation(TypedDict, total=False): user, e.g. `America/Los_Angeles`. """ + type: Literal["approximate"] + """The type of location approximation. Always `approximate`.""" + class WebSearchToolParam(TypedDict, total=False): - type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]] + type: Required[Literal["web_search", "web_search_2025_08_26"]] """The type of the web search tool. - One of `web_search_preview` or `web_search_preview_2025_03_11`. + One of `web_search` or `web_search_2025_08_26`. 
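The renamed `web_search` tool (the old `web_search_preview` variants now live in their own `WebSearchPreviewTool` type) gains a `filters` object; the domain below is the docstring's own example:

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-5",
    input="Find recent studies on sleep and memory consolidation.",
    tools=[
        {
            "type": "web_search",
            # Restrict results to one domain; subdomains are included.
            "filters": {"allowed_domains": ["pubmed.ncbi.nlm.nih.gov"]},
            "search_context_size": "low",
            "user_location": {"type": "approximate", "country": "US"},
        }
    ],
)
```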
""" + filters: Optional[Filters] + """Filters for the search.""" + search_context_size: Literal["low", "medium", "high"] """High level guidance for the amount of context window space to use for the search. @@ -46,4 +62,4 @@ class WebSearchToolParam(TypedDict, total=False): """ user_location: Optional[UserLocation] - """The user's location.""" + """The approximate location of the user.""" diff --git a/portkey_ai/_vendor/openai/types/shared/__init__.py b/portkey_ai/_vendor/openai/types/shared/__init__.py index 6ad0ed5e..2930b9ae 100644 --- a/portkey_ai/_vendor/openai/types/shared/__init__.py +++ b/portkey_ai/_vendor/openai/types/shared/__init__.py @@ -12,5 +12,8 @@ from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters from .response_format_text import ResponseFormatText as ResponseFormatText +from .custom_tool_input_format import CustomToolInputFormat as CustomToolInputFormat from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema +from .response_format_text_python import ResponseFormatTextPython as ResponseFormatTextPython +from .response_format_text_grammar import ResponseFormatTextGrammar as ResponseFormatTextGrammar diff --git a/portkey_ai/_vendor/openai/types/shared/all_models.py b/portkey_ai/_vendor/openai/types/shared/all_models.py index fae8c4c8..828f3b56 100644 --- a/portkey_ai/_vendor/openai/types/shared/all_models.py +++ b/portkey_ai/_vendor/openai/types/shared/all_models.py @@ -15,6 +15,10 @@ "o1-pro-2025-03-19", "o3-pro", "o3-pro-2025-06-10", + "o3-deep-research", + "o3-deep-research-2025-06-26", + "o4-mini-deep-research", + "o4-mini-deep-research-2025-06-26", "computer-use-preview", "computer-use-preview-2025-03-11", ], diff --git a/portkey_ai/_vendor/openai/types/shared/chat_model.py b/portkey_ai/_vendor/openai/types/shared/chat_model.py index 309368a3..727c60c1 100644 --- a/portkey_ai/_vendor/openai/types/shared/chat_model.py +++ b/portkey_ai/_vendor/openai/types/shared/chat_model.py @@ -5,6 +5,13 @@ __all__ = ["ChatModel"] ChatModel: TypeAlias = Literal[ + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", + "gpt-5-chat-latest", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", diff --git a/portkey_ai/_vendor/openai/types/shared/custom_tool_input_format.py b/portkey_ai/_vendor/openai/types/shared/custom_tool_input_format.py new file mode 100644 index 00000000..53c8323e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/shared/custom_tool_input_format.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel + +__all__ = ["CustomToolInputFormat", "Text", "Grammar"] + + +class Text(BaseModel): + type: Literal["text"] + """Unconstrained text format. Always `text`.""" + + +class Grammar(BaseModel): + definition: str + """The grammar definition.""" + + syntax: Literal["lark", "regex"] + """The syntax of the grammar definition. One of `lark` or `regex`.""" + + type: Literal["grammar"] + """Grammar format. 
Always `grammar`.""" + + +CustomToolInputFormat: TypeAlias = Annotated[Union[Text, Grammar], PropertyInfo(discriminator="type")] diff --git a/portkey_ai/_vendor/openai/types/shared/function_definition.py b/portkey_ai/_vendor/openai/types/shared/function_definition.py index 06baa231..33ebb9ad 100644 --- a/portkey_ai/_vendor/openai/types/shared/function_definition.py +++ b/portkey_ai/_vendor/openai/types/shared/function_definition.py @@ -39,5 +39,5 @@ class FunctionDefinition(BaseModel): If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the - [function calling guide](docs/guides/function-calling). + [function calling guide](https://platform.openai.com/docs/guides/function-calling). """ diff --git a/portkey_ai/_vendor/openai/types/shared/reasoning.py b/portkey_ai/_vendor/openai/types/shared/reasoning.py index 107aab2e..24ce3015 100644 --- a/portkey_ai/_vendor/openai/types/shared/reasoning.py +++ b/portkey_ai/_vendor/openai/types/shared/reasoning.py @@ -11,12 +11,12 @@ class Reasoning(BaseModel): effort: Optional[ReasoningEffort] = None - """**o-series models only** - + """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. """ generate_summary: Optional[Literal["auto", "concise", "detailed"]] = None diff --git a/portkey_ai/_vendor/openai/types/shared/reasoning_effort.py b/portkey_ai/_vendor/openai/types/shared/reasoning_effort.py index ace21b67..4b960cd7 100644 --- a/portkey_ai/_vendor/openai/types/shared/reasoning_effort.py +++ b/portkey_ai/_vendor/openai/types/shared/reasoning_effort.py @@ -5,4 +5,4 @@ __all__ = ["ReasoningEffort"] -ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]] +ReasoningEffort: TypeAlias = Optional[Literal["minimal", "low", "medium", "high"]] diff --git a/portkey_ai/_vendor/openai/types/shared/response_format_text_grammar.py b/portkey_ai/_vendor/openai/types/shared/response_format_text_grammar.py new file mode 100644 index 00000000..b02f99c1 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/shared/response_format_text_grammar.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFormatTextGrammar"] + + +class ResponseFormatTextGrammar(BaseModel): + grammar: str + """The custom grammar for the model to follow.""" + + type: Literal["grammar"] + """The type of response format being defined. Always `grammar`.""" diff --git a/portkey_ai/_vendor/openai/types/shared/response_format_text_python.py b/portkey_ai/_vendor/openai/types/shared/response_format_text_python.py new file mode 100644 index 00000000..4cd18d46 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/shared/response_format_text_python.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
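The widened `ReasoningEffort` alias (adding `minimal`) pairs naturally with the new `gpt-5` model entries; per the docstring, lower effort trades reasoning tokens for latency:

```python
from openai import OpenAI

client = OpenAI()

# Minimal reasoning effort: fastest responses, fewest reasoning tokens.
response = client.responses.create(
    model="gpt-5",
    input="Convert 72°F to Celsius.",
    reasoning={"effort": "minimal"},
)
print(response.output_text)
```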
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFormatTextPython"] + + +class ResponseFormatTextPython(BaseModel): + type: Literal["python"] + """The type of response format being defined. Always `python`.""" diff --git a/portkey_ai/_vendor/openai/types/shared/responses_model.py b/portkey_ai/_vendor/openai/types/shared/responses_model.py index 790c1212..4d353568 100644 --- a/portkey_ai/_vendor/openai/types/shared/responses_model.py +++ b/portkey_ai/_vendor/openai/types/shared/responses_model.py @@ -15,6 +15,10 @@ "o1-pro-2025-03-19", "o3-pro", "o3-pro-2025-06-10", + "o3-deep-research", + "o3-deep-research-2025-06-26", + "o4-mini-deep-research", + "o4-mini-deep-research-2025-06-26", "computer-use-preview", "computer-use-preview-2025-03-11", ], diff --git a/portkey_ai/_vendor/openai/types/shared_params/__init__.py b/portkey_ai/_vendor/openai/types/shared_params/__init__.py index 88947108..b6c0912b 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/__init__.py +++ b/portkey_ai/_vendor/openai/types/shared_params/__init__.py @@ -10,5 +10,6 @@ from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters from .response_format_text import ResponseFormatText as ResponseFormatText +from .custom_tool_input_format import CustomToolInputFormat as CustomToolInputFormat from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema diff --git a/portkey_ai/_vendor/openai/types/shared_params/chat_model.py b/portkey_ai/_vendor/openai/types/shared_params/chat_model.py index 6cd8e7f9..a1e5ab9f 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/chat_model.py +++ b/portkey_ai/_vendor/openai/types/shared_params/chat_model.py @@ -7,6 +7,13 @@ __all__ = ["ChatModel"] ChatModel: TypeAlias = Literal[ + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", + "gpt-5-chat-latest", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", diff --git a/portkey_ai/_vendor/openai/types/shared_params/custom_tool_input_format.py b/portkey_ai/_vendor/openai/types/shared_params/custom_tool_input_format.py new file mode 100644 index 00000000..37df393e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/shared_params/custom_tool_input_format.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["CustomToolInputFormat", "Text", "Grammar"] + + +class Text(TypedDict, total=False): + type: Required[Literal["text"]] + """Unconstrained text format. Always `text`.""" + + +class Grammar(TypedDict, total=False): + definition: Required[str] + """The grammar definition.""" + + syntax: Required[Literal["lark", "regex"]] + """The syntax of the grammar definition. One of `lark` or `regex`.""" + + type: Required[Literal["grammar"]] + """Grammar format. 
Always `grammar`.""" + + +CustomToolInputFormat: TypeAlias = Union[Text, Grammar] diff --git a/portkey_ai/_vendor/openai/types/shared_params/function_definition.py b/portkey_ai/_vendor/openai/types/shared_params/function_definition.py index d45ec13f..b3fdaf86 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/function_definition.py +++ b/portkey_ai/_vendor/openai/types/shared_params/function_definition.py @@ -41,5 +41,5 @@ class FunctionDefinition(TypedDict, total=False): If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the - [function calling guide](docs/guides/function-calling). + [function calling guide](https://platform.openai.com/docs/guides/function-calling). """ diff --git a/portkey_ai/_vendor/openai/types/shared_params/reasoning.py b/portkey_ai/_vendor/openai/types/shared_params/reasoning.py index 73e1a008..7eab2c76 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/reasoning.py +++ b/portkey_ai/_vendor/openai/types/shared_params/reasoning.py @@ -12,12 +12,12 @@ class Reasoning(TypedDict, total=False): effort: Optional[ReasoningEffort] - """**o-series models only** - + """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. """ generate_summary: Optional[Literal["auto", "concise", "detailed"]] diff --git a/portkey_ai/_vendor/openai/types/shared_params/reasoning_effort.py b/portkey_ai/_vendor/openai/types/shared_params/reasoning_effort.py index 6052c5ae..4c095a28 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/reasoning_effort.py +++ b/portkey_ai/_vendor/openai/types/shared_params/reasoning_effort.py @@ -7,4 +7,4 @@ __all__ = ["ReasoningEffort"] -ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]] +ReasoningEffort: TypeAlias = Optional[Literal["minimal", "low", "medium", "high"]] diff --git a/portkey_ai/_vendor/openai/types/shared_params/responses_model.py b/portkey_ai/_vendor/openai/types/shared_params/responses_model.py index ca526b8f..adfcecf1 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/responses_model.py +++ b/portkey_ai/_vendor/openai/types/shared_params/responses_model.py @@ -17,6 +17,10 @@ "o1-pro-2025-03-19", "o3-pro", "o3-pro-2025-06-10", + "o3-deep-research", + "o3-deep-research-2025-06-26", + "o4-mini-deep-research", + "o4-mini-deep-research-2025-06-26", "computer-use-preview", "computer-use-preview-2025-03-11", ], diff --git a/portkey_ai/_vendor/openai/types/upload_complete_params.py b/portkey_ai/_vendor/openai/types/upload_complete_params.py index cce568d5..846a241d 100644 --- a/portkey_ai/_vendor/openai/types/upload_complete_params.py +++ b/portkey_ai/_vendor/openai/types/upload_complete_params.py @@ -2,14 +2,15 @@ from __future__ import annotations -from typing import List from typing_extensions import Required, TypedDict +from .._types import SequenceNotStr + __all__ = ["UploadCompleteParams"] class UploadCompleteParams(TypedDict, total=False): - part_ids: Required[List[str]] + part_ids: Required[SequenceNotStr[str]] """The ordered list of Part IDs.""" md5: str diff 
--git a/portkey_ai/_vendor/openai/types/upload_create_params.py b/portkey_ai/_vendor/openai/types/upload_create_params.py index 2ebabe6c..ab4cded8 100644 --- a/portkey_ai/_vendor/openai/types/upload_create_params.py +++ b/portkey_ai/_vendor/openai/types/upload_create_params.py @@ -2,11 +2,11 @@ from __future__ import annotations -from typing_extensions import Required, TypedDict +from typing_extensions import Literal, Required, TypedDict from .file_purpose import FilePurpose -__all__ = ["UploadCreateParams"] +__all__ = ["UploadCreateParams", "ExpiresAfter"] class UploadCreateParams(TypedDict, total=False): @@ -29,3 +29,24 @@ class UploadCreateParams(TypedDict, total=False): See the [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). """ + + expires_after: ExpiresAfter + """The expiration policy for a file. + + By default, files with `purpose=batch` expire after 30 days and all other files + are persisted until they are manually deleted. + """ + + +class ExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["created_at"]] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `created_at`. + """ + + seconds: Required[int] + """The number of seconds after the anchor time that the file will expire. + + Must be between 3600 (1 hour) and 2592000 (30 days). + """ diff --git a/portkey_ai/_vendor/openai/types/vector_store_create_params.py b/portkey_ai/_vendor/openai/types/vector_store_create_params.py index 365d0936..945a9886 100644 --- a/portkey_ai/_vendor/openai/types/vector_store_create_params.py +++ b/portkey_ai/_vendor/openai/types/vector_store_create_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Optional +from typing import Optional from typing_extensions import Literal, Required, TypedDict +from .._types import SequenceNotStr from .shared_params.metadata import Metadata from .file_chunking_strategy_param import FileChunkingStrategyParam @@ -22,7 +23,7 @@ class VectorStoreCreateParams(TypedDict, total=False): expires_after: ExpiresAfter """The expiration policy for a vector store.""" - file_ids: List[str] + file_ids: SequenceNotStr[str] """ A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. 
Useful for tools like `file_search` that can access diff --git a/portkey_ai/_vendor/openai/types/vector_store_search_params.py b/portkey_ai/_vendor/openai/types/vector_store_search_params.py index 17573d0f..8b7b13c4 100644 --- a/portkey_ai/_vendor/openai/types/vector_store_search_params.py +++ b/portkey_ai/_vendor/openai/types/vector_store_search_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing import List, Union +from typing import Union from typing_extensions import Literal, Required, TypeAlias, TypedDict +from .._types import SequenceNotStr from .shared_params.compound_filter import CompoundFilter from .shared_params.comparison_filter import ComparisonFilter @@ -12,7 +13,7 @@ class VectorStoreSearchParams(TypedDict, total=False): - query: Required[Union[str, List[str]]] + query: Required[Union[str, SequenceNotStr[str]]] """A query string for a search""" filters: Filters @@ -35,6 +36,7 @@ class VectorStoreSearchParams(TypedDict, total=False): class RankingOptions(TypedDict, total=False): - ranker: Literal["auto", "default-2024-11-15"] + ranker: Literal["none", "auto", "default-2024-11-15"] + """Enable re-ranking; set to `none` to disable, which can help reduce latency.""" score_threshold: float diff --git a/portkey_ai/_vendor/openai/types/vector_stores/file_batch_create_params.py b/portkey_ai/_vendor/openai/types/vector_stores/file_batch_create_params.py index 1a470f75..d8d7b448 100644 --- a/portkey_ai/_vendor/openai/types/vector_stores/file_batch_create_params.py +++ b/portkey_ai/_vendor/openai/types/vector_stores/file_batch_create_params.py @@ -2,16 +2,17 @@ from __future__ import annotations -from typing import Dict, List, Union, Optional +from typing import Dict, Union, Optional from typing_extensions import Required, TypedDict +from ..._types import SequenceNotStr from ..file_chunking_strategy_param import FileChunkingStrategyParam __all__ = ["FileBatchCreateParams"] class FileBatchCreateParams(TypedDict, total=False): - file_ids: Required[List[str]] + file_ids: Required[SequenceNotStr[str]] """ A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access diff --git a/portkey_ai/_vendor/openai/types/webhooks/__init__.py b/portkey_ai/_vendor/openai/types/webhooks/__init__.py new file mode 100644 index 00000000..8b9e5565 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/__init__.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
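The `vector_store_search_params.py` hunk above widens `ranker` with a `none` option for skipping re-ranking. A short sketch against the vendored OpenAI client (the store ID and query are illustrative):

```python
# Sketch: vector store search with re-ranking disabled via ranker="none".
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

page = client.vector_stores.search(
    vector_store_id="vs_abc123",          # illustrative ID
    query="file expiration policy",       # a str, or any non-str sequence of str
    ranking_options={"ranker": "none"},   # skip the re-ranker to cut latency
)
for result in page.data:
    print(result.score, result.filename)
```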
+ +from __future__ import annotations + +from .unwrap_webhook_event import UnwrapWebhookEvent as UnwrapWebhookEvent +from .batch_failed_webhook_event import BatchFailedWebhookEvent as BatchFailedWebhookEvent +from .batch_expired_webhook_event import BatchExpiredWebhookEvent as BatchExpiredWebhookEvent +from .batch_cancelled_webhook_event import BatchCancelledWebhookEvent as BatchCancelledWebhookEvent +from .batch_completed_webhook_event import BatchCompletedWebhookEvent as BatchCompletedWebhookEvent +from .eval_run_failed_webhook_event import EvalRunFailedWebhookEvent as EvalRunFailedWebhookEvent +from .response_failed_webhook_event import ResponseFailedWebhookEvent as ResponseFailedWebhookEvent +from .eval_run_canceled_webhook_event import EvalRunCanceledWebhookEvent as EvalRunCanceledWebhookEvent +from .eval_run_succeeded_webhook_event import EvalRunSucceededWebhookEvent as EvalRunSucceededWebhookEvent +from .response_cancelled_webhook_event import ResponseCancelledWebhookEvent as ResponseCancelledWebhookEvent +from .response_completed_webhook_event import ResponseCompletedWebhookEvent as ResponseCompletedWebhookEvent +from .response_incomplete_webhook_event import ResponseIncompleteWebhookEvent as ResponseIncompleteWebhookEvent +from .fine_tuning_job_failed_webhook_event import FineTuningJobFailedWebhookEvent as FineTuningJobFailedWebhookEvent +from .realtime_call_incoming_webhook_event import RealtimeCallIncomingWebhookEvent as RealtimeCallIncomingWebhookEvent +from .fine_tuning_job_cancelled_webhook_event import ( + FineTuningJobCancelledWebhookEvent as FineTuningJobCancelledWebhookEvent, +) +from .fine_tuning_job_succeeded_webhook_event import ( + FineTuningJobSucceededWebhookEvent as FineTuningJobSucceededWebhookEvent, +) diff --git a/portkey_ai/_vendor/openai/types/webhooks/batch_cancelled_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/batch_cancelled_webhook_event.py new file mode 100644 index 00000000..4bbd7307 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/batch_cancelled_webhook_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["BatchCancelledWebhookEvent", "Data"] + + +class Data(BaseModel): + id: str + """The unique ID of the batch API request.""" + + +class BatchCancelledWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the batch API request was cancelled.""" + + data: Data + """Event data payload.""" + + type: Literal["batch.cancelled"] + """The type of the event. Always `batch.cancelled`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/batch_completed_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/batch_completed_webhook_event.py new file mode 100644 index 00000000..a47ca156 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/batch_completed_webhook_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["BatchCompletedWebhookEvent", "Data"] + + +class Data(BaseModel): + id: str + """The unique ID of the batch API request.""" + + +class BatchCompletedWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the batch API request was completed.""" + + data: Data + """Event data payload.""" + + type: Literal["batch.completed"] + """The type of the event. Always `batch.completed`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/batch_expired_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/batch_expired_webhook_event.py new file mode 100644 index 00000000..e91001e8 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/batch_expired_webhook_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["BatchExpiredWebhookEvent", "Data"] + + +class Data(BaseModel): + id: str + """The unique ID of the batch API request.""" + + +class BatchExpiredWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the batch API request expired.""" + + data: Data + """Event data payload.""" + + type: Literal["batch.expired"] + """The type of the event. Always `batch.expired`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/batch_failed_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/batch_failed_webhook_event.py new file mode 100644 index 00000000..ef80863e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/batch_failed_webhook_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["BatchFailedWebhookEvent", "Data"] + + +class Data(BaseModel): + id: str + """The unique ID of the batch API request.""" + + +class BatchFailedWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the batch API request failed.""" + + data: Data + """Event data payload.""" + + type: Literal["batch.failed"] + """The type of the event. Always `batch.failed`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/eval_run_canceled_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/eval_run_canceled_webhook_event.py new file mode 100644 index 00000000..855359f7 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/eval_run_canceled_webhook_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["EvalRunCanceledWebhookEvent", "Data"] + + +class Data(BaseModel): + id: str + """The unique ID of the eval run.""" + + +class EvalRunCanceledWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the eval run was canceled.""" + + data: Data + """Event data payload.""" + + type: Literal["eval.run.canceled"] + """The type of the event. Always `eval.run.canceled`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/eval_run_failed_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/eval_run_failed_webhook_event.py new file mode 100644 index 00000000..76716807 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/eval_run_failed_webhook_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["EvalRunFailedWebhookEvent", "Data"] + + +class Data(BaseModel): + id: str + """The unique ID of the eval run.""" + + +class EvalRunFailedWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the eval run failed.""" + + data: Data + """Event data payload.""" + + type: Literal["eval.run.failed"] + """The type of the event. Always `eval.run.failed`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/eval_run_succeeded_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/eval_run_succeeded_webhook_event.py new file mode 100644 index 00000000..d0d1fc2b --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/eval_run_succeeded_webhook_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["EvalRunSucceededWebhookEvent", "Data"] + + +class Data(BaseModel): + id: str + """The unique ID of the eval run.""" + + +class EvalRunSucceededWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the eval run succeeded.""" + + data: Data + """Event data payload.""" + + type: Literal["eval.run.succeeded"] + """The type of the event. Always `eval.run.succeeded`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py new file mode 100644 index 00000000..1fe3c060 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["FineTuningJobCancelledWebhookEvent", "Data"] + + +class Data(BaseModel): + id: str + """The unique ID of the fine-tuning job.""" + + +class FineTuningJobCancelledWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the fine-tuning job was cancelled.""" + + data: Data + """Event data payload.""" + + type: Literal["fine_tuning.job.cancelled"] + """The type of the event. Always `fine_tuning.job.cancelled`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_failed_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_failed_webhook_event.py new file mode 100644 index 00000000..71d899c8 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_failed_webhook_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["FineTuningJobFailedWebhookEvent", "Data"] + + +class Data(BaseModel): + id: str + """The unique ID of the fine-tuning job.""" + + +class FineTuningJobFailedWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the fine-tuning job failed.""" + + data: Data + """Event data payload.""" + + type: Literal["fine_tuning.job.failed"] + """The type of the event. Always `fine_tuning.job.failed`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py new file mode 100644 index 00000000..470f1fcf --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["FineTuningJobSucceededWebhookEvent", "Data"] + + +class Data(BaseModel): + id: str + """The unique ID of the fine-tuning job.""" + + +class FineTuningJobSucceededWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the fine-tuning job succeeded.""" + + data: Data + """Event data payload.""" + + type: Literal["fine_tuning.job.succeeded"] + """The type of the event. Always `fine_tuning.job.succeeded`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/realtime_call_incoming_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/realtime_call_incoming_webhook_event.py new file mode 100644 index 00000000..a166a347 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/realtime_call_incoming_webhook_event.py @@ -0,0 +1,41 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RealtimeCallIncomingWebhookEvent", "Data", "DataSipHeader"] + + +class DataSipHeader(BaseModel): + name: str + """Name of the SIP Header.""" + + value: str + """Value of the SIP Header.""" + + +class Data(BaseModel): + call_id: str + """The unique ID of this call.""" + + sip_headers: List[DataSipHeader] + """Headers from the SIP Invite.""" + + +class RealtimeCallIncomingWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the model response was completed.""" + + data: Data + """Event data payload.""" + + type: Literal["realtime.call.incoming"] + """The type of the event. Always `realtime.call.incoming`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/response_cancelled_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/response_cancelled_webhook_event.py new file mode 100644 index 00000000..443e360e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/response_cancelled_webhook_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseCancelledWebhookEvent", "Data"] + + +class Data(BaseModel): + id: str + """The unique ID of the model response.""" + + +class ResponseCancelledWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the model response was cancelled.""" + + data: Data + """Event data payload.""" + + type: Literal["response.cancelled"] + """The type of the event. Always `response.cancelled`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/response_completed_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/response_completed_webhook_event.py new file mode 100644 index 00000000..ac1feff3 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/response_completed_webhook_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseCompletedWebhookEvent", "Data"] + + +class Data(BaseModel): + id: str + """The unique ID of the model response.""" + + +class ResponseCompletedWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the model response was completed.""" + + data: Data + """Event data payload.""" + + type: Literal["response.completed"] + """The type of the event. Always `response.completed`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/response_failed_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/response_failed_webhook_event.py new file mode 100644 index 00000000..5b4ba65e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/response_failed_webhook_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFailedWebhookEvent", "Data"] + + +class Data(BaseModel): + id: str + """The unique ID of the model response.""" + + +class ResponseFailedWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the model response failed.""" + + data: Data + """Event data payload.""" + + type: Literal["response.failed"] + """The type of the event. Always `response.failed`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/response_incomplete_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/response_incomplete_webhook_event.py new file mode 100644 index 00000000..01609314 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/response_incomplete_webhook_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseIncompleteWebhookEvent", "Data"] + + +class Data(BaseModel): + id: str + """The unique ID of the model response.""" + + +class ResponseIncompleteWebhookEvent(BaseModel): + id: str + """The unique ID of the event.""" + + created_at: int + """The Unix timestamp (in seconds) of when the model response was interrupted.""" + + data: Data + """Event data payload.""" + + type: Literal["response.incomplete"] + """The type of the event. Always `response.incomplete`.""" + + object: Optional[Literal["event"]] = None + """The object of the event. Always `event`.""" diff --git a/portkey_ai/_vendor/openai/types/webhooks/unwrap_webhook_event.py b/portkey_ai/_vendor/openai/types/webhooks/unwrap_webhook_event.py new file mode 100644 index 00000000..952383c0 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/webhooks/unwrap_webhook_event.py @@ -0,0 +1,44 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
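Each webhook event model above is a discriminated variant of the `UnwrapWebhookEvent` union defined in this file. A small dispatch sketch over an already-verified event (how the event is obtained is covered earlier in this document; the handler logic is illustrative):

```python
# Sketch: narrowing a verified webhook event to a concrete model.
from portkey_ai._vendor.openai.types.webhooks import (
    BatchCompletedWebhookEvent,
    ResponseFailedWebhookEvent,
    UnwrapWebhookEvent,
)

def handle(event: UnwrapWebhookEvent) -> None:
    if isinstance(event, BatchCompletedWebhookEvent):
        print("batch done:", event.data.id)            # Data carries the batch ID
    elif isinstance(event, ResponseFailedWebhookEvent):
        print("response failed at", event.created_at)  # Unix seconds
    else:
        print("unhandled event:", event.type)
```

Because the union is `Annotated` with a `type` discriminator, `isinstance` checks after unwrapping narrow cleanly for type checkers as well.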
+ +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ..._utils import PropertyInfo +from .batch_failed_webhook_event import BatchFailedWebhookEvent +from .batch_expired_webhook_event import BatchExpiredWebhookEvent +from .batch_cancelled_webhook_event import BatchCancelledWebhookEvent +from .batch_completed_webhook_event import BatchCompletedWebhookEvent +from .eval_run_failed_webhook_event import EvalRunFailedWebhookEvent +from .response_failed_webhook_event import ResponseFailedWebhookEvent +from .eval_run_canceled_webhook_event import EvalRunCanceledWebhookEvent +from .eval_run_succeeded_webhook_event import EvalRunSucceededWebhookEvent +from .response_cancelled_webhook_event import ResponseCancelledWebhookEvent +from .response_completed_webhook_event import ResponseCompletedWebhookEvent +from .response_incomplete_webhook_event import ResponseIncompleteWebhookEvent +from .fine_tuning_job_failed_webhook_event import FineTuningJobFailedWebhookEvent +from .realtime_call_incoming_webhook_event import RealtimeCallIncomingWebhookEvent +from .fine_tuning_job_cancelled_webhook_event import FineTuningJobCancelledWebhookEvent +from .fine_tuning_job_succeeded_webhook_event import FineTuningJobSucceededWebhookEvent + +__all__ = ["UnwrapWebhookEvent"] + +UnwrapWebhookEvent: TypeAlias = Annotated[ + Union[ + BatchCancelledWebhookEvent, + BatchCompletedWebhookEvent, + BatchExpiredWebhookEvent, + BatchFailedWebhookEvent, + EvalRunCanceledWebhookEvent, + EvalRunFailedWebhookEvent, + EvalRunSucceededWebhookEvent, + FineTuningJobCancelledWebhookEvent, + FineTuningJobFailedWebhookEvent, + FineTuningJobSucceededWebhookEvent, + RealtimeCallIncomingWebhookEvent, + ResponseCancelledWebhookEvent, + ResponseCompletedWebhookEvent, + ResponseFailedWebhookEvent, + ResponseIncompleteWebhookEvent, + ], + PropertyInfo(discriminator="type"), +] diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py index 4f461cca..3ebd8235 100644 --- a/portkey_ai/api_resources/__init__.py +++ b/portkey_ai/api_resources/__init__.py @@ -83,6 +83,8 @@ AsyncBetaRealtime, BetaSessions, AsyncBetaSessions, + BetaTranscriptionSessions, + AsyncBetaTranscriptionSessions, Responses, InputItems, AsyncResponses, @@ -119,6 +121,16 @@ AsyncIntegrationsModels, Providers, AsyncProviders, + Webhooks, + AsyncWebhooks, + MainRealtime, + AsyncMainRealtime, + ClientSecrets, + AsyncClientSecrets, + Conversations, + AsyncConversations, + ConversationsItems, + AsyncConversationsItems, ) from .utils import ( Modes, @@ -237,6 +249,8 @@ "AsyncBetaRealtime", "BetaSessions", "AsyncBetaSessions", + "BetaTranscriptionSessions", + "AsyncBetaTranscriptionSessions", "Responses", "InputItems", "AsyncResponses", @@ -273,4 +287,14 @@ "AsyncIntegrationsModels", "Providers", "AsyncProviders", + "Webhooks", + "AsyncWebhooks", + "MainRealtime", + "AsyncMainRealtime", + "ClientSecrets", + "AsyncClientSecrets", + "Conversations", + "AsyncConversations", + "ConversationsItems", + "AsyncConversationsItems", ] diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py index 6a1314b0..6a2de405 100644 --- a/portkey_ai/api_resources/apis/__init__.py +++ b/portkey_ai/api_resources/apis/__init__.py @@ -95,6 +95,8 @@ AsyncBetaRealtime, BetaSessions, AsyncBetaSessions, + BetaTranscriptionSessions, + AsyncBetaTranscriptionSessions, ) from .responses import ( @@ -129,6 +131,8 @@ AsyncContent, ) +from .webhooks import Webhooks, AsyncWebhooks + from .configs import Configs, 
AsyncConfigs from .api_keys import ApiKeys, AsyncApiKeys @@ -153,6 +157,20 @@ AsyncProviders, ) +from .main_realtime import ( + MainRealtime, + AsyncMainRealtime, + ClientSecrets, + AsyncClientSecrets, +) + +from .conversations import ( + Conversations, + AsyncConversations, + ConversationsItems, + AsyncConversationsItems, +) + sys.modules["openai"] = vendored_openai # For pydantic v1 and v2 compatibility __all__ = [ @@ -249,6 +267,8 @@ "AsyncBetaRealtime", "BetaSessions", "AsyncBetaSessions", + "BetaTranscriptionSessions", + "AsyncBetaTranscriptionSessions", "Responses", "InputItems", "AsyncResponses", @@ -285,4 +305,14 @@ "AsyncIntegrationsModels", "Providers", "AsyncProviders", + "Webhooks", + "AsyncWebhooks", + "MainRealtime", + "AsyncMainRealtime", + "ClientSecrets", + "AsyncClientSecrets", + "Conversations", + "AsyncConversations", + "ConversationsItems", + "AsyncConversationsItems", ] diff --git a/portkey_ai/api_resources/apis/beta_realtime.py b/portkey_ai/api_resources/apis/beta_realtime.py index 3ddcd6bc..b07d8faf 100644 --- a/portkey_ai/api_resources/apis/beta_realtime.py +++ b/portkey_ai/api_resources/apis/beta_realtime.py @@ -1,9 +1,11 @@ -import json from typing import Any, Iterable, List, Union from portkey_ai._vendor.openai.resources.beta.realtime.realtime import ( AsyncRealtimeConnectionManager, RealtimeConnectionManager, ) +from portkey_ai._vendor.openai.types.beta.realtime.transcription_session import ( + TranscriptionSession, +) from portkey_ai._vendor.openai.types.websocket_connection_options import ( WebsocketConnectionOptions, ) @@ -18,6 +20,7 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client self.sessions = BetaSessions(client) + self.transcription_sessions = BetaTranscriptionSessions(client) def connect( self, @@ -34,11 +37,49 @@ def connect( ) +class BetaTranscriptionSessions(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def create( + self, + *, + client_secret: Union[Any, NotGiven] = NOT_GIVEN, + include: Union[List[Any], NotGiven] = NOT_GIVEN, + input_audio_format: Union[Any, NotGiven] = NOT_GIVEN, + input_audio_noise_reduction: Union[Any, NotGiven] = NOT_GIVEN, + input_audio_transcription: Union[Any, NotGiven] = NOT_GIVEN, + modalities: Union[List[Any], NotGiven] = NOT_GIVEN, + turn_detection: Union[Any, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> TranscriptionSession: + extra_headers = kwargs.pop("extra_headers", None) + extra_query = kwargs.pop("extra_query", None) + extra_body = kwargs.pop("extra_body", None) + timeout = kwargs.pop("timeout", None) + response = self.openai_client.beta.realtime.transcription_sessions.create( + client_secret=client_secret, + include=include, + input_audio_format=input_audio_format, + input_audio_noise_reduction=input_audio_noise_reduction, + input_audio_transcription=input_audio_transcription, + modalities=modalities, + turn_detection=turn_detection, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body={**(extra_body or {}), **kwargs}, + timeout=timeout, + ) + return response + + class AsyncBetaRealtime(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client self.sessions = AsyncBetaSessions(client) + self.transcription_sessions = AsyncBetaTranscriptionSessions(client) def connect( self, @@ -55,6 +96,43 @@ def connect( ) +class AsyncBetaTranscriptionSessions(AsyncAPIResource): + 
def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def create( + self, + *, + client_secret: Union[Any, NotGiven] = NOT_GIVEN, + include: Union[List[Any], NotGiven] = NOT_GIVEN, + input_audio_format: Union[Any, NotGiven] = NOT_GIVEN, + input_audio_noise_reduction: Union[Any, NotGiven] = NOT_GIVEN, + input_audio_transcription: Union[Any, NotGiven] = NOT_GIVEN, + modalities: Union[List[Any], NotGiven] = NOT_GIVEN, + turn_detection: Union[Any, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> TranscriptionSession: + extra_headers = kwargs.pop("extra_headers", None) + extra_query = kwargs.pop("extra_query", None) + extra_body = kwargs.pop("extra_body", None) + timeout = kwargs.pop("timeout", None) + response = await self.openai_client.beta.realtime.transcription_sessions.create( + client_secret=client_secret, + include=include, + input_audio_format=input_audio_format, + input_audio_noise_reduction=input_audio_noise_reduction, + input_audio_transcription=input_audio_transcription, + modalities=modalities, + turn_detection=turn_detection, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body={**(extra_body or {}), **kwargs}, + timeout=timeout, + ) + return response + + class BetaSessions(APIResource): def __init__(self, client: Portkey) -> None: super().__init__(client) @@ -75,8 +153,13 @@ def create( tools: Union[Iterable[Any], NotGiven] = NOT_GIVEN, turn_detection: Union[Any, NotGiven] = NOT_GIVEN, voice: Union[Any, NotGiven] = NOT_GIVEN, + **kwargs, ) -> SessionCreateResponse: - response = self.openai_client.with_raw_response.beta.realtime.sessions.create( + extra_headers = kwargs.pop("extra_headers", None) + extra_query = kwargs.pop("extra_query", None) + extra_body = kwargs.pop("extra_body", None) + timeout = kwargs.pop("timeout", None) + response = self.openai_client.beta.realtime.sessions.create( model=model, input_audio_format=input_audio_format, input_audio_transcription=input_audio_transcription, @@ -89,10 +172,13 @@ def create( tools=tools, turn_detection=turn_detection, voice=voice, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body={**(extra_body or {}), **kwargs}, + timeout=timeout, ) - data = SessionCreateResponse(**json.loads(response.text)) - data._headers = response.headers - return data + + return response # type: ignore[return-value] class AsyncBetaSessions(AsyncAPIResource): @@ -115,23 +201,29 @@ async def create( tools: Union[Iterable[Any], NotGiven] = NOT_GIVEN, turn_detection: Union[Any, NotGiven] = NOT_GIVEN, voice: Union[Any, NotGiven] = NOT_GIVEN, + **kwargs, ) -> SessionCreateResponse: - response = ( - await self.openai_client.with_raw_response.beta.realtime.sessions.create( - model=model, - input_audio_format=input_audio_format, - input_audio_transcription=input_audio_transcription, - instructions=instructions, - max_response_output_tokens=max_response_output_tokens, - modalities=modalities, - output_audio_format=output_audio_format, - temperature=temperature, - tool_choice=tool_choice, - tools=tools, - turn_detection=turn_detection, - voice=voice, - ) + extra_headers = kwargs.pop("extra_headers", None) + extra_query = kwargs.pop("extra_query", None) + extra_body = kwargs.pop("extra_body", None) + timeout = kwargs.pop("timeout", None) + response = await self.openai_client.beta.realtime.sessions.create( + model=model, + input_audio_format=input_audio_format, + input_audio_transcription=input_audio_transcription, + instructions=instructions, + 
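The new `transcription_sessions` wrappers shown here forward to the vendored `beta.realtime.transcription_sessions.create`. A usage sketch (parameter values are illustrative; any supported transcription model can be substituted):

```python
# Sketch: creating a realtime transcription session via the new wrapper.
from portkey_ai import Portkey

client = Portkey(api_key="PORTKEY_API_KEY", virtual_key="OPENAI_VIRTUAL_KEY")  # illustrative keys

session = client.beta.realtime.transcription_sessions.create(
    input_audio_format="pcm16",
    input_audio_transcription={"model": "gpt-4o-transcribe"},  # illustrative model
    turn_detection={"type": "server_vad"},
)
print(session.id)
```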
max_response_output_tokens=max_response_output_tokens, + modalities=modalities, + output_audio_format=output_audio_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + turn_detection=turn_detection, + voice=voice, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body={**(extra_body or {}), **kwargs}, + timeout=timeout, ) - data = SessionCreateResponse(**json.loads(response.text)) - data._headers = response.headers - return data + + return response # type: ignore[return-value] diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index 5a5a6c5f..ed8d3b36 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -15,6 +15,9 @@ ) import httpx +from portkey_ai._vendor.openai.types.chat.parsed_chat_completion import ( + ParsedChatCompletion, +) from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.types.chat_complete_type import ( ChatCompletionChunk, @@ -297,6 +300,169 @@ def delete( return data + def parse( + self, + *, + messages: Iterable[Any], + model: Optional[str] = "portkey-default", + audio: Union[Optional[Any], NotGiven] = NOT_GIVEN, + response_format: Union[Any, NotGiven] = NOT_GIVEN, + frequency_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN, + function_call: Union[Any, NotGiven] = NOT_GIVEN, + functions: Union[Iterable[Any], NotGiven] = NOT_GIVEN, + logit_bias: Union[Optional[Dict[str, int]], NotGiven] = NOT_GIVEN, + logprobs: Union[Optional[bool], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[Metadata], NotGiven] = NOT_GIVEN, + modalities: Union[Optional[List[Any]], NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + parallel_tool_calls: Union[bool, NotGiven] = NOT_GIVEN, + prediction: Union[Any, NotGiven] = NOT_GIVEN, + presence_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN, + prompt_cache_key: Union[str, NotGiven] = NOT_GIVEN, + reasoning_effort: Union[Any, NotGiven] = NOT_GIVEN, + safety_identifier: Union[str, NotGiven] = NOT_GIVEN, + seed: Union[Optional[int], NotGiven] = NOT_GIVEN, + service_tier: Union[ + Literal["auto", "default", "flex", "scale", "priority"], NotGiven + ] = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + store: Union[Optional[bool], NotGiven] = NOT_GIVEN, + stream_options: Union[Any, NotGiven] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[Any, NotGiven] = NOT_GIVEN, + tools: Union[Iterable[Any], NotGiven] = NOT_GIVEN, + top_logprobs: Union[Optional[int], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + verbosity: Union[Literal["low", "medium", "high"], NotGiven] = NOT_GIVEN, + web_search_options: Union[Any, NotGiven] = NOT_GIVEN, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + ) -> ParsedChatCompletion: + response = self.openai_client.chat.completions.parse( + messages=messages, + model=model, # type: ignore[arg-type] + audio=audio, + response_format=response_format, + frequency_penalty=frequency_penalty, + function_call=function_call, + functions=functions, + logit_bias=logit_bias, + logprobs=logprobs, + max_completion_tokens=max_completion_tokens, + 
max_tokens=max_tokens, + metadata=metadata, + modalities=modalities, + n=n, + parallel_tool_calls=parallel_tool_calls, + prediction=prediction, + presence_penalty=presence_penalty, + prompt_cache_key=prompt_cache_key, + reasoning_effort=reasoning_effort, + safety_identifier=safety_identifier, + seed=seed, + service_tier=service_tier, + stop=stop, + store=store, + stream_options=stream_options, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_logprobs=top_logprobs, + top_p=top_p, + user=user, + verbosity=verbosity, + web_search_options=web_search_options, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + ) + return response + + def stream( + self, + *, + messages: Iterable[Any], + model: Optional[str] = "portkey-default", + audio: Union[Optional[Any], NotGiven] = NOT_GIVEN, + response_format: Union[Any, NotGiven] = NOT_GIVEN, + frequency_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN, + function_call: Union[Any, NotGiven] = NOT_GIVEN, + functions: Union[Iterable[Any], NotGiven] = NOT_GIVEN, + logit_bias: Union[Optional[Dict[str, int]], NotGiven] = NOT_GIVEN, + logprobs: Union[Optional[bool], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[Metadata], NotGiven] = NOT_GIVEN, + modalities: Union[Optional[List[Any]], NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + parallel_tool_calls: Union[bool, NotGiven] = NOT_GIVEN, + prediction: Union[Any, NotGiven] = NOT_GIVEN, + presence_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN, + prompt_cache_key: Union[str, NotGiven] = NOT_GIVEN, + reasoning_effort: Union[Any, NotGiven] = NOT_GIVEN, + safety_identifier: Union[str, NotGiven] = NOT_GIVEN, + seed: Union[Optional[int], NotGiven] = NOT_GIVEN, + service_tier: Union[ + Literal["auto", "default", "flex", "scale", "priority"], NotGiven + ] = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + store: Union[Optional[bool], NotGiven] = NOT_GIVEN, + stream_options: Union[Any, NotGiven] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[Any, NotGiven] = NOT_GIVEN, + tools: Union[Iterable[Any], NotGiven] = NOT_GIVEN, + top_logprobs: Union[Optional[int], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + verbosity: Union[Literal["low", "medium", "high"], NotGiven] = NOT_GIVEN, + web_search_options: Union[Any, NotGiven] = NOT_GIVEN, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + ) -> Any: + return self.openai_client.chat.completions.stream( + messages=messages, + model=model, # type: ignore[arg-type] + audio=audio, + response_format=response_format, + frequency_penalty=frequency_penalty, + function_call=function_call, + functions=functions, + logit_bias=logit_bias, + logprobs=logprobs, + max_completion_tokens=max_completion_tokens, + max_tokens=max_tokens, + metadata=metadata, + modalities=modalities, + n=n, + parallel_tool_calls=parallel_tool_calls, + prediction=prediction, + presence_penalty=presence_penalty, + prompt_cache_key=prompt_cache_key, + reasoning_effort=reasoning_effort, + safety_identifier=safety_identifier, + seed=seed, + service_tier=service_tier, + stop=stop, + store=store, + stream_options=stream_options, + temperature=temperature, + 
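`parse()` above mirrors the vendored client's structured-outputs helper and returns a `ParsedChatCompletion`. A sketch with a Pydantic schema (the schema, keys, and model are illustrative):

```python
# Sketch: structured outputs through the new parse() wrapper.
from pydantic import BaseModel
from portkey_ai import Portkey

class Answer(BaseModel):
    steps: list[str]
    result: str

client = Portkey(api_key="PORTKEY_API_KEY", virtual_key="OPENAI_VIRTUAL_KEY")  # illustrative keys

parsed = client.chat.completions.parse(
    model="gpt-4o-2024-08-06",  # a structured-outputs-capable model
    messages=[{"role": "user", "content": "Solve 2x + 3 = 11."}],
    response_format=Answer,     # the Pydantic model drives the JSON schema
)
print(parsed.choices[0].message.parsed)
```

The sibling `stream()` wrapper returns the vendored streaming manager unchanged, so it is entered as a context manager, e.g. `with client.chat.completions.stream(...) as stream:`.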
tool_choice=tool_choice, + tools=tools, + top_logprobs=top_logprobs, + top_p=top_p, + user=user, + verbosity=verbosity, + web_search_options=web_search_options, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + ) + class AsyncCompletions(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: @@ -542,6 +708,169 @@ async def delete( return data + async def parse( + self, + *, + messages: Iterable[Any], + model: Optional[str] = "portkey-default", + audio: Union[Optional[Any], NotGiven] = NOT_GIVEN, + response_format: Union[Any, NotGiven] = NOT_GIVEN, + frequency_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN, + function_call: Union[Any, NotGiven] = NOT_GIVEN, + functions: Union[Iterable[Any], NotGiven] = NOT_GIVEN, + logit_bias: Union[Optional[Dict[str, int]], NotGiven] = NOT_GIVEN, + logprobs: Union[Optional[bool], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[Metadata], NotGiven] = NOT_GIVEN, + modalities: Union[Optional[List[Any]], NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + parallel_tool_calls: Union[bool, NotGiven] = NOT_GIVEN, + prediction: Union[Any, NotGiven] = NOT_GIVEN, + presence_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN, + prompt_cache_key: Union[str, NotGiven] = NOT_GIVEN, + reasoning_effort: Union[Any, NotGiven] = NOT_GIVEN, + safety_identifier: Union[str, NotGiven] = NOT_GIVEN, + seed: Union[Optional[int], NotGiven] = NOT_GIVEN, + service_tier: Union[ + Literal["auto", "default", "flex", "scale", "priority"], NotGiven + ] = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + store: Union[Optional[bool], NotGiven] = NOT_GIVEN, + stream_options: Union[Any, NotGiven] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[Any, NotGiven] = NOT_GIVEN, + tools: Union[Iterable[Any], NotGiven] = NOT_GIVEN, + top_logprobs: Union[Optional[int], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + verbosity: Union[Literal["low", "medium", "high"], NotGiven] = NOT_GIVEN, + web_search_options: Union[Any, NotGiven] = NOT_GIVEN, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + ) -> ParsedChatCompletion: + response = await self.openai_client.chat.completions.parse( + messages=messages, + model=model, # type: ignore[arg-type] + audio=audio, + response_format=response_format, + frequency_penalty=frequency_penalty, + function_call=function_call, + functions=functions, + logit_bias=logit_bias, + logprobs=logprobs, + max_completion_tokens=max_completion_tokens, + max_tokens=max_tokens, + metadata=metadata, + modalities=modalities, + n=n, + parallel_tool_calls=parallel_tool_calls, + prediction=prediction, + presence_penalty=presence_penalty, + prompt_cache_key=prompt_cache_key, + reasoning_effort=reasoning_effort, + safety_identifier=safety_identifier, + seed=seed, + service_tier=service_tier, + stop=stop, + store=store, + stream_options=stream_options, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_logprobs=top_logprobs, + top_p=top_p, + user=user, + verbosity=verbosity, + web_search_options=web_search_options, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + ) + return response + + def stream( + self, + 
*, + messages: Iterable[Any], + model: Optional[str] = "portkey-default", + audio: Union[Optional[Any], NotGiven] = NOT_GIVEN, + response_format: Union[Any, NotGiven] = NOT_GIVEN, + frequency_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN, + function_call: Union[Any, NotGiven] = NOT_GIVEN, + functions: Union[Iterable[Any], NotGiven] = NOT_GIVEN, + logit_bias: Union[Optional[Dict[str, int]], NotGiven] = NOT_GIVEN, + logprobs: Union[Optional[bool], NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + max_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[Metadata], NotGiven] = NOT_GIVEN, + modalities: Union[Optional[List[Any]], NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + parallel_tool_calls: Union[bool, NotGiven] = NOT_GIVEN, + prediction: Union[Any, NotGiven] = NOT_GIVEN, + presence_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN, + prompt_cache_key: Union[str, NotGiven] = NOT_GIVEN, + reasoning_effort: Union[Any, NotGiven] = NOT_GIVEN, + safety_identifier: Union[str, NotGiven] = NOT_GIVEN, + seed: Union[Optional[int], NotGiven] = NOT_GIVEN, + service_tier: Union[ + Literal["auto", "default", "flex", "scale", "priority"], NotGiven + ] = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + store: Union[Optional[bool], NotGiven] = NOT_GIVEN, + stream_options: Union[Any, NotGiven] = NOT_GIVEN, + temperature: Union[Optional[float], NotGiven] = NOT_GIVEN, + tool_choice: Union[Any, NotGiven] = NOT_GIVEN, + tools: Union[Iterable[Any], NotGiven] = NOT_GIVEN, + top_logprobs: Union[Optional[int], NotGiven] = NOT_GIVEN, + top_p: Union[Optional[float], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + verbosity: Union[Literal["low", "medium", "high"], NotGiven] = NOT_GIVEN, + web_search_options: Union[Any, NotGiven] = NOT_GIVEN, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + ) -> Any: + return self.openai_client.chat.completions.stream( + messages=messages, + model=model, # type: ignore[arg-type] + audio=audio, + response_format=response_format, + frequency_penalty=frequency_penalty, + function_call=function_call, + functions=functions, + logit_bias=logit_bias, + logprobs=logprobs, + max_completion_tokens=max_completion_tokens, + max_tokens=max_tokens, + metadata=metadata, + modalities=modalities, + n=n, + parallel_tool_calls=parallel_tool_calls, + prediction=prediction, + presence_penalty=presence_penalty, + prompt_cache_key=prompt_cache_key, + reasoning_effort=reasoning_effort, + safety_identifier=safety_identifier, + seed=seed, + service_tier=service_tier, + stop=stop, + store=store, + stream_options=stream_options, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + top_logprobs=top_logprobs, + top_p=top_p, + user=user, + verbosity=verbosity, + web_search_options=web_search_options, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + ) + def _get_config_string(self, config: Union[Mapping, str]) -> str: return config if isinstance(config, str) else json.dumps(config) diff --git a/portkey_ai/api_resources/apis/conversations.py b/portkey_ai/api_resources/apis/conversations.py new file mode 100644 index 00000000..a7b70ce3 --- /dev/null +++ b/portkey_ai/api_resources/apis/conversations.py @@ -0,0 +1,407 @@ +import json +from typing import Any, Iterable, List, Literal, Optional, Union +from 
portkey_ai._vendor.openai.types.responses.response_includable import ( + ResponseIncludable, +) +from portkey_ai._vendor.openai.types.responses.response_input_item_param import ( + ResponseInputItemParam, +) +from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from portkey_ai.api_resources.types.conversation_type import ( + Conversation, + ConversationDeletedResource, +) +from portkey_ai.api_resources.types.shared_types import Headers, Metadata, Query +from ..._vendor.openai._types import Body, NotGiven, NOT_GIVEN +import httpx + + +class Conversations(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.items = ConversationsItems(client) + + def create( + self, + *, + items: Union[Optional[Iterable[ResponseInputItemParam]], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[Metadata], NotGiven] = NOT_GIVEN, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> Conversation: + response = self.openai_client.with_raw_response.conversations.create( + items=items, + metadata=metadata, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + data = Conversation(**json.loads(response.text)) + data._headers = response.headers + + return data + + def retrieve( + self, + conversation_id: str, + *, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> Conversation: + response = self.openai_client.with_raw_response.conversations.retrieve( + conversation_id=conversation_id, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + data = Conversation(**json.loads(response.text)) + data._headers = response.headers + + return data + + def update( + self, + conversation_id: str, + *, + metadata: Metadata, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> Conversation: + response = self.openai_client.with_raw_response.conversations.update( + conversation_id=conversation_id, + metadata=metadata, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + data = Conversation(**json.loads(response.text)) + data._headers = response.headers + + return data + + def delete( + self, + conversation_id: str, + *, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> ConversationDeletedResource: + response = self.openai_client.with_raw_response.conversations.delete( + conversation_id=conversation_id, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + data = ConversationDeletedResource(**json.loads(response.text)) + data._headers = response.headers + + return data + + +class AsyncConversations(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.items = AsyncConversationsItems(client) + + async def create( + 
self, + *, + items: Union[Optional[Iterable[ResponseInputItemParam]], NotGiven] = NOT_GIVEN, + metadata: Union[Optional[Metadata], NotGiven] = NOT_GIVEN, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> Conversation: + response = await self.openai_client.with_raw_response.conversations.create( + items=items, + metadata=metadata, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + data = Conversation(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def retrieve( + self, + conversation_id: str, + *, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> Conversation: + response = await self.openai_client.with_raw_response.conversations.retrieve( + conversation_id=conversation_id, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + data = Conversation(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def update( + self, + conversation_id: str, + *, + metadata: Metadata, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> Conversation: + response = await self.openai_client.with_raw_response.conversations.update( + conversation_id=conversation_id, + metadata=metadata, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + data = Conversation(**json.loads(response.text)) + data._headers = response.headers + + return data + + async def delete( + self, + conversation_id: str, + *, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> ConversationDeletedResource: + response = await self.openai_client.with_raw_response.conversations.delete( + conversation_id=conversation_id, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + data = ConversationDeletedResource(**json.loads(response.text)) + data._headers = response.headers + + return data + + +class ConversationsItems(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def create( + self, + conversation_id: str, + *, + items: Iterable[ResponseInputItemParam], + include: Union[List[ResponseIncludable], NotGiven] = NOT_GIVEN, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> Any: + response = self.openai_client.conversations.items.create( + conversation_id=conversation_id, + items=items, + include=include, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + + return response + + def retrieve( + self, + item_id: str, + *, + conversation_id: str, + include: Union[List[ResponseIncludable], NotGiven] = NOT_GIVEN, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, 
httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> Any: + response = self.openai_client.conversations.items.retrieve( + item_id=item_id, + conversation_id=conversation_id, + include=include, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + + return response + + def list( + self, + conversation_id: str, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + include: Union[List[ResponseIncludable], NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[NotGiven, Literal["asc", "desc"]] = NOT_GIVEN, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> Any: + response = self.openai_client.conversations.items.list( + conversation_id=conversation_id, + after=after, + include=include, + limit=limit, + order=order, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + + return response + + def delete( + self, + item_id: str, + *, + conversation_id: str, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> Any: + response = self.openai_client.conversations.items.delete( + item_id=item_id, + conversation_id=conversation_id, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + + return response + + +class AsyncConversationsItems(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def create( + self, + conversation_id: str, + *, + items: Iterable[ResponseInputItemParam], + include: Union[List[ResponseIncludable], NotGiven] = NOT_GIVEN, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> Any: + response = await self.openai_client.conversations.items.create( + conversation_id=conversation_id, + items=items, + include=include, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + + return response + + async def retrieve( + self, + item_id: str, + *, + conversation_id: str, + include: Union[List[ResponseIncludable], NotGiven] = NOT_GIVEN, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> Any: + response = await self.openai_client.conversations.items.retrieve( + item_id=item_id, + conversation_id=conversation_id, + include=include, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + + return response + + async def list( + self, + conversation_id: str, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + include: Union[List[ResponseIncludable], NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[NotGiven, Literal["asc", "desc"]] = NOT_GIVEN, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> Any: + response = await self.openai_client.conversations.items.list( + conversation_id=conversation_id, + after=after, + include=include, + 
limit=limit, + order=order, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + + return response + + async def delete( + self, + item_id: str, + *, + conversation_id: str, + extra_headers: Optional[Headers] = None, + extra_query: Optional[Query] = None, + extra_body: Optional[Body] = None, + timeout: Union[float, httpx.Timeout, NotGiven] = NOT_GIVEN, + ) -> Any: + response = await self.openai_client.conversations.items.delete( + item_id=item_id, + conversation_id=conversation_id, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + + return response diff --git a/portkey_ai/api_resources/apis/images.py b/portkey_ai/api_resources/apis/images.py index c9a22660..db7c942b 100644 --- a/portkey_ai/api_resources/apis/images.py +++ b/portkey_ai/api_resources/apis/images.py @@ -1,10 +1,17 @@ import json -from typing import Union, Any +from typing import List, Literal, Optional, Union import typing +from portkey_ai._vendor.openai._streaming import AsyncStream, Stream +from portkey_ai._vendor.openai.types.image_edit_stream_event import ImageEditStreamEvent +from portkey_ai._vendor.openai.types.image_gen_stream_event import ImageGenStreamEvent +from portkey_ai._vendor.openai.types.images_response import ( + ImagesResponse as OpenAIImagesResponse, +) from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.types.image_type import ImagesResponse -from ..._vendor.openai._types import NotGiven, NOT_GIVEN +from ..._vendor.openai._types import FileTypes, NotGiven, NOT_GIVEN +from typing_extensions import overload class Images(APIResource): @@ -38,65 +45,285 @@ def create_variation( return data - @typing.no_type_check + @overload def edit( self, *, + image: Union[FileTypes, List[FileTypes]], prompt: str, - image, - mask: Union[Any, NotGiven] = NOT_GIVEN, + background: Union[Optional[str], NotGiven] = NOT_GIVEN, + input_fidelity: Union[Optional[str], NotGiven] = NOT_GIVEN, + mask: Union[FileTypes, NotGiven] = NOT_GIVEN, model: Union[str, NotGiven] = NOT_GIVEN, - n: Union[int, NotGiven] = NOT_GIVEN, - response_format: Union[str, NotGiven] = NOT_GIVEN, - size: Union[str, NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + stream: Union[Optional[Literal[False]], NotGiven] = NOT_GIVEN, user: Union[str, NotGiven] = NOT_GIVEN, **kwargs - ) -> ImagesResponse: - response = self.openai_client.with_raw_response.images.edit( - prompt=prompt, - image=image, - mask=mask, - model=model, - n=n, - response_format=response_format, - size=size, - user=user, - extra_body=kwargs, - ) - data = ImagesResponse(**json.loads(response.text)) - data._headers = response.headers + ) -> Union[OpenAIImagesResponse, ImagesResponse]: + ... 
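
The `Conversations` and `ConversationsItems` resources above mirror the OpenAI conversations API through the Portkey client, re-wrapping raw responses as `Conversation` objects with the upstream headers attached. A minimal round-trip sketch, assuming a configured `Portkey` client (the API key and provider slug below are placeholders):

```python
from portkey_ai import Portkey

portkey = Portkey(api_key="PORTKEY_API_KEY", provider="@openai-provider")  # illustrative credentials

# create a conversation seeded with one user message
conversation = portkey.conversations.create(
    items=[{"role": "user", "content": "Hello!"}],
    metadata={"topic": "demo"},
)

# read, update, and enumerate it via the wrapped endpoints
fetched = portkey.conversations.retrieve(conversation_id=conversation.id)
portkey.conversations.update(conversation_id=conversation.id, metadata={"topic": "demo-v2"})
items = portkey.conversations.items.list(conversation_id=conversation.id, limit=10)

# clean up; returns a ConversationDeletedResource
deleted = portkey.conversations.delete(conversation_id=conversation.id)
```
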
- return data + @overload + def edit( + self, + *, + image: Union[FileTypes, List[FileTypes]], + prompt: str, + stream: Literal[True], + background: Union[Optional[str], NotGiven] = NOT_GIVEN, + input_fidelity: Union[Optional[str], NotGiven] = NOT_GIVEN, + mask: Union[FileTypes, NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Stream[ImageEditStreamEvent]: + ... - @typing.no_type_check + @overload + def edit( + self, + *, + image: Union[FileTypes, List[FileTypes]], + prompt: str, + stream: bool, + background: Union[Optional[str], NotGiven] = NOT_GIVEN, + input_fidelity: Union[Optional[str], NotGiven] = NOT_GIVEN, + mask: Union[FileTypes, NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Union[OpenAIImagesResponse, Stream[ImageEditStreamEvent], ImagesResponse]: + ... + + def edit( + self, + *, + image: Union[FileTypes, List[FileTypes]], + prompt: str, + background: Union[Optional[str], NotGiven] = NOT_GIVEN, + input_fidelity: Union[Optional[str], NotGiven] = NOT_GIVEN, + mask: Union[FileTypes, NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + stream: Union[Optional[Literal[False]], Literal[True], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Union[OpenAIImagesResponse, Stream[ImageEditStreamEvent], ImagesResponse]: + extra_headers = kwargs.pop("extra_headers", None) + extra_query = kwargs.pop("extra_query", None) + extra_body = kwargs.pop("extra_body", None) + timeout = kwargs.pop("timeout", None) + + if stream: + return self.openai_client.images.edit( # type: ignore[misc] + image=image, + prompt=prompt, + background=background, # type: ignore[arg-type] + input_fidelity=input_fidelity, # type: ignore[arg-type] + mask=mask, + model=model, + n=n, + output_compression=output_compression, + output_format=output_format, # type: ignore[arg-type] + partial_images=partial_images, + quality=quality, # type: ignore[arg-type] + response_format=response_format, # type: ignore[arg-type] + size=size, # type: ignore[arg-type] + stream=stream, # type: ignore[arg-type] + user=user, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + else: + 
response = self.openai_client.with_raw_response.images.edit( # type: ignore[misc] + image=image, + prompt=prompt, + background=background, # type: ignore[arg-type] + input_fidelity=input_fidelity, # type: ignore[arg-type] + mask=mask, + model=model, + n=n, + output_compression=output_compression, + output_format=output_format, # type: ignore[arg-type] + partial_images=partial_images, + quality=quality, # type: ignore[arg-type] + response_format=response_format, # type: ignore[arg-type] + size=size, # type: ignore[arg-type] + user=user, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + data = ImagesResponse(**json.loads(response.text)) + data._headers = response.headers + + return data + + @overload def generate( self, *, prompt: str, + background: Union[Optional[str], NotGiven] = NOT_GIVEN, model: Union[str, NotGiven] = NOT_GIVEN, - n: Union[int, NotGiven] = NOT_GIVEN, - quality: Union[str, NotGiven] = NOT_GIVEN, - response_format: Union[str, NotGiven] = NOT_GIVEN, - size: Union[str, NotGiven] = NOT_GIVEN, + moderation: Union[Optional[str], NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + stream: Union[Optional[Literal[False]], NotGiven] = NOT_GIVEN, + style: Union[Optional[str], NotGiven] = NOT_GIVEN, user: Union[str, NotGiven] = NOT_GIVEN, - style: Union[str, NotGiven] = NOT_GIVEN, **kwargs - ) -> ImagesResponse: - response = self.openai_client.with_raw_response.images.generate( - prompt=prompt, - model=model, - n=n, - quality=quality, - response_format=response_format, - size=size, - style=style, - user=user, - extra_body=kwargs, - ) - data = ImagesResponse(**json.loads(response.text)) - data._headers = response.headers + ) -> Union[OpenAIImagesResponse, ImagesResponse]: + ... - return data + @overload + def generate( + self, + *, + prompt: str, + stream: Literal[True], + background: Union[Optional[str], NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, + moderation: Union[Optional[str], NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + style: Union[Optional[str], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Stream[ImageGenStreamEvent]: + ... 
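
With the widened `generate` signature, passing `stream=True` bypasses the raw-response wrapper and returns the vendored `Stream[ImageGenStreamEvent]` directly. A sketch of consuming partial-image events (the model, prompt, and event handling are illustrative):

```python
from portkey_ai import Portkey

portkey = Portkey(api_key="PORTKEY_API_KEY", provider="@openai-provider")  # illustrative credentials

stream = portkey.images.generate(
    prompt="A watercolor fox in the snow",
    model="gpt-image-1",
    partial_images=2,  # request intermediate renders as they complete
    stream=True,
)
for event in stream:
    # events arrive as partial-image and completed variants; dispatch on event.type
    print(event.type)
```
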
+ + @overload + def generate( + self, + *, + prompt: str, + stream: bool, + background: Union[Optional[str], NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, + moderation: Union[Optional[str], NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + style: Union[Optional[str], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Union[OpenAIImagesResponse, Stream[ImageGenStreamEvent], ImagesResponse]: + ... + + def generate( + self, + *, + prompt: str, + background: Union[Optional[str], NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, + moderation: Union[Optional[str], NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + stream: Union[ + Optional[Union[Literal[False], Literal[True]]], NotGiven + ] = NOT_GIVEN, + style: Union[Optional[str], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Union[OpenAIImagesResponse, Stream[ImageGenStreamEvent], ImagesResponse]: + extra_headers = kwargs.pop("extra_headers", None) + extra_query = kwargs.pop("extra_query", None) + extra_body = kwargs.pop("extra_body", None) + timeout = kwargs.pop("timeout", None) + + if stream: + return self.openai_client.images.generate( # type: ignore[misc] + prompt=prompt, + background=background, # type: ignore[arg-type] + model=model, + moderation=moderation, # type: ignore[arg-type] + n=n, + output_compression=output_compression, + output_format=output_format, # type: ignore[arg-type] + partial_images=partial_images, + quality=quality, # type: ignore[arg-type] + response_format=response_format, # type: ignore[arg-type] + size=size, # type: ignore[arg-type] + stream=stream, # type: ignore[arg-type] + style=style, # type: ignore[arg-type] + user=user, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + else: + response = self.openai_client.with_raw_response.images.generate( # type: ignore[misc] + prompt=prompt, + background=background, # type: ignore[arg-type] + model=model, + moderation=moderation, # type: ignore[arg-type] + n=n, + output_compression=output_compression, + output_format=output_format, # type: ignore[arg-type] + partial_images=partial_images, + quality=quality, # type: ignore[arg-type] + response_format=response_format, # type: ignore[arg-type] + size=size, # type: ignore[arg-type] + style=style, # type: ignore[arg-type] + user=user, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + data = ImagesResponse(**json.loads(response.text)) + data._headers = response.headers + + return data class AsyncImages(AsyncAPIResource): @@ -128,62 +355,282 @@ async def create_variation( return data - @typing.no_type_check + @overload async def edit( self, *, + image: 
Union[FileTypes, List[FileTypes]], prompt: str, - image, - mask: Union[Any, NotGiven] = NOT_GIVEN, + background: Union[Optional[str], NotGiven] = NOT_GIVEN, + input_fidelity: Union[Optional[str], NotGiven] = NOT_GIVEN, + mask: Union[FileTypes, NotGiven] = NOT_GIVEN, model: Union[str, NotGiven] = NOT_GIVEN, - n: Union[int, NotGiven] = NOT_GIVEN, - response_format: Union[str, NotGiven] = NOT_GIVEN, - size: Union[str, NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + stream: Union[Optional[Literal[False]], NotGiven] = NOT_GIVEN, user: Union[str, NotGiven] = NOT_GIVEN, **kwargs - ) -> ImagesResponse: - response = await self.openai_client.with_raw_response.images.edit( - prompt=prompt, - image=image, - mask=mask, - model=model, - n=n, - response_format=response_format, - size=size, - user=user, - extra_body=kwargs, - ) - data = ImagesResponse(**json.loads(response.text)) - data._headers = response.headers + ) -> Union[OpenAIImagesResponse, ImagesResponse]: + ... - return data + @overload + async def edit( + self, + *, + image: Union[FileTypes, List[FileTypes]], + prompt: str, + stream: Literal[True], + background: Union[Optional[str], NotGiven] = NOT_GIVEN, + input_fidelity: Union[Optional[str], NotGiven] = NOT_GIVEN, + mask: Union[FileTypes, NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> AsyncStream[ImageEditStreamEvent]: + ... - @typing.no_type_check + @overload + async def edit( + self, + *, + image: Union[FileTypes, List[FileTypes]], + prompt: str, + stream: bool, + background: Union[Optional[str], NotGiven] = NOT_GIVEN, + input_fidelity: Union[Optional[str], NotGiven] = NOT_GIVEN, + mask: Union[FileTypes, NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Union[OpenAIImagesResponse, AsyncStream[ImageEditStreamEvent], ImagesResponse]: + ... 
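
The async overloads behave the same way: with `stream=True`, `AsyncImages.edit` awaits to an `AsyncStream[ImageEditStreamEvent]`. A hedged sketch (file name and model are placeholders):

```python
import asyncio
from portkey_ai import AsyncPortkey

async def main() -> None:
    portkey = AsyncPortkey(api_key="PORTKEY_API_KEY", provider="@openai-provider")  # illustrative

    with open("input.png", "rb") as image_file:
        stream = await portkey.images.edit(
            image=image_file,
            prompt="Make the sky pink",
            model="gpt-image-1",
            stream=True,
        )
        async for event in stream:
            print(event.type)  # partial-image and completed edit events

asyncio.run(main())
```
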
+ + async def edit( + self, + *, + image: Union[FileTypes, List[FileTypes]], + prompt: str, + background: Union[Optional[str], NotGiven] = NOT_GIVEN, + input_fidelity: Union[Optional[str], NotGiven] = NOT_GIVEN, + mask: Union[FileTypes, NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + stream: Union[Optional[Literal[False]], Literal[True], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Union[OpenAIImagesResponse, AsyncStream[ImageEditStreamEvent], ImagesResponse]: + extra_headers = kwargs.pop("extra_headers", None) + extra_query = kwargs.pop("extra_query", None) + extra_body = kwargs.pop("extra_body", None) + timeout = kwargs.pop("timeout", None) + + if stream: + return await self.openai_client.images.edit( # type: ignore[misc] + image=image, + prompt=prompt, + background=background, # type: ignore[arg-type] + input_fidelity=input_fidelity, # type: ignore[arg-type] + mask=mask, + model=model, + n=n, + output_compression=output_compression, + output_format=output_format, # type: ignore[arg-type] + partial_images=partial_images, + quality=quality, # type: ignore[arg-type] + response_format=response_format, # type: ignore[arg-type] + size=size, # type: ignore[arg-type] + stream=stream, # type: ignore[arg-type] + user=user, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + else: + response = await self.openai_client.with_raw_response.images.edit( # type: ignore[misc] + image=image, + prompt=prompt, + background=background, # type: ignore[arg-type] + input_fidelity=input_fidelity, # type: ignore[arg-type] + mask=mask, + model=model, + n=n, + output_compression=output_compression, + output_format=output_format, # type: ignore[arg-type] + partial_images=partial_images, + quality=quality, # type: ignore[arg-type] + response_format=response_format, # type: ignore[arg-type] + size=size, # type: ignore[arg-type] + user=user, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + data = ImagesResponse(**json.loads(response.text)) + data._headers = response.headers + + return data + + @overload async def generate( self, *, prompt: str, + background: Union[Optional[str], NotGiven] = NOT_GIVEN, model: Union[str, NotGiven] = NOT_GIVEN, - n: Union[int, NotGiven] = NOT_GIVEN, - quality: Union[str, NotGiven] = NOT_GIVEN, - response_format: Union[str, NotGiven] = NOT_GIVEN, - size: Union[str, NotGiven] = NOT_GIVEN, + moderation: Union[Optional[str], NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + stream: Union[Optional[Literal[False]], NotGiven] = NOT_GIVEN, + style: Union[Optional[str], NotGiven] = NOT_GIVEN, user: Union[str, NotGiven] = NOT_GIVEN, - style: Union[str, 
NotGiven] = NOT_GIVEN, **kwargs - ) -> ImagesResponse: - response = await self.openai_client.with_raw_response.images.generate( - prompt=prompt, - model=model, - n=n, - quality=quality, - response_format=response_format, - size=size, - style=style, - user=user, - extra_body=kwargs, - ) - data = ImagesResponse(**json.loads(response.text)) - data._headers = response.headers + ) -> Union[OpenAIImagesResponse, ImagesResponse]: + ... - return data + @overload + async def generate( + self, + *, + prompt: str, + stream: Literal[True], + background: Union[Optional[str], NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, + moderation: Union[Optional[str], NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + style: Union[Optional[str], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> AsyncStream[ImageGenStreamEvent]: + ... + + @overload + async def generate( + self, + *, + prompt: str, + stream: bool, + background: Union[Optional[str], NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, + moderation: Union[Optional[str], NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + style: Union[Optional[str], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Union[OpenAIImagesResponse, AsyncStream[ImageGenStreamEvent], ImagesResponse]: + ... 
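
Without `stream`, the async `generate` path still goes through `with_raw_response` and re-wraps the payload as Portkey's `ImagesResponse`, setting `_headers` on the result. A non-streaming sketch, assuming `ImagesResponse` exposes the same `get_headers()` helper as the other wrapped Portkey response types:

```python
import asyncio
from portkey_ai import AsyncPortkey

async def main() -> None:
    portkey = AsyncPortkey(api_key="PORTKEY_API_KEY", provider="@openai-provider")  # illustrative

    result = await portkey.images.generate(
        prompt="A watercolor fox in the snow",
        model="gpt-image-1",
        n=1,
    )
    # the upstream headers captured from the raw response (assumed helper)
    print(result.get_headers())
    print(result.data[0].b64_json is not None)

asyncio.run(main())
```
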
+ + async def generate( + self, + *, + prompt: str, + background: Union[Optional[str], NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, + moderation: Union[Optional[str], NotGiven] = NOT_GIVEN, + n: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_compression: Union[Optional[int], NotGiven] = NOT_GIVEN, + output_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + partial_images: Union[Optional[int], NotGiven] = NOT_GIVEN, + quality: Union[Optional[str], NotGiven] = NOT_GIVEN, + response_format: Union[Optional[str], NotGiven] = NOT_GIVEN, + size: Union[Optional[str], NotGiven] = NOT_GIVEN, + stream: Union[ + Optional[Union[Literal[False], Literal[True]]], NotGiven + ] = NOT_GIVEN, + style: Union[Optional[str], NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + **kwargs + ) -> Union[OpenAIImagesResponse, AsyncStream[ImageGenStreamEvent], ImagesResponse]: + extra_headers = kwargs.pop("extra_headers", None) + extra_query = kwargs.pop("extra_query", None) + extra_body = kwargs.pop("extra_body", None) + timeout = kwargs.pop("timeout", None) + + if stream: + return await self.openai_client.images.generate( # type: ignore[misc] + prompt=prompt, + background=background, # type: ignore[arg-type] + model=model, + moderation=moderation, # type: ignore[arg-type] + n=n, + output_compression=output_compression, + output_format=output_format, # type: ignore[arg-type] + partial_images=partial_images, + quality=quality, # type: ignore[arg-type] + response_format=response_format, # type: ignore[arg-type] + size=size, # type: ignore[arg-type] + stream=stream, # type: ignore[arg-type] + style=style, # type: ignore[arg-type] + user=user, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + else: + response = await self.openai_client.with_raw_response.images.generate( # type: ignore[misc] + prompt=prompt, + background=background, # type: ignore[arg-type] + model=model, + moderation=moderation, # type: ignore[arg-type] + n=n, + output_compression=output_compression, + output_format=output_format, # type: ignore[arg-type] + partial_images=partial_images, + quality=quality, # type: ignore[arg-type] + response_format=response_format, # type: ignore[arg-type] + size=size, # type: ignore[arg-type] + style=style, # type: ignore[arg-type] + user=user, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + ) + data = ImagesResponse(**json.loads(response.text)) + data._headers = response.headers + + return data diff --git a/portkey_ai/api_resources/apis/main_realtime.py b/portkey_ai/api_resources/apis/main_realtime.py new file mode 100644 index 00000000..76d78480 --- /dev/null +++ b/portkey_ai/api_resources/apis/main_realtime.py @@ -0,0 +1,109 @@ +from typing import Any +from portkey_ai._vendor.openai.resources.realtime.realtime import ( + AsyncRealtimeConnectionManager, + RealtimeConnectionManager, +) +from portkey_ai._vendor.openai.types.websocket_connection_options import ( + WebsocketConnectionOptions, +) +from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from portkey_ai.api_resources.types.shared_types import Headers, Query + + +class MainRealtime(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.client_secrets = ClientSecrets(client) + + def connect( + self, + *, + model: str, + 
extra_query: Query = {}, + extra_headers: Headers = {}, + websocket_connection_options: WebsocketConnectionOptions = {}, + ) -> RealtimeConnectionManager: + return self.openai_client.realtime.connect( + model=model, + extra_query=extra_query, + extra_headers=extra_headers, + websocket_connection_options=websocket_connection_options, + ) + + +class AsyncMainRealtime(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.client_secrets = AsyncClientSecrets(client) + + def connect( + self, + *, + model: str, + extra_query: Query = {}, + extra_headers: Headers = {}, + websocket_connection_options: WebsocketConnectionOptions = {}, + ) -> AsyncRealtimeConnectionManager: + return self.openai_client.realtime.connect( + model=model, + extra_query=extra_query, + extra_headers=extra_headers, + websocket_connection_options=websocket_connection_options, + ) + + +class ClientSecrets(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def create( + self, + *, + expires_after: Any, + session: Any, + **kwargs: Any, + ) -> Any: + extra_headers = kwargs.pop("extra_headers", None) + extra_query = kwargs.pop("extra_query", None) + extra_body = kwargs.pop("extra_body", None) + timeout = kwargs.pop("timeout", None) + + return self.openai_client.realtime.client_secrets.create( + expires_after=expires_after, + session=session, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body={**(extra_body or {}), **kwargs}, + timeout=timeout, + ) + + +class AsyncClientSecrets(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def create( + self, + *, + expires_after: Any, + session: Any, + **kwargs: Any, + ) -> Any: + extra_headers = kwargs.pop("extra_headers", None) + extra_query = kwargs.pop("extra_query", None) + extra_body = kwargs.pop("extra_body", None) + timeout = kwargs.pop("timeout", None) + + return await self.openai_client.realtime.client_secrets.create( + expires_after=expires_after, + session=session, + extra_headers=extra_headers, + extra_query=extra_query, + extra_body={**(extra_body or {}), **kwargs}, + timeout=timeout, + ) diff --git a/portkey_ai/api_resources/apis/responses.py b/portkey_ai/api_resources/apis/responses.py index aa02eb1f..643df8f0 100644 --- a/portkey_ai/api_resources/apis/responses.py +++ b/portkey_ai/api_resources/apis/responses.py @@ -47,8 +47,8 @@ def __init__(self, client: Portkey) -> None: def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, + input: Union[str, ResponseInputParam, NotGiven] = NOT_GIVEN, + model: Union[ResponsesModel, NotGiven] = NOT_GIVEN, include: Union[Optional[List[ResponseIncludable]], NotGiven] = NOT_GIVEN, instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, max_output_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, @@ -73,8 +73,8 @@ def create( def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, + input: Union[str, ResponseInputParam, NotGiven] = NOT_GIVEN, + model: Union[ResponsesModel, NotGiven] = NOT_GIVEN, stream: Literal[True], include: Union[Optional[List[ResponseIncludable]], NotGiven] = NOT_GIVEN, instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, @@ -99,8 +99,8 @@ def create( def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, + input: Union[str, 
ResponseInputParam, NotGiven] = NOT_GIVEN, + model: Union[ResponsesModel, NotGiven] = NOT_GIVEN, stream: bool, include: Union[Optional[List[ResponseIncludable]], NotGiven] = NOT_GIVEN, instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, @@ -124,8 +124,8 @@ def create( def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, + input: Union[str, ResponseInputParam, NotGiven] = NOT_GIVEN, + model: Union[ResponsesModel, NotGiven] = NOT_GIVEN, include: Union[Optional[List[ResponseIncludable]], NotGiven] = NOT_GIVEN, instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, max_output_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, @@ -170,7 +170,7 @@ def create( user=user, extra_headers=extra_headers, extra_query=extra_query, - extra_body=extra_body, + extra_body={**(extra_body or {}), **kwargs}, timeout=timeout, ) @@ -179,6 +179,8 @@ def retrieve( response_id: str, *, include: Union[List[ResponseIncludable], NotGiven] = NOT_GIVEN, + include_obfuscation: Union[bool, NotGiven] = NOT_GIVEN, + starting_after: Union[int, NotGiven] = NOT_GIVEN, **kwargs, ) -> ResponseType: extra_headers = kwargs.pop("extra_headers", None) @@ -189,9 +191,11 @@ def retrieve( response = self.openai_client.with_raw_response.responses.retrieve( response_id=response_id, include=include, + include_obfuscation=include_obfuscation, + starting_after=starting_after, extra_headers=extra_headers, extra_query=extra_query, - extra_body=extra_body, + extra_body={**(extra_body or {}), **kwargs}, timeout=timeout, ) @@ -210,7 +214,7 @@ def delete(self, response_id: str, **kwargs) -> None: response_id=response_id, extra_headers=extra_headers, extra_query=extra_query, - extra_body=extra_body, + extra_body={**(extra_body or {}), **kwargs}, timeout=timeout, ) @@ -229,6 +233,9 @@ def stream( previous_response_id: Union[str, NotGiven] = NOT_GIVEN, reasoning: Union[Reasoning, NotGiven] = NOT_GIVEN, store: Union[bool, NotGiven] = NOT_GIVEN, + stream_options: Union[ + response_create_params.StreamOptions, NotGiven + ] = NOT_GIVEN, temperature: Union[float, NotGiven] = NOT_GIVEN, text: Union[ResponseTextConfigParam, NotGiven] = NOT_GIVEN, tool_choice: Union[response_create_params.ToolChoice, NotGiven] = NOT_GIVEN, @@ -254,6 +261,7 @@ def stream( previous_response_id=previous_response_id, reasoning=reasoning, store=store, + stream_options=stream_options, temperature=temperature, text=text, tool_choice=tool_choice, @@ -262,15 +270,15 @@ def stream( user=user, extra_headers=extra_headers, extra_query=extra_query, - extra_body=extra_body, + extra_body={**(extra_body or {}), **kwargs}, timeout=timeout, ) def parse( self, *, - input: Union[str, ResponseInputParam], - model: Union[str, ChatModel], + input: Union[str, ResponseInputParam, NotGiven] = NOT_GIVEN, + model: Union[str, ChatModel, NotGiven] = NOT_GIVEN, text_format: Union[type[TextFormatT], NotGiven] = NOT_GIVEN, # type: ignore[type-arg] tools: Union[Iterable[ParseableToolParam], NotGiven] = NOT_GIVEN, include: Union[List[ResponseIncludable], NotGiven] = NOT_GIVEN, @@ -317,7 +325,7 @@ def parse( user=user, extra_headers=extra_headers, extra_query=extra_query, - extra_body=extra_body, + extra_body={**(extra_body or {}), **kwargs}, timeout=timeout, ) @@ -334,7 +342,7 @@ def cancel( response_id=response_id, extra_headers=extra_headers, extra_query=extra_query, - extra_body=extra_body, + extra_body={**(extra_body or {}), **kwargs}, timeout=timeout, ) @@ -349,7 +357,6 @@ def list( response_id: str, *, after: Union[str, NotGiven] = NOT_GIVEN, - before: 
Union[str, NotGiven] = NOT_GIVEN, include: Union[List[ResponseIncludable], NotGiven] = NOT_GIVEN, limit: Union[int, NotGiven] = NOT_GIVEN, order: Union[Literal["asc", "desc"], NotGiven] = NOT_GIVEN, @@ -362,13 +369,12 @@ def list( response = self.openai_client.responses.input_items.list( response_id=response_id, after=after, - before=before, include=include, limit=limit, order=order, extra_headers=extra_headers, extra_query=extra_query, - extra_body=extra_body, + extra_body={**(extra_body or {}), **kwargs}, timeout=timeout, ) @@ -385,8 +391,8 @@ def __init__(self, client: AsyncPortkey) -> None: async def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, + input: Union[str, ResponseInputParam, NotGiven] = NOT_GIVEN, + model: Union[ResponsesModel, NotGiven] = NOT_GIVEN, include: Union[Optional[List[ResponseIncludable]], NotGiven] = NOT_GIVEN, instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, max_output_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, @@ -411,8 +417,8 @@ async def create( async def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, + input: Union[str, ResponseInputParam, NotGiven] = NOT_GIVEN, + model: Union[ResponsesModel, NotGiven] = NOT_GIVEN, stream: Literal[True], include: Union[Optional[List[ResponseIncludable]], NotGiven] = NOT_GIVEN, instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, @@ -437,8 +443,8 @@ async def create( async def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, + input: Union[str, ResponseInputParam, NotGiven] = NOT_GIVEN, + model: Union[ResponsesModel, NotGiven] = NOT_GIVEN, stream: bool, include: Union[Optional[List[ResponseIncludable]], NotGiven] = NOT_GIVEN, instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, @@ -462,8 +468,8 @@ async def create( async def create( self, *, - input: Union[str, ResponseInputParam], - model: ResponsesModel, + input: Union[str, ResponseInputParam, NotGiven] = NOT_GIVEN, + model: Union[ResponsesModel, NotGiven] = NOT_GIVEN, include: Union[Optional[List[ResponseIncludable]], NotGiven] = NOT_GIVEN, instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, max_output_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, @@ -508,7 +514,7 @@ async def create( user=user, extra_headers=extra_headers, extra_query=extra_query, - extra_body=extra_body, + extra_body={**(extra_body or {}), **kwargs}, timeout=timeout, ) @@ -517,6 +523,8 @@ async def retrieve( response_id: str, *, include: Union[List[ResponseIncludable], NotGiven] = NOT_GIVEN, + include_obfuscation: Union[bool, NotGiven] = NOT_GIVEN, + starting_after: Union[int, NotGiven] = NOT_GIVEN, **kwargs, ) -> ResponseType: extra_headers = kwargs.pop("extra_headers", None) @@ -527,9 +535,11 @@ async def retrieve( response = await self.openai_client.with_raw_response.responses.retrieve( response_id=response_id, include=include, + include_obfuscation=include_obfuscation, + starting_after=starting_after, extra_headers=extra_headers, extra_query=extra_query, - extra_body=extra_body, + extra_body={**(extra_body or {}), **kwargs}, timeout=timeout, ) @@ -548,7 +558,7 @@ async def delete(self, response_id: str, **kwargs) -> None: response_id=response_id, extra_headers=extra_headers, extra_query=extra_query, - extra_body=extra_body, + extra_body={**(extra_body or {}), **kwargs}, timeout=timeout, ) @@ -567,6 +577,9 @@ def stream( previous_response_id: Union[str, NotGiven] = NOT_GIVEN, reasoning: Union[Reasoning, NotGiven] = NOT_GIVEN, store: Union[bool, NotGiven] = 
NOT_GIVEN, + stream_options: Union[ + response_create_params.StreamOptions, NotGiven + ] = NOT_GIVEN, temperature: Union[float, NotGiven] = NOT_GIVEN, text: Union[ResponseTextConfigParam, NotGiven] = NOT_GIVEN, tool_choice: Union[response_create_params.ToolChoice, NotGiven] = NOT_GIVEN, @@ -592,6 +605,7 @@ def stream( previous_response_id=previous_response_id, reasoning=reasoning, store=store, + stream_options=stream_options, temperature=temperature, text=text, tool_choice=tool_choice, @@ -600,15 +614,15 @@ def stream( user=user, extra_headers=extra_headers, extra_query=extra_query, - extra_body=extra_body, + extra_body={**(extra_body or {}), **kwargs}, timeout=timeout, ) async def parse( self, *, - input: Union[str, ResponseInputParam], - model: Union[str, ChatModel], + input: Union[str, ResponseInputParam, NotGiven] = NOT_GIVEN, + model: Union[str, ChatModel, NotGiven] = NOT_GIVEN, text_format: Union[type[TextFormatT], NotGiven] = NOT_GIVEN, # type: ignore[type-arg] tools: Union[Iterable[ParseableToolParam], NotGiven] = NOT_GIVEN, include: Union[List[ResponseIncludable], NotGiven] = NOT_GIVEN, @@ -655,7 +669,7 @@ async def parse( user=user, extra_headers=extra_headers, extra_query=extra_query, - extra_body=extra_body, + extra_body={**(extra_body or {}), **kwargs}, timeout=timeout, ) @@ -672,7 +686,7 @@ async def cancel( response_id=response_id, extra_headers=extra_headers, extra_query=extra_query, - extra_body=extra_body, + extra_body={**(extra_body or {}), **kwargs}, timeout=timeout, ) @@ -687,7 +701,6 @@ async def list( response_id: str, *, after: Union[str, NotGiven] = NOT_GIVEN, - before: Union[str, NotGiven] = NOT_GIVEN, include: Union[List[ResponseIncludable], NotGiven] = NOT_GIVEN, limit: Union[int, NotGiven] = NOT_GIVEN, order: Union[Literal["asc", "desc"], NotGiven] = NOT_GIVEN, @@ -700,13 +713,12 @@ async def list( response = await self.openai_client.responses.input_items.list( response_id=response_id, after=after, - before=before, include=include, limit=limit, order=order, extra_headers=extra_headers, extra_query=extra_query, - extra_body=extra_body, + extra_body={**(extra_body or {}), **kwargs}, timeout=timeout, ) diff --git a/portkey_ai/api_resources/apis/vector_stores.py b/portkey_ai/api_resources/apis/vector_stores.py index e6de8a15..3734ab50 100644 --- a/portkey_ai/api_resources/apis/vector_stores.py +++ b/portkey_ai/api_resources/apis/vector_stores.py @@ -1,5 +1,5 @@ import json -from typing import Any, Iterable, List, Optional, Union +from typing import Any, Dict, Iterable, List, Optional, Union import typing from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey @@ -229,6 +229,9 @@ def create_and_poll( file_id: str, *, vector_store_id: str, + attributes: Union[ + Optional[Dict[str, Union[str, float, bool]]], NotGiven + ] = NOT_GIVEN, poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, chunking_strategy: Union[Any, NotGiven] = NOT_GIVEN, **kwargs, @@ -236,6 +239,7 @@ def create_and_poll( response = self.openai_client.vector_stores.files.create_and_poll( file_id=file_id, vector_store_id=vector_store_id, + attributes=attributes, poll_interval_ms=poll_interval_ms, chunking_strategy=chunking_strategy, **kwargs, @@ -284,6 +288,9 @@ def upload_and_poll( *, vector_store_id: str, file: FileTypes, + attributes: Union[ + Optional[Dict[str, Union[str, float, bool]]], NotGiven + ] = NOT_GIVEN, poll_interval_ms: Union[int, NotGiven] = NOT_GIVEN, chunking_strategy: Union[Any, NotGiven] = 
NOT_GIVEN, **kwargs, @@ -291,6 +298,7 @@ def upload_and_poll( response = self.openai_client.vector_stores.files.upload_and_poll( vector_store_id=vector_store_id, file=file, + attributes=attributes, poll_interval_ms=poll_interval_ms, chunking_strategy=chunking_strategy, **kwargs, diff --git a/portkey_ai/api_resources/apis/webhooks.py b/portkey_ai/api_resources/apis/webhooks.py new file mode 100644 index 00000000..9731e84e --- /dev/null +++ b/portkey_ai/api_resources/apis/webhooks.py @@ -0,0 +1,75 @@ +from typing import Union +from portkey_ai._vendor.openai._types import HeadersLike +from portkey_ai._vendor.openai.types.webhooks.unwrap_webhook_event import ( + UnwrapWebhookEvent, +) +from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from portkey_ai.api_resources.client import AsyncPortkey, Portkey + + +class Webhooks(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def unwrap( + self, + payload: Union[str, bytes], + headers: HeadersLike, + *, + secret: Union[str, None] = None, + ) -> UnwrapWebhookEvent: + return self.openai_client.webhooks.unwrap( + payload=payload, + headers=headers, + secret=secret, + ) + + def verify_signature( + self, + payload: Union[str, bytes], + headers: HeadersLike, + *, + secret: Union[str, None] = None, + tolerance: int = 300, + ) -> None: + return self.openai_client.webhooks.verify_signature( + payload=payload, + headers=headers, + secret=secret, + tolerance=tolerance, + ) + + +class AsyncWebhooks(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def unwrap( + self, + payload: Union[str, bytes], + headers: HeadersLike, + *, + secret: Union[str, None] = None, + ) -> UnwrapWebhookEvent: + return self.openai_client.webhooks.unwrap( + payload=payload, + headers=headers, + secret=secret, + ) + + def verify_signature( + self, + payload: Union[str, bytes], + headers: HeadersLike, + *, + secret: Union[str, None] = None, + tolerance: int = 300, + ) -> None: + return self.openai_client.webhooks.verify_signature( + payload=payload, + headers=headers, + secret=secret, + tolerance=tolerance, + ) diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py index cd25194c..1b233d15 100644 --- a/portkey_ai/api_resources/base_client.py +++ b/portkey_ai/api_resources/base_client.py @@ -53,6 +53,7 @@ def __init__( api_key: Optional[str] = None, virtual_key: Optional[str] = None, websocket_base_url: Optional[Union[str, httpx.URL]] = None, + webhook_secret: Optional[str] = None, config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, @@ -97,6 +98,7 @@ def __init__( self.api_key = default_api_key(self.base_url, api_key) self.virtual_key = virtual_key self.websocket_base_url = websocket_base_url + self.webhook_secret = webhook_secret self.config = config self.provider = provider self.trace_id = trace_id @@ -766,6 +768,7 @@ def __init__( api_key: Optional[str] = None, virtual_key: Optional[str] = None, websocket_base_url: Optional[Union[str, httpx.URL]] = None, + webhook_secret: Optional[str] = None, config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, @@ -810,6 +813,7 @@ def __init__( self.api_key = default_api_key(self.base_url, api_key) self.virtual_key = virtual_key self.websocket_base_url = websocket_base_url + 
self.webhook_secret = webhook_secret self.config = config self.provider = provider self.trace_id = trace_id diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py index f840180f..de26b145 100644 --- a/portkey_ai/api_resources/client.py +++ b/portkey_ai/api_resources/client.py @@ -25,6 +25,7 @@ class Portkey(APIClient): fine_tuning: apis.FineTuning vector_stores: apis.VectorStores responses: apis.Responses + webhooks: apis.Webhooks evals: apis.Evals containers: apis.Containers admin: apis.Admin @@ -37,6 +38,8 @@ class Portkey(APIClient): collections: apis.Collections integrations: apis.Integrations providers: apis.Providers + realtime: apis.MainRealtime + conversations: apis.Conversations class beta: assistants: apis.Assistants @@ -57,6 +60,7 @@ def __init__( base_url: Optional[str] = None, virtual_key: Optional[str] = None, websocket_base_url: Optional[Union[str, httpx.URL]] = None, + webhook_secret: Optional[str] = None, config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, @@ -102,6 +106,7 @@ def __init__( base_url=base_url, virtual_key=virtual_key, websocket_base_url=websocket_base_url, + webhook_secret=webhook_secret, config=config, provider=provider, trace_id=trace_id, @@ -150,6 +155,7 @@ def __init__( http_client=http_client, max_retries=1, websocket_base_url=self.websocket_base_url, + webhook_secret=self.webhook_secret, ) self.completions = apis.Completion(self) @@ -167,6 +173,7 @@ def __init__( self.fine_tuning = apis.FineTuning(self) self.vector_stores = apis.VectorStores(self) self.responses = apis.Responses(self) + self.webhooks = apis.Webhooks(self) self.evals = apis.Evals(self) self.containers = apis.Containers(self) self.admin = apis.Admin(self) @@ -179,6 +186,8 @@ def __init__( self.collections = apis.Collections(self) self.integrations = apis.Integrations(self) self.providers = apis.Providers(self) + self.realtime = apis.MainRealtime(self) + self.conversations = apis.Conversations(self) self.beta = self.beta(self) # type: ignore if self.instrumentation: @@ -201,6 +210,7 @@ def copy( base_url: Optional[str] = None, virtual_key: Optional[str] = None, websocket_base_url: Optional[Union[str, httpx.URL]] = None, + webhook_secret: Optional[str] = None, config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, @@ -246,6 +256,7 @@ def copy( base_url=base_url or self.base_url, virtual_key=virtual_key or self.virtual_key, websocket_base_url=websocket_base_url or self.websocket_base_url, + webhook_secret=webhook_secret or self.webhook_secret, config=config or self.config, provider=provider or self.provider, trace_id=trace_id or self.trace_id, @@ -346,6 +357,7 @@ class AsyncPortkey(AsyncAPIClient): fine_tuning: apis.AsyncFineTuning vector_stores: apis.AsyncVectorStores responses: apis.AsyncResponses + webhooks: apis.AsyncWebhooks evals: apis.AsyncEvals containers: apis.AsyncContainers admin: apis.AsyncAdmin @@ -358,6 +370,8 @@ class AsyncPortkey(AsyncAPIClient): collections: apis.AsyncCollections integrations: apis.AsyncIntegrations providers: apis.AsyncProviders + realtime: apis.AsyncMainRealtime + conversations: apis.AsyncConversations class beta: assistants: apis.AsyncAssistants @@ -378,6 +392,7 @@ def __init__( base_url: Optional[str] = None, virtual_key: Optional[str] = None, websocket_base_url: Optional[Union[str, httpx.URL]] = None, + webhook_secret: Optional[str] = None, config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = 
None, trace_id: Optional[str] = None, @@ -423,6 +438,7 @@ def __init__( base_url=base_url, virtual_key=virtual_key, websocket_base_url=websocket_base_url, + webhook_secret=webhook_secret, config=config, provider=provider, trace_id=trace_id, @@ -471,6 +487,7 @@ def __init__( http_client=http_client, max_retries=1, websocket_base_url=self.websocket_base_url, + webhook_secret=self.webhook_secret, ) self.completions = apis.AsyncCompletion(self) @@ -488,6 +505,7 @@ def __init__( self.fine_tuning = apis.AsyncFineTuning(self) self.vector_stores = apis.AsyncVectorStores(self) self.responses = apis.AsyncResponses(self) + self.webhooks = apis.AsyncWebhooks(self) self.evals = apis.AsyncEvals(self) self.containers = apis.AsyncContainers(self) self.admin = apis.AsyncAdmin(self) @@ -500,6 +518,8 @@ def __init__( self.collections = apis.AsyncCollections(self) self.integrations = apis.AsyncIntegrations(self) self.providers = apis.AsyncProviders(self) + self.realtime = apis.AsyncMainRealtime(self) + self.conversations = apis.AsyncConversations(self) self.beta = self.beta(self) # type: ignore if self.instrumentation: @@ -522,6 +542,7 @@ def copy( base_url: Optional[str] = None, virtual_key: Optional[str] = None, websocket_base_url: Optional[Union[str, httpx.URL]] = None, + webhook_secret: Optional[str] = None, config: Optional[Union[Mapping, str]] = None, provider: Optional[str] = None, trace_id: Optional[str] = None, @@ -567,6 +588,7 @@ def copy( base_url=base_url or self.base_url, virtual_key=virtual_key or self.virtual_key, websocket_base_url=websocket_base_url or self.websocket_base_url, + webhook_secret=webhook_secret or self.webhook_secret, config=config or self.config, provider=provider or self.provider, trace_id=trace_id or self.trace_id, diff --git a/portkey_ai/api_resources/types/conversation_type.py b/portkey_ai/api_resources/types/conversation_type.py new file mode 100644 index 00000000..06c0fafa --- /dev/null +++ b/portkey_ai/api_resources/types/conversation_type.py @@ -0,0 +1,47 @@ +import json +from typing import Any, Dict, Optional +import httpx +from .utils import parse_headers +from pydantic import BaseModel, PrivateAttr + + +class Conversation(BaseModel, extra="allow"): + id: Optional[str] = None + created_at: Optional[int] = None + metadata: Optional[object] = None + object: Optional[str] = None + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) + + +class ConversationDeletedResource(BaseModel, extra="allow"): + id: Optional[str] = None + deleted: Optional[bool] = None + object: Optional[str] = None + + _headers: Optional[httpx.Headers] = PrivateAttr() + + def __str__(self): + del self._headers + return json.dumps(self.dict(), indent=4) + + def __getitem__(self, key): + return getattr(self, key, None) + + def get(self, key: str, default: Optional[Any] = None): + return getattr(self, key, None) or default + + def get_headers(self) -> Optional[Dict[str, str]]: + return parse_headers(self._headers) diff --git a/vendorize.toml b/vendorize.toml index 347e5239..426e3f8d 100644 --- a/vendorize.toml +++ b/vendorize.toml @@ -1,4 +1,4 @@ target = "portkey_ai/_vendor" packages = [ - "openai==1.86.0" + "openai==1.107.2" ] \ No newline at end of file
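
Taken together, the new `webhook_secret` option (threaded through `base_client.py` and `client.py` into the vendored OpenAI client) and the `Webhooks` resource allow webhook signature verification straight from the Portkey client. A minimal sketch, assuming the raw request body and headers come from your web framework and the `whsec_...` value is a placeholder:

```python
from portkey_ai import Portkey

portkey = Portkey(
    api_key="PORTKEY_API_KEY",   # illustrative
    webhook_secret="whsec_...",  # forwarded to the vendored OpenAI client
)

def handle_webhook(raw_body: bytes, headers: dict) -> None:
    # unwrap() verifies the signature (falling back to the client-level secret)
    # and parses the payload into a typed UnwrapWebhookEvent; it raises on a bad signature
    event = portkey.webhooks.unwrap(raw_body, headers)
    print(event.type)

    # verify_signature() is available when only the check is needed,
    # with a configurable timestamp tolerance in seconds
    portkey.webhooks.verify_signature(raw_body, headers, tolerance=300)
```
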