diff --git a/portkey_ai/__init__.py b/portkey_ai/__init__.py index e6ab0b31..1f538eb3 100644 --- a/portkey_ai/__init__.py +++ b/portkey_ai/__init__.py @@ -89,6 +89,10 @@ AsyncVirtualKeys, Logs, AsyncLogs, + BetaRealtime, + AsyncBetaRealtime, + BetaSessions, + AsyncBetaSessions, ) from portkey_ai.version import VERSION @@ -197,4 +201,8 @@ "AsyncVirtualKeys", "Logs", "AsyncLogs", + "BetaRealtime", + "AsyncBetaRealtime", + "BetaSessions", + "AsyncBetaSessions", ] diff --git a/portkey_ai/_vendor/openai-1.40.1.dist-info/INSTALLER b/portkey_ai/_vendor/openai-1.58.1.dist-info/INSTALLER similarity index 100% rename from portkey_ai/_vendor/openai-1.40.1.dist-info/INSTALLER rename to portkey_ai/_vendor/openai-1.58.1.dist-info/INSTALLER diff --git a/portkey_ai/_vendor/openai-1.40.1.dist-info/METADATA b/portkey_ai/_vendor/openai-1.58.1.dist-info/METADATA similarity index 77% rename from portkey_ai/_vendor/openai-1.40.1.dist-info/METADATA rename to portkey_ai/_vendor/openai-1.58.1.dist-info/METADATA index be05c0d3..95f639ae 100644 --- a/portkey_ai/_vendor/openai-1.40.1.dist-info/METADATA +++ b/portkey_ai/_vendor/openai-1.58.1.dist-info/METADATA @@ -1,6 +1,6 @@ -Metadata-Version: 2.3 +Metadata-Version: 2.4 Name: openai -Version: 1.40.1 +Version: 1.58.1 Summary: The official Python library for the openai API Project-URL: Homepage, https://github.com/openai/openai-python Project-URL: Repository, https://github.com/openai/openai-python @@ -14,7 +14,6 @@ Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: OS Independent Classifier: Operating System :: POSIX Classifier: Operating System :: POSIX :: Linux -Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 @@ -22,9 +21,8 @@ Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Typing :: Typed -Requires-Python: >=3.7.1 +Requires-Python: >=3.8 Requires-Dist: anyio<5,>=3.5.0 -Requires-Dist: cached-property; python_version < '3.8' Requires-Dist: distro<2,>=1.7.0 Requires-Dist: httpx<1,>=0.23.0 Requires-Dist: jiter<1,>=0.4.0 @@ -36,13 +34,15 @@ Provides-Extra: datalib Requires-Dist: numpy>=1; extra == 'datalib' Requires-Dist: pandas-stubs>=1.1.0.11; extra == 'datalib' Requires-Dist: pandas>=1.2.3; extra == 'datalib' +Provides-Extra: realtime +Requires-Dist: websockets<15,>=13; extra == 'realtime' Description-Content-Type: text/markdown # OpenAI Python API library [![PyPI version](https://img.shields.io/pypi/v/openai.svg)](https://pypi.org/project/openai/) -The OpenAI Python library provides convenient access to the OpenAI REST API from any Python 3.7+ +The OpenAI Python library provides convenient access to the OpenAI REST API from any Python 3.8+ application. The library includes type definitions for all request params and response fields, and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx). 
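For context on the `portkey_ai/__init__.py` hunk at the top of this diff: the four new realtime symbols are re-exported at the package root. A minimal sketch of the import surface this adds, assuming only what the diff shows (the `client.beta.realtime` attribute path mentioned in the comments is an assumption carried over from the upstream openai 1.58.1 layout being vendored, not something this diff itself shows):

```python
# Sketch: the new realtime exports added to `__all__` above become
# importable directly from the package root once this change lands.
from portkey_ai import (
    AsyncBetaRealtime,
    AsyncBetaSessions,
    BetaRealtime,
    BetaSessions,
)

# Assumption (mirrors the vendored openai 1.58.1 client, not shown in
# this diff): a Portkey client would expose these via `client.beta.realtime`.
print(BetaRealtime, BetaSessions, AsyncBetaRealtime, AsyncBetaSessions)
```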
@@ -71,8 +71,7 @@ import os
from openai import OpenAI

client = OpenAI(
-    # This is the default and can be omitted
-    api_key=os.environ.get("OPENAI_API_KEY"),
+    api_key=os.environ.get("OPENAI_API_KEY"),  # This is the default and can be omitted
)

chat_completion = client.chat.completions.create(
@@ -82,7 +81,7 @@ chat_completion = client.chat.completions.create(
            "content": "Say this is a test",
        }
    ],
-    model="gpt-3.5-turbo",
+    model="gpt-4o",
)
```

@@ -91,6 +90,48 @@ we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/)
to add `OPENAI_API_KEY="My API Key"` to your `.env` file
so that your API Key is not stored in source control.

+### Vision
+
+With a hosted image:
+
+```python
+response = client.chat.completions.create(
+    model="gpt-4o-mini",
+    messages=[
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": prompt},
+                {
+                    "type": "image_url",
+                    "image_url": {"url": f"{img_url}"},
+                },
+            ],
+        }
+    ],
+)
+```
+
+With the image as a base64-encoded string:
+
+```python
+response = client.chat.completions.create(
+    model="gpt-4o-mini",
+    messages=[
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": prompt},
+                {
+                    "type": "image_url",
+                    "image_url": {"url": f"data:{img_type};base64,{img_b64_str}"},
+                },
+            ],
+        }
+    ],
+)
+```
+
### Polling Helpers

When interacting with the API, some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete. The SDK includes
@@ -151,8 +192,7 @@ import asyncio
from openai import AsyncOpenAI

client = AsyncOpenAI(
-    # This is the default and can be omitted
-    api_key=os.environ.get("OPENAI_API_KEY"),
+    api_key=os.environ.get("OPENAI_API_KEY"),  # This is the default and can be omitted
)


@@ -164,7 +204,7 @@ async def main() -> None:
            "content": "Say this is a test",
        }
    ],
-    model="gpt-3.5-turbo",
+    model="gpt-4o",
)


@@ -183,8 +223,13 @@ from openai import OpenAI

client = OpenAI()

stream = client.chat.completions.create(
-    model="gpt-4",
-    messages=[{"role": "user", "content": "Say this is a test"}],
+    messages=[
+        {
+            "role": "user",
+            "content": "Say this is a test",
+        }
+    ],
+    model="gpt-4o",
    stream=True,
)
for chunk in stream:
@@ -194,6 +239,7 @@ for chunk in stream:

The async client uses the exact same interface.

```python
+import asyncio
from openai import AsyncOpenAI

client = AsyncOpenAI()
@@ -230,7 +276,7 @@ openai.base_url = "https://..."
openai.default_headers = {"x-foo": "true"}

completion = openai.chat.completions.create(
-    model="gpt-4",
+    model="gpt-4o",
    messages=[
        {
            "role": "user",
@@ -252,6 +298,67 @@ We recommend that you always instantiate a client (e.g., with `client = OpenAI()
- It's harder to mock for testing purposes
- It's not possible to control cleanup of network connections

+## Realtime API beta
+
+The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as [function calling](https://platform.openai.com/docs/guides/function-calling) through a WebSocket connection.
+
+Under the hood the SDK uses the [`websockets`](https://websockets.readthedocs.io/en/stable/) library to manage connections.
+
+The Realtime API works through a combination of client-sent events and server-sent events. Clients can send events to do things like update session configuration or send text and audio inputs. Server events confirm when audio responses have completed, or when a text response from the model has been received.
A full event reference can be found [here](https://platform.openai.com/docs/api-reference/realtime-client-events) and a guide can be found [here](https://platform.openai.com/docs/guides/realtime).
+
+A basic text-based example:
+
+```py
+import asyncio
+from openai import AsyncOpenAI
+
+async def main():
+    client = AsyncOpenAI()
+
+    async with client.beta.realtime.connect(model="gpt-4o-realtime-preview-2024-10-01") as connection:
+        await connection.session.update(session={'modalities': ['text']})
+
+        await connection.conversation.item.create(
+            item={
+                "type": "message",
+                "role": "user",
+                "content": [{"type": "input_text", "text": "Say hello!"}],
+            }
+        )
+        await connection.response.create()
+
+        async for event in connection:
+            if event.type == 'response.text.delta':
+                print(event.delta, flush=True, end="")
+
+            elif event.type == 'response.text.done':
+                print()
+
+            elif event.type == "response.done":
+                break
+
+asyncio.run(main())
+```
+
+However, the real magic of the Realtime API is handling audio inputs / outputs; see this example [TUI script](https://github.com/openai/openai-python/blob/main/examples/realtime/push_to_talk_app.py) for a fully fledged example.
+
+### Realtime error handling
+
+Whenever an error occurs, the Realtime API will send an [`error` event](https://platform.openai.com/docs/guides/realtime/realtime-api-beta#handling-errors) and the connection will stay open and remain usable. This means you need to handle it yourself, as *no errors are raised directly* by the SDK when an `error` event comes in.
+
+```py
+client = AsyncOpenAI()
+
+async with client.beta.realtime.connect(model="gpt-4o-realtime-preview-2024-10-01") as connection:
+    ...
+    async for event in connection:
+        if event.type == 'error':
+            print(event.error.type)
+            print(event.error.code)
+            print(event.error.event_id)
+            print(event.error.message)
+```
+
## Using types

Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like:
@@ -348,7 +455,7 @@ completion = client.chat.completions.create(
            "content": "Can you generate an example json object describing a fruit?",
        }
    ],
-    model="gpt-3.5-turbo-1106",
+    model="gpt-4o",
    response_format={"type": "json_object"},
)
```
@@ -388,7 +495,7 @@ client = OpenAI()

try:
    client.fine_tuning.jobs.create(
-        model="gpt-3.5-turbo",
+        model="gpt-4o",
        training_file="file-abc123",
    )
except openai.APIConnectionError as e:
@@ -415,6 +522,24 @@ Error codes are as follows:
| >=500 | `InternalServerError` |
| N/A   | `APIConnectionError`  |

+## Request IDs
+
+> For more information on debugging requests, see [these docs](https://platform.openai.com/docs/api-reference/debugging-requests)
+
+All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI.
+
+```python
+completion = await client.chat.completions.create(
+    messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-4"
+)
+print(completion._request_id)  # req_123
+```
+
+Note that unlike other properties that use an `_` prefix, the `_request_id` property
+*is* public. Unless documented otherwise, *all* other `_` prefix properties,
+methods and modules are *private*.
+
### Retries

Certain errors are automatically retried 2 times by default, with a short exponential backoff.
@@ -437,10 +562,10 @@ client.with_options(max_retries=5).chat.completions.create( messages=[ { "role": "user", - "content": "How can I get the name of the current day in Node.js?", + "content": "How can I get the name of the current day in JavaScript?", } ], - model="gpt-3.5-turbo", + model="gpt-4o", ) ``` @@ -471,7 +596,7 @@ client.with_options(timeout=5.0).chat.completions.create( "content": "How can I list all files in a directory using Python?", } ], - model="gpt-3.5-turbo", + model="gpt-4o", ) ``` @@ -485,12 +610,14 @@ Note that requests that time out are [retried twice by default](https://github.c We use the standard library [`logging`](https://docs.python.org/3/library/logging.html) module. -You can enable logging by setting the environment variable `OPENAI_LOG` to `debug`. +You can enable logging by setting the environment variable `OPENAI_LOG` to `info`. ```shell -$ export OPENAI_LOG=debug +$ export OPENAI_LOG=info ``` +Or to `debug` for more verbose logging. + ### How to tell whether `None` means `null` or missing In an API response, a field may be explicitly `null`, or missing entirely; in either case, its value is `None` in this library. You can differentiate the two cases with `.model_fields_set`: @@ -516,7 +643,7 @@ response = client.chat.completions.with_raw_response.create( "role": "user", "content": "Say this is a test", }], - model="gpt-3.5-turbo", + model="gpt-4o", ) print(response.headers.get('X-My-Header')) @@ -549,7 +676,7 @@ with client.chat.completions.with_streaming_response.create( "content": "Say this is a test", } ], - model="gpt-3.5-turbo", + model="gpt-4o", ) as response: print(response.headers.get("X-My-Header")) @@ -598,18 +725,19 @@ can also get all the extra fields on the Pydantic model as a dict with You can directly override the [httpx client](https://www.python-httpx.org/api/#client) to customize it for your use case, including: -- Support for proxies -- Custom transports +- Support for [proxies](https://www.python-httpx.org/advanced/proxies/) +- Custom [transports](https://www.python-httpx.org/advanced/transports/) - Additional [advanced](https://www.python-httpx.org/advanced/clients/) functionality ```python +import httpx from openai import OpenAI, DefaultHttpxClient client = OpenAI( # Or use the `OPENAI_BASE_URL` env var - base_url="http://my.test.server.example.com:8083", + base_url="http://my.test.server.example.com:8083/v1", http_client=DefaultHttpxClient( - proxies="http://my.test.proxy.example.com", + proxy="http://my.test.proxy.example.com", transport=httpx.HTTPTransport(local_address="0.0.0.0"), ), ) @@ -625,6 +753,16 @@ client.with_options(http_client=DefaultHttpxClient(...)) By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting. +```py +from openai import OpenAI + +with OpenAI() as client: + # make requests here + ... + +# HTTP client is now closed +``` + ## Microsoft Azure OpenAI To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI` @@ -679,6 +817,21 @@ We take backwards-compatibility seriously and work hard to ensure you can rely o We are keen for your feedback; please open an [issue](https://www.github.com/openai/openai-python/issues) with questions, bugs, or suggestions. 
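The `.model_fields_set` sentence above ends at a hunk boundary, so the README's own example is elided from this diff. A minimal sketch of the pattern it describes, using an illustrative Pydantic model (`Example` and `my_field` are placeholders, not from the diff):

```python
from typing import Optional

from pydantic import BaseModel


class Example(BaseModel):
    my_field: Optional[str] = None


# Explicit null: the key was present, so it lands in `model_fields_set`.
response = Example.model_validate({"my_field": None})
# Missing entirely: Example.model_validate({}) would leave it unset.

if response.my_field is None:
    if "my_field" not in response.model_fields_set:
        print('Got json like {}, without a "my_field" key present at all.')
    else:
        print('Got json like {"my_field": null}.')
```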
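Likewise, the Azure section above is cut off right after it names the `AzureOpenAI` class. For orientation, a minimal sketch of that client: the endpoint, API version, and deployment name are placeholders, and the key is read from the `AZURE_OPENAI_API_KEY` environment variable:

```python
from openai import AzureOpenAI

client = AzureOpenAI(
    api_version="2024-02-01",  # placeholder: use a supported Azure API version
    azure_endpoint="https://example-endpoint.openai.azure.com",
)

completion = client.chat.completions.create(
    model="my-deployment-name",  # on Azure this is your deployment, not a model id
    messages=[{"role": "user", "content": "Say this is a test"}],
)
print(completion.to_json())
```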
+### Determining the installed version
+
+If you've upgraded to the latest version but aren't seeing any new features you were expecting, then your Python environment is likely still using an older version.
+
+You can determine the version that is being used at runtime with:
+
+```py
+import openai
+print(openai.__version__)
+```
+
## Requirements

-Python 3.7 or higher.
+Python 3.8 or higher.
+
+## Contributing
+
+See [the contributing documentation](https://github.com/openai/openai-python/tree/main/CONTRIBUTING.md).
diff --git a/portkey_ai/_vendor/openai-1.40.1.dist-info/RECORD b/portkey_ai/_vendor/openai-1.58.1.dist-info/RECORD
similarity index 50%
rename from portkey_ai/_vendor/openai-1.40.1.dist-info/RECORD
rename to portkey_ai/_vendor/openai-1.58.1.dist-info/RECORD
index 573784d2..1ef0ad0d 100644
--- a/portkey_ai/_vendor/openai-1.40.1.dist-info/RECORD
+++ b/portkey_ai/_vendor/openai-1.58.1.dist-info/RECORD
@@ -1,347 +1,440 @@
[hundreds of deleted RECORD entries elided: absolute `.pyc` cache paths under `/Users/chandeep/Library/Caches/com.apple.python/...` that had leaked from the packaging machine into the old vendored openai-1.40.1 RECORD]
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/chat_completion_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/chat_completion_message_tool_call.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/chat_completion_message_tool_call_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/chat_completion_named_tool_choice_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/chat_completion_role.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/chat_completion_stream_options_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/chat_completion_system_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/chat_completion_token_logprob.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/chat_completion_tool_choice_option_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/chat_completion_tool_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/chat_completion_tool_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/chat_completion_user_message_param.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/completion_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/parsed_chat_completion.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat/parsed_function_tool_call.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/chat_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/completion.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/completion_choice.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/completion_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/completion_usage.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/create_embedding_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/embedding.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/embedding_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/file_content.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/file_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/file_deleted.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/file_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/file_object.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/fine_tuning/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/fine_tuning/fine_tuning_job.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/fine_tuning/fine_tuning_job_event.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/fine_tuning/fine_tuning_job_integration.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/fine_tuning/fine_tuning_job_wandb_integration.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/fine_tuning/job_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/fine_tuning/job_list_events_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/fine_tuning/job_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/fine_tuning/jobs/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/fine_tuning/jobs/checkpoint_list_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/image.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/image_create_variation_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/image_edit_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/image_generate_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/image_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/images_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/model.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/model_deleted.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/moderation.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/moderation_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/moderation_create_response.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/moderation_model.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/shared/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/shared/error_object.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/shared/function_definition.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/shared/function_parameters.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/shared/response_format_json_object.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/shared/response_format_json_schema.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/shared/response_format_text.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/shared_params/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/shared_params/function_definition.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/shared_params/function_parameters.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/shared_params/response_format_json_object.cpython-39.pyc,, 
-../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/shared_params/response_format_json_schema.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/shared_params/response_format_text.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/upload.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/upload_complete_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/upload_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/uploads/__init__.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/uploads/part_create_params.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/types/uploads/upload_part.cpython-39.pyc,, -../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-plhz866u/lib/python/openai/version.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/__main__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_base_client.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_client.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_compat.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_constants.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_exceptions.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_extras/__init__.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_extras/_common.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_extras/numpy_proxy.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_extras/pandas_proxy.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_files.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_legacy_response.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_models.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_module_client.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_qs.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_resource.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_response.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_streaming.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_types.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_utils/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_utils/_logs.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_utils/_proxy.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_utils/_reflection.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_utils/_streams.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_utils/_sync.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_utils/_transform.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_utils/_typing.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_utils/_utils.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/_version.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_api/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_api/_main.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_api/audio.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_api/chat/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_api/chat/completions.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_api/completions.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_api/files.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_api/image.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_api/models.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_cli.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_errors.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_models.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_progress.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_tools/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_tools/_main.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_tools/fine_tunes.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_tools/migrate.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/cli/_utils.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/_old_api.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/_parsing/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/_parsing/_completions.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/_pydantic.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/_tools.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/_validators.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/azure.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/streaming/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/streaming/_assistants.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/streaming/_deltas.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/streaming/chat/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/streaming/chat/_completions.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/streaming/chat/_events.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/lib/streaming/chat/_types.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/pagination.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/audio/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/audio/audio.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/audio/speech.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/audio/transcriptions.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/audio/translations.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/batches.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/assistants.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/beta.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/chat/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/chat/chat.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/chat/completions.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/realtime/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/realtime/realtime.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/realtime/sessions.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/threads/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/threads/messages.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/threads/runs/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/threads/runs/runs.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/threads/runs/steps.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/threads/threads.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/vector_stores/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/vector_stores/file_batches.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/vector_stores/files.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/beta/vector_stores/vector_stores.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/chat/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/chat/chat.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/chat/completions.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/completions.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/embeddings.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/files.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/fine_tuning/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/fine_tuning/fine_tuning.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/fine_tuning/jobs/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/fine_tuning/jobs/checkpoints.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/fine_tuning/jobs/jobs.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/images.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/models.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/moderations.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/uploads/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/uploads/parts.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/resources/uploads/uploads.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio/speech_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio/speech_model.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio/transcription.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio/transcription_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio/transcription_create_response.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio/transcription_segment.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio/transcription_verbose.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio/transcription_word.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio/translation.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio/translation_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio/translation_create_response.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio/translation_verbose.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio_model.cpython-39.pyc,, 
+../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/audio_response_format.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/batch.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/batch_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/batch_error.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/batch_list_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/batch_request_counts.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/beta/__init__.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/beta/assistant.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/beta/assistant_create_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/beta/assistant_deleted.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/beta/assistant_list_params.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/beta/assistant_response_format_option.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/beta/assistant_response_format_option_param.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/beta/assistant_stream_event.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/beta/assistant_tool.cpython-39.pyc,, +../../../../../../../../../Users/chandeep/Library/Caches/com.apple.python/private/var/folders/9_/nx5thgw547s1zlwxzhbjtb1m0000gn/T/pip-target-_6i4wnmx/lib/python/openai/types/beta/assistant_tool_choice.cpython-39.pyc,, 
 ../../bin/openai,sha256=hl2E5BbKWVfkczcW65l8G1zyeJ3Si5m9TUnp5aG8gtY,276
-openai-1.40.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-openai-1.40.1.dist-info/METADATA,sha256=E4y7Ouv_ZSZBT7WuW6u1fIcdVyrMVjYVgunbAtMATcw,22163
-openai-1.40.1.dist-info/RECORD,,
-openai-1.40.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-openai-1.40.1.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
-openai-1.40.1.dist-info/entry_points.txt,sha256=kAYhQEmziJwsKs5raYAIOvJ2LWmbz5dulEXOzsY71ro,43
-openai-1.40.1.dist-info/licenses/LICENSE,sha256=d0M6HDjQ76tf255XPlAGkIoECMe688MXcGEYsOFySfI,11336
-openai/__init__.py,sha256=YhCuMuxZHoRn6BnOxawEFt8fRZPnhBWGongW3CP-F3k,10191
+openai-1.58.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+openai-1.58.1.dist-info/METADATA,sha256=bMFx74-7aYzZDEuWPpWEycqxo8YvoA5fXXk4sCy6ygU,27285
+openai-1.58.1.dist-info/RECORD,,
+openai-1.58.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+openai-1.58.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai-1.58.1.dist-info/entry_points.txt,sha256=kAYhQEmziJwsKs5raYAIOvJ2LWmbz5dulEXOzsY71ro,43
+openai-1.58.1.dist-info/licenses/LICENSE,sha256=d0M6HDjQ76tf255XPlAGkIoECMe688MXcGEYsOFySfI,11336
+openai/__init__.py,sha256=HAeDtlHRw1QQs5BIDoE0k7bA7xOP5m7F4tYjXt_9c64,10209
 openai/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30
-openai/_base_client.py,sha256=_ywUgO9OA-ORypQlYrUUgHgUgGFYPMEWHW0DLLCVUBc,67506
-openai/_client.py,sha256=PXHky30KYjUMIH8WV7PjKcOAULO9-36AbN8y1DCFu70,22233
-openai/_compat.py,sha256=DvCJ4A6g0azYhKPpLKXoi1eYXpwhukMGUxJclk2DtUA,6832
+openai/_base_client.py,sha256=uWXGJEkKDgXxPEpz0-G5yjuJR28FTIhBEh6ijOiEWMA,69144
+openai/_client.py,sha256=FJRGkrdpHAFV2TOs04tO5uyKCA-cudlk4BlvCX3KI3Q,23355
+openai/_compat.py,sha256=Mtzi28qOK99ZBPcGcQqdjoUFk2MzzpqjaafjuwQ4NO0,6982
 openai/_constants.py,sha256=L1pfEhuz_wM2w2_U9P_9JZzTbrN4pbLo207l96rtKcQ,469
-openai/_exceptions.py,sha256=QoeAOAeOatAOxt80wJShYUQti-QQc2Pur45IxiWN-zk,4376
+openai/_exceptions.py,sha256=2BEuXwqce9z7X6lWLLXRqg1vOay_q-OdLz9lcj6Pluw,4798
 openai/_extras/__init__.py,sha256=LZbJLZ7aFHRcI7uiY4-wFQTdMp-BF6FER1QMhKVFkWk,107
 openai/_extras/_common.py,sha256=NWWtgbdJsO3hQGQxaXGfVk0LjeIE5AFZ8VS_795hhMc,364
 openai/_extras/numpy_proxy.py,sha256=hwZXa_JBAPD5taRhor1tGxK26g5IaK52JclQDl-dky0,799
 openai/_extras/pandas_proxy.py,sha256=NCEt1Dqwc_0H85YdsWPDE3lPDJtYnBT8G-gJE_BCeEc,637
 openai/_files.py,sha256=WEf6hxJN1u3pVkdnPCpinhxCUnOV2olt4J6vLoJ_k48,3616
-openai/_legacy_response.py,sha256=7NyYX04zAjuGGypOjOWwS7hljQq5OSyR-6B6_w3_q6g,15781
-openai/_models.py,sha256=3METTz2oSlPfVcHL0fZ-L5IPnvckZDkgEe1H58mRsFE,28210
+openai/_legacy_response.py,sha256=YBL2OTX7W139lVpcVHnNTsHRPNJxWHBAw6ZZHqnL2fs,16046
+openai/_models.py,sha256=k3sTshX4q5XbZy__I4bchWrYAtID1alxVdGOn_4jGQ4,30343
openai/_module_client.py,sha256=gF_2bbdosIwUt29sQgrQRJOgNREvXF-IDxe4XKGhHjY,2523 openai/_qs.py,sha256=AOkSz4rHtK4YI3ZU_kzea-zpwBUgEY8WniGmTPyEimc,4846 openai/_resource.py,sha256=IQihFzFLhGOiGSlT2dO1ESWSTg2XypgbtAldtGdTOqU,1100 -openai/_response.py,sha256=UW8TM-E4YE6UzhKcWOoGvBH3cVKh-aFe1yEL7wZaMIQ,29018 +openai/_response.py,sha256=Juwnj0AMWnHc8HDjtdcQQpMIDyX170hzZPXaAK1e9Qw,29387 openai/_streaming.py,sha256=t1UZrg53fVJB5Rs6k2sT9PBbvjp-IGrQzUq_5nlxKG4,13102 -openai/_types.py,sha256=77A36sAUMgrgTX3zNo2NKU_wbQZgoZWjGTwf3GTOGTc,6202 -openai/_utils/__init__.py,sha256=Uzq1-FIih_VUjzdNVWXks0sdC39KBKLMrZoz-_JOjJ4,1988 -openai/_utils/_logs.py,sha256=sFA_NejuNObTGGbfsXC03I38mrT9HjsgAJx4d3GP0ok,774 +openai/_types.py,sha256=GxKqy9_2_AUqbaRROzqhCJ47a7c-q_T6Bu8kV9a2qhA,6242 +openai/_utils/__init__.py,sha256=0yN65NkadWbqEFhfcHlJje-9jEHBC14ibHb9nC4ClOg,2130 +openai/_utils/_logs.py,sha256=IC5iwPflwelNpJEpWsvK3up-pol5hR8k_VL9fSukk_Y,1351 openai/_utils/_proxy.py,sha256=z3zsateHtb0EARTWKk8QZNHfPkqJbqwd1lM993LBwGE,1902 -openai/_utils/_reflection.py,sha256=ZmGkIgT_PuwedyNBrrKGbxoWtkpytJNU1uU4QHnmEMU,1364 +openai/_utils/_reflection.py,sha256=aTXm-W0Kww4PJo5LPkUnQ92N-2UvrK1-D67cJVBlIgw,1426 openai/_utils/_streams.py,sha256=SMC90diFFecpEg_zgDRVbdR3hSEIgVVij4taD-noMLM,289 -openai/_utils/_sync.py,sha256=9ex9pfOyd8xAF1LxpFx4IkqL8k0vk8srE2Ee-OTMQ0A,2840 -openai/_utils/_transform.py,sha256=NCz3q9_O-vuj60xVe-qzhEQ8uJWlZWJTsM-GwHDccf8,12958 -openai/_utils/_typing.py,sha256=tFbktdpdHCQliwzGsWysgn0P5H0JRdagkZdb_LegGkY,3838 -openai/_utils/_utils.py,sha256=LMVTMZG8pfu8AkJNSfmv_z3guQlOfm2UxDTjTTXggfg,11411 -openai/_version.py,sha256=byQHxbhaxiwEt7VT71Cengl03CfF1u54tElKLb-aRqM,159 +openai/_utils/_sync.py,sha256=03JeD-UR_e2O8dJEtD-v4zcyhlEpFkrcH8bgrSJMrxI,2437 +openai/_utils/_transform.py,sha256=Dkkyr7OveGmOolepcvXmVJWE3kqim4b0nM0h7yWbgeY,13468 +openai/_utils/_typing.py,sha256=nTJz0jcrQbEgxwy4TtAkNxuU0QHHlmc6mQtA6vIR8tg,4501 +openai/_utils/_utils.py,sha256=8UmbPOy_AAr2uUjjFui-VZSrVBHRj6bfNEKRp5YZP2A,12004 +openai/_version.py,sha256=DKLfLGiThHOxb7_mxxvh3kOxCAzNPvebOIc6-n1St50,159 openai/cli/__init__.py,sha256=soGgtqyomgddl92H0KJRqHqGuaXIaghq86qkzLuVp7U,31 openai/cli/_api/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58 openai/cli/_api/_main.py,sha256=5yyfLURqCEaAN8B61gHaqVAaYgtyb9Xq0ncQ3P2BAh0,451 @@ -352,95 +445,105 @@ openai/cli/_api/completions.py,sha256=ysOmnbXpFz3VB5N_5USPdObiYew62vEn6rMtNFwTJG openai/cli/_api/files.py,sha256=6nKXFnsC2QE0bGnVUAG7BTLSu6K1_MhPE0ZJACmzgRY,2345 openai/cli/_api/image.py,sha256=ovBExdn8oUK9ImOpsPafesfAlmcftLP2p7d37hcUtKU,5062 openai/cli/_api/models.py,sha256=pGmIGZToj3raGGpKvPSq_EVUR-dqg4Vi0PNfZH98D2E,1295 -openai/cli/_cli.py,sha256=WxqTnhVVtfzX0z7hV5fcvd3hkihaUgwOWpXOwyCS4Fc,6743 +openai/cli/_cli.py,sha256=o6zWCnq84u-DIGZuR9YoOUxTGTpx-oCU5mgAKDi555c,6779 openai/cli/_errors.py,sha256=nejlu1HnOyAIr2n7uqpFtWn8XclWj_9N8FwgfT3BPK8,471 openai/cli/_models.py,sha256=tgsldjG216KpwgAZ5pS0sV02FQvONDJU2ElA4kCCiIU,491 openai/cli/_progress.py,sha256=aMLssU9jh-LoqRYH3608jNos7r6vZKnHTRlHxFznzv4,1406 openai/cli/_tools/__init__.py,sha256=cj92MZq-9_1PQM8A4TQVsqKn5mcTDAGxHllJ0UvJOPE,58 openai/cli/_tools/_main.py,sha256=pakjEXHRHqYlTml-RxV7fNrRtRXzmZBinoPi1AJipFY,467 openai/cli/_tools/fine_tunes.py,sha256=RQgYMzifk6S7Y1I1K6huqco2QxmXa7gVUlHl6SrKTSU,1543 -openai/cli/_tools/migrate.py,sha256=GD3zHR700FRIhdx3gBqIrRLPrKjx4pDAKUgvnO0J2ug,5013 +openai/cli/_tools/migrate.py,sha256=o-iomzhtC6N6X5H5GDlgQ_QOaIovE2YA9oHc_tIAUj8,4497 openai/cli/_utils.py,sha256=oiTc9MnxQh_zxAZ1OIHPkoDpCll0NF9ZgkdFHz4T-Bs,848 
openai/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224 openai/lib/__init__.py,sha256=BMTfMnlbugMgDA1STDIAlx4bI4t4l_8bQmJxd0th0n8,126 openai/lib/_old_api.py,sha256=XZnXBrEKuTd70iJirj5mGW35fZoqruJobbBTq6bvg10,1947 openai/lib/_parsing/__init__.py,sha256=wS3BYvMGj9TqiPqOe3rO1sleaAJqHVuCaQuCE5rZIUw,539 -openai/lib/_parsing/_completions.py,sha256=fXPhBD67s50THskYzBeueqo7B77toGRfZ3oaYv_tKVE,8601 -openai/lib/_pydantic.py,sha256=_bT7JPIsCTuznyA4GnyRE6tbmw9eIfjQ6wBsVAR7Xn4,2364 +openai/lib/_parsing/_completions.py,sha256=I1KpjdI9p8Me-nsLF2szjEYF_7x4k28WGH5GdZeKpzI,9138 +openai/lib/_pydantic.py,sha256=ndHdDDSEGg8Jbhc7JvLQHiIrZwLR36bCcUAlzwLmOdk,5282 openai/lib/_tools.py,sha256=xrzM7jNgehZGsRQ9kSgn1q33z9cHrgf0b8UMo5wrTFw,1501 openai/lib/_validators.py,sha256=cXJXFuaAl7jeJcYHXXnFa4NHGtHs-_zt3Zs1VVCmQo4,35288 -openai/lib/azure.py,sha256=iaiopzv8xI_JlYdtgEmnu32krLm0YiG44cDs_ictlgw,21536 +openai/lib/azure.py,sha256=s2jrnQmfvlpNREe4A7QcmeXeIuuI9zIWG3ltyfvuQOM,22450 openai/lib/streaming/__init__.py,sha256=kD3LpjsqU7caDQDhB-YjTUl9qqbb5sPnGGSI2yQYC70,379 -openai/lib/streaming/_assistants.py,sha256=_t1R-cTCXmKfQ3aLwSqGnYqjOxJIWhoWSvDHqIFCaPw,40575 +openai/lib/streaming/_assistants.py,sha256=LUWSinmYopQIkQ5xSg73b6BWbkRkQS5JvX62w_V9xSw,40692 openai/lib/streaming/_deltas.py,sha256=I7B_AznXZwlBmE8Puau7ayTQUx6hMIEVE8FYTQm2fjs,2502 -openai/lib/streaming/chat/__init__.py,sha256=d243EsKxxHQ_MpUxecmYdLy4ZRVY6BKhL6QNSfLdtRY,1245 -openai/lib/streaming/chat/_completions.py,sha256=Kje6_zfjFKxk0NIOSc9JZ_YxkhLKSlb8-weEtzt93Sc,28604 +openai/lib/streaming/chat/__init__.py,sha256=7krL_atOvvpQkY_byWSglSfDsMs5hdoxHmz4Ulq7lcc,1305 +openai/lib/streaming/chat/_completions.py,sha256=icXzr6TwaQvOOEZHRLIfw106YVUT9mLGjQt6QJ1ObKI,29944 openai/lib/streaming/chat/_events.py,sha256=lstVmM6YR2Cs9drikzrY9JCZn9Nbfym0aKIPtNpxL6w,2618 openai/lib/streaming/chat/_types.py,sha256=-SYVBNhGkOUoJ-8dotxpCRqPJpfyOQ8hwR2_HrsQCRI,739 openai/pagination.py,sha256=B9ejXEAR_hYGLHfqb9xEEsE0u5dCUMjvplOce5dpY7M,2760 openai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 openai/resources/__init__.py,sha256=eYonVyf6AAmk-b8JYSYmo5EEMv89ovxiAY5A83ti8J8,4533 openai/resources/audio/__init__.py,sha256=YM7FHvPKVlj_v6EIgfpUQsb6q4hS2hVQ3gfkgic0sP0,1687 -openai/resources/audio/audio.py,sha256=1HHcDRWT58KshYelRdSnJs-0bvMBRS1vOhnU-h_oP5s,4481 -openai/resources/audio/speech.py,sha256=s93RA77J_uS1Nu95dnrbbIahE-U-Csr9RozgqMFxB54,7885 -openai/resources/audio/transcriptions.py,sha256=acLwtVFCuTuG4OYHdzJlRq70hAMLzWV8YJGGc64EBOU,11075 -openai/resources/audio/translations.py,sha256=QFrFjB3JkruF7MPCJfsgGDRSlyKHAEdJLaF-OdhR3UY,8979 -openai/resources/batches.py,sha256=XuXe_9xb_x-Kq0myKTkcKHQqCf3OHhhNIXr4295mNHM,18560 +openai/resources/audio/audio.py,sha256=MMJHbfXmyYmQU7dF8XsD0YOIqdlG3gtxUqTihOuVx8o,5499 +openai/resources/audio/speech.py,sha256=yPoi_Xozv0Yuikbf2dxhAyRdN2q_sWDQoHNCxUayC-E,8903 +openai/resources/audio/transcriptions.py,sha256=4X71pe1lvelNRPSlHy2jAtIMyETYwWieLShBdr12MN0,18507 +openai/resources/audio/translations.py,sha256=4Y-ognKnSi72qhwX8FCKB-5JhvaAS2Wnq2ivTFmpUoU,15711 +openai/resources/batches.py,sha256=8wb-oy81IkxpABjT_11JKP7nzTmGmP35lD6WGecWmn8,19578 openai/resources/beta/__init__.py,sha256=nXoV4P8WCrbEZuNMtptbIuy_LqlVafY9lJ2qfW35GFc,1636 -openai/resources/beta/assistants.py,sha256=2zxpTMThsrleTO_jQiHyvJT0mu2Tth2pS5DKpO5B2UI,39567 -openai/resources/beta/beta.py,sha256=Ys5tPrAmvr7_CmHJLKhnsQqWXGo5seX8-rP8HhXwk0w,4702 +openai/resources/beta/assistants.py,sha256=j1BE3q4aCGzridJ8wyhzn0FeI3Gvy56jRK57EA-SuXk,40533 
+openai/resources/beta/beta.py,sha256=D9mhIg_Qc0tUq23AVRUI6Z1WRF_ekeNG5sHeRYyhFXk,6602 openai/resources/beta/chat/__init__.py,sha256=d_fpyFMAG3iRAPIXANPfRG4HtEm6U_uMUYep7Skj2uY,263 openai/resources/beta/chat/chat.py,sha256=sNvU8Fi_o3dWkD_X4Mobafv9XWBP6Y2dJxng-NdFXUs,597 -openai/resources/beta/chat/completions.py,sha256=L1G6JsVjXT1NoWJdE6Z_xdjc1MXh69gppipaHG4LO94,20280 +openai/resources/beta/chat/completions.py,sha256=Z_x_hxpemrmROMrfyx6dUALppPuqNgswgW9YQ3ngHYI,28553 +openai/resources/beta/realtime/__init__.py,sha256=0TBjHlLRsG-hudbiE8f-EXETNkDRAxqkCVAgODiUnYo,862 +openai/resources/beta/realtime/realtime.py,sha256=mbzDEKD90pt06pBg_A44t-xZnQTInwP83WuOlwgBDTY,36992 +openai/resources/beta/realtime/sessions.py,sha256=i53-QVMaqK3sGP22gh250kANFlRbP4V-g8uffWzKHS8,16093 openai/resources/beta/threads/__init__.py,sha256=fQ_qdUVSfouVS5h47DlTb5mamChT4K-v-siPuuAB6do,1177 -openai/resources/beta/threads/messages.py,sha256=hD_f8FOJ-qfkByJzeZv-YxaNvxUIVKfsEf1HNy_IEOo,26346 +openai/resources/beta/threads/messages.py,sha256=LBjgJAK-0g_lkhIX2WG6qNT0RzSTknO0nRlqkVQw-B8,27372 openai/resources/beta/threads/runs/__init__.py,sha256=2FfDaqwmJJCd-IVpY_CrzWcFvw0KFyQ3cm5jnTfI-DQ,771 -openai/resources/beta/threads/runs/runs.py,sha256=q3LJ5EaOwTIeuu2Li4JtH3HXj3WOcevL_7Lozj_1MOY,137198 -openai/resources/beta/threads/runs/steps.py,sha256=uRykb4JapSNZCF8OD54f5qOWtrp2GoU1k5uAZgA4kAk,12223 -openai/resources/beta/threads/threads.py,sha256=943MpBSJUcyf46KcAvE-JUIdmcausRd_oceYFAZ0GgE,93208 +openai/resources/beta/threads/runs/runs.py,sha256=7sPjaxa8Th6aXDeils1G8VKA9_2wsyjGUs5kJh3M50I,142593 +openai/resources/beta/threads/runs/steps.py,sha256=VlGD9NXtNqOt3uwlnepCavW7v3uVlvvyi0X1h9WZ_-E,15817 +openai/resources/beta/threads/threads.py,sha256=qGh4H0-42NhJHwPpyAYZlGx1ZgssFARJ45fhEDCyDQU,94238 openai/resources/beta/vector_stores/__init__.py,sha256=11Xn1vhgndWiI0defJHv31vmbtbDgh2GwZT3gX8GgHk,1296 -openai/resources/beta/vector_stores/file_batches.py,sha256=rQvUKeBmTxMFYmUN1s67rqhKk_2TtmVtrCnz-urevtM,30849 -openai/resources/beta/vector_stores/files.py,sha256=wKWpbUIMLdAcTsMIprG7pVlQ1DFdA924AxWsYhm1S34,28572 -openai/resources/beta/vector_stores/vector_stores.py,sha256=DTzqhJ73TfNwVmfHZwZFz-qZdAzqPd2v4YUptUmOdhI,27809 +openai/resources/beta/vector_stores/file_batches.py,sha256=EomxymvX4oCIRXUAfKGShAYWqnv1vlAahcp_Wa7Kt7Y,31985 +openai/resources/beta/vector_stores/files.py,sha256=LjN6Zazb4dGV-xeQ-XRKAVciXsFj7LXh90AKJgVQ-Cw,29724 +openai/resources/beta/vector_stores/vector_stores.py,sha256=OnzaEjKov8npQQf9YSYljPOTNBzjfwmxfW_D7f7fLkQ,28916 openai/resources/chat/__init__.py,sha256=8Q9ODRo1wIpFa34VaNwuaWFmxqFxagDtUhIAkQNvxEU,849 -openai/resources/chat/chat.py,sha256=Edexhbq1anfSS_I0wNRQb7rx1OV6-rq4sxgVlYDGb6Y,2342 -openai/resources/chat/completions.py,sha256=ZDneiCx57nohjlC3ENOuGrNU27JUZ0vNn7rhpzYekJk,76132 -openai/resources/completions.py,sha256=4Rfv9o3XwI5GRfhN1RD4tEgNn0I2jb6TRW6j0b6bpZc,58712 -openai/resources/embeddings.py,sha256=cMSXtMc_7mBqlSiQ99B7qXYoRLGyoeIFazyYQ0jJ1O4,10755 -openai/resources/files.py,sha256=InC0e28vgMKM5pNadUhGGvDQ0Rvb1D4WcEDfanFJV2E,27156 +openai/resources/chat/chat.py,sha256=hvYn24it5ARq8BYloSWn5kqqSlBEcYvVdQTf3ujxuV0,3360 +openai/resources/chat/completions.py,sha256=VL61UVRPoI7JuNj6b4k4G2g8Ew0mu2WfLJbtUbW_XuM,99603 +openai/resources/completions.py,sha256=5W3UuTH0V-vpTIkb8-r7gyS0Qp7tx3JZMWZkHBGIjPY,59460 +openai/resources/embeddings.py,sha256=PfwI3PKKPkmLs7wHijO-1pOwW6Fjs5Rqzpy0ALLYgAs,11655 +openai/resources/files.py,sha256=PL7b1lM7s3uJD7CvZcM_9f54kAlhBo913o31z1uXt-0,30093 
openai/resources/fine_tuning/__init__.py,sha256=s6uoq7gM4gwoywdOOZQkPeYiSbUl-OwpeuMhwJJk0lc,837 -openai/resources/fine_tuning/fine_tuning.py,sha256=-2k4d5ZDlCIoqonSOMtGLVl4Kk9n2yJoKvVMG3PoWW8,2410 +openai/resources/fine_tuning/fine_tuning.py,sha256=yfXXcR8IMRHkS-xnoT_nF7WEa2fjprDO-0ND-juPqhk,3394 openai/resources/fine_tuning/jobs/__init__.py,sha256=_smlrwijZOCcsDWqKnofLxQM2QLucZzXgboL9zJBPHw,849 -openai/resources/fine_tuning/jobs/checkpoints.py,sha256=6uP1CCGkzE_n8FsVdTQ36eH_eiq24wOxQQ5zzOy0UEU,6456 -openai/resources/fine_tuning/jobs/jobs.py,sha256=6GOXP6GTRlmYeDaVA_FK0QDioudPtty7qNH4uo-_j58,27502 -openai/resources/images.py,sha256=1oKEnEalLjSxnyOe-Zzk3CsL9ou876ePUgOh8vtUc3I,24616 -openai/resources/models.py,sha256=XF3E56V62YZq-HrStUDDvfrT2RHj98P8Y-oOrPSPRX0,10222 -openai/resources/moderations.py,sha256=Jc6m5wsaWcqx9ls8HORqwqKZ-a7xy4mSpD3R3J-n7nc,6603 +openai/resources/fine_tuning/jobs/checkpoints.py,sha256=LIJUhxb8hgxEgHdTFKdyb0Q-hnV4ccIprvFpQJI97ho,7474 +openai/resources/fine_tuning/jobs/jobs.py,sha256=kZLZaWRW6ynhLknoOaK64LW9XifzsSOpFHWX8VPjJcs,29392 +openai/resources/images.py,sha256=PS7PIe1X8tccsqLtd-4kx1OTzCow0S-C-L29bmVyV4c,25634 +openai/resources/models.py,sha256=qJj0Cpy_Ok9ELag8VxqTefX8tw7RPgIZ8-a6qllxl8w,11240 +openai/resources/moderations.py,sha256=H9tygVKuT1c25LW_XyrhpK9nlT72SsEYDiPolQBP7hs,7805 openai/resources/uploads/__init__.py,sha256=HmY3WQgvUI2bN3CjfWHWQOk7UUC6Ozna97_lHhrrRSA,810 -openai/resources/uploads/parts.py,sha256=8xBjww6Na7qx6QVTG-lyuKzgF1gs7ldP_VwnwMFFjd8,7132 -openai/resources/uploads/uploads.py,sha256=OpU8cdmgqm086JeDQ_Ck0_wiIFmpclTHwdMshyTjRP8,17618 -openai/types/__init__.py,sha256=vXLJXHVOMJwdhEZceBW9O6msKWH6Hmvs6G6XmN8MULQ,2571 -openai/types/audio/__init__.py,sha256=7PRM0dwBGCBKcF_hkDHx2tVUdEW5jbwU8bFB9Vbtq-g,547 -openai/types/audio/speech_create_params.py,sha256=Q7EqgD5F5CV0tANvz30msMfYD4EgqGUZn4V4yDypSe4,1300 +openai/resources/uploads/parts.py,sha256=NEMRVCqOOYJV2zTmBau9UtY2qXuB_yDJzzXTJ1XubUY,8150 +openai/resources/uploads/uploads.py,sha256=ft7cVZuDxphjdCV6BcS6Zs2qE3zD1RB57udvaGUR9HY,24918 +openai/types/__init__.py,sha256=GxEEa9qy8CKZVCU1wY4PokDUCq-fD_GwZxRsBxzC_-s,3177 +openai/types/audio/__init__.py,sha256=sR9_rMb-gO0stG4ozTq6XJs714C_BfjB3KCgFvyhXVA,1050 +openai/types/audio/speech_create_params.py,sha256=-iUZ3a-BGlg46IFsP_vcJBTRuK_pXruF0KJsbNn0mgU,1300 openai/types/audio/speech_model.py,sha256=RUimvc__LYAxwEEmfrf-lj18O3EWrU1OlWZXEXN2AKY,218 openai/types/audio/transcription.py,sha256=FP9QMwwwdqgvP3xY9P-40gBiFmMwFKxXM5yv5x8xPVk,230 -openai/types/audio/transcription_create_params.py,sha256=5tx0yAERDRZTG0IEsHQODKxMGZKrdRXGo5K2Is0gNw0,2253 +openai/types/audio/transcription_create_params.py,sha256=OP8fXaYYsi5HWi0E7HR5HIRihglsuBqeJWglxkNxLts,2264 +openai/types/audio/transcription_create_response.py,sha256=-PLGH8he9EdJtvBXV-ZrE31CLVnk4bc0VQ1ixRoN8Ck,378 +openai/types/audio/transcription_segment.py,sha256=-pPAGolwIIXUBMic-H5U7aR0u_Aq-pipSA4xTtn_viA,1153 +openai/types/audio/transcription_verbose.py,sha256=tlVK8JzyvkslQOvpAb19PmsfiRBqmbne0l-GqFmVIMU,758 +openai/types/audio/transcription_word.py,sha256=sNDdtjoqIiba6qKsD_lI2Ffs1Lr7qP9HyS59AFh5cTc,368 openai/types/audio/translation.py,sha256=5l-Zk9Cg7AZti-TTn2-4ydsoZj2zdvDwyzzVjVp9W0g,194 -openai/types/audio/translation_create_params.py,sha256=GT1rk1U7nqInbyahHxBXX8uSjmXGCySiIhI53DYgpa4,1524 +openai/types/audio/translation_create_params.py,sha256=lFQEh5IRG5XT-Z3TV7FDSNbIRqAt6yA3EsSvSsb0wsU,1585 +openai/types/audio/translation_create_response.py,sha256=x6H0yjTbZR3vd3d7LdABcn9nrMDNdeMjepcjW1oUfVc,362 
+openai/types/audio/translation_verbose.py,sha256=ic6h7_fAKlyrJuCgbd4Vtr0pk9OnynQK_uobD9lAGZo,613 openai/types/audio_model.py,sha256=pxBVwf1HGd6mW-_jd-TDVMRZtTvvCUn_rL8Pt1BXzuo,208 -openai/types/batch.py,sha256=eIOIaJnDuv93fdefTI0WRfTm7MZH8gLBdF0B12JCiZw,2787 -openai/types/batch_create_params.py,sha256=Wq-uHe9FcAPTtN68jEG2xMZWwOC8Q7Dg4GdxV_y5qP0,1441 +openai/types/audio_response_format.py,sha256=EEItnQdwXinG8bOe1We2039Z7lp2Z8wSXXvTlFlkXzM,259 +openai/types/batch.py,sha256=Dq7btfgIT4b2yfh0knZTzAL4yFx_l95H5KLfDPO8iig,2788 +openai/types/batch_create_params.py,sha256=VXpg3mK2xwsUAIbYcFHFgRgLMrN3iBgW8l5rslk0gvQ,1441 openai/types/batch_error.py,sha256=Xxl-gYm0jerpYyI-mKSSVxRMQRubkoLUiOP9U3v72EM,622 openai/types/batch_list_params.py,sha256=X1_sfRspuIMSDyXWVh0YnJ9vJLeOOH66TrvgEHueC84,705 openai/types/batch_request_counts.py,sha256=GHHrJKdJwJ3foBa1j9v5Vece_zzkdXXXgOcne8W1E30,409 -openai/types/beta/__init__.py,sha256=5ojZzNm9el-L9sXfh0E8D2t7N55jmuK_GMEkx9Yn09s,2723 -openai/types/beta/assistant.py,sha256=hn9R5DZ_WSwHRkFAX5biZGC48rMK8ZOf4tSn4J70AAs,4950 -openai/types/beta/assistant_create_params.py,sha256=Av6Cbm37NNMQyhhdhilmbFbrj329sipMiUYGcaFBlx0,7267 +openai/types/beta/__init__.py,sha256=CbOOxDPXvdK5RInCcEiBihJ2XgaUhdm3NMBBwx90OHc,3462 +openai/types/beta/assistant.py,sha256=3w8FpWceagZoKuEQrGeitoosTrz-Z24IPiL-viWC4I4,4936 +openai/types/beta/assistant_create_params.py,sha256=Y5LoiGU9ZTWQ87KaYyrqN1TsMFT4iYsBvMNeDgciRd4,5986 openai/types/beta/assistant_deleted.py,sha256=bTTUl5FPHTBI5nRm7d0sGuR9VCSBDZ-IbOn9G_IpmJQ,301 -openai/types/beta/assistant_list_params.py,sha256=1-osjSX8tKieHSP0xaKBBU8j-J01fKrrxIJRHDudFHk,1220 +openai/types/beta/assistant_list_params.py,sha256=yW-lj6AUkG0IRZQKre0veEr9p4VMN-9YdELFMYs74Cw,1222 openai/types/beta/assistant_response_format_option.py,sha256=yNeoAWxM-_8Sjmwqu8exqyKRFhVZIKeTypetPY55VFA,561 -openai/types/beta/assistant_response_format_option_param.py,sha256=t6eUp_Gogo-5_KOidEe2wKuZxVs72f9yp_HLc5rsDyA,488 -openai/types/beta/assistant_stream_event.py,sha256=ORGXB7viddEHvK4Nb40wqVJylWLgkwVXH7qlyYG9nQE,6829 +openai/types/beta/assistant_response_format_option_param.py,sha256=dyPMhwRSLBZ0ltpxiD7KM-9X6BzWnbGeG-nT_3SenuQ,628 +openai/types/beta/assistant_stream_event.py,sha256=vP4LDqYWzSKGcZ1JAfyNw7YqC__XsVPe0nqZ2qdn93E,6930 openai/types/beta/assistant_tool.py,sha256=_0FC7Db4Ctq_0yLaKJ93zNTB5HthuJWEAHx3fadDRlw,506 openai/types/beta/assistant_tool_choice.py,sha256=Hy4HIfPQCkWD8VruHHicuTkomNwljGHviQHk36prKhg,544 openai/types/beta/assistant_tool_choice_function.py,sha256=aYMlVrZdX2JxmehDlyGALRK2PIEkO7VFEfsvY3VH6T4,270 @@ -449,19 +552,87 @@ openai/types/beta/assistant_tool_choice_option.py,sha256=jrXMd_IYIQ1pt8Lkc-KrPd4 openai/types/beta/assistant_tool_choice_option_param.py,sha256=VcatO5Nej9e5eqfrwetG4uM1vFoewnBEcFz47IxAK2E,424 openai/types/beta/assistant_tool_choice_param.py,sha256=NOWx9SzZEwYaHeAyFZTQlG3pmogMNXzjPJDGQUlbv7Q,572 openai/types/beta/assistant_tool_param.py,sha256=6DcaU3nMjurur2VkVIYcCaRAY1QLQscXXjCd0ZHHGho,501 -openai/types/beta/assistant_update_params.py,sha256=6Eo_HUAJdAwRo7X-zzp4z8PVs9glPS-UV_EWO7aqZL8,4698 +openai/types/beta/assistant_update_params.py,sha256=XsLdjYNx7dbPr1aqDu0_ZGuXjgU0JVuM0waJo1NskyI,4684 +openai/types/beta/auto_file_chunking_strategy_param.py,sha256=hbBtARkJXSJE7_4RqC-ZR3NiztUp9S4WuG3s3W0GpqY,351 openai/types/beta/chat/__init__.py,sha256=OKfJYcKb4NObdiRObqJV_dOyDQ8feXekDUge2o_4pXQ,122 openai/types/beta/code_interpreter_tool.py,sha256=7mgQc9OtD_ZUnZeNhoobMFcmmvtZPFCNYGB-PEnNnfs,333 
openai/types/beta/code_interpreter_tool_param.py,sha256=X6mwzFyZx1RCKEYbBCPs4kh_tZkxFxydPMK4yFNJkLs,389 -openai/types/beta/file_search_tool.py,sha256=6OH6Vt9rV9Ym4U4G61PP6UdnfE-lMWg_HzBFW6bQBNc,974 -openai/types/beta/file_search_tool_param.py,sha256=iDUCeoUJ1uLvUI9rDwerXwDOULw9_bnCZgjXwfWlDi0,981 +openai/types/beta/file_chunking_strategy.py,sha256=6nRvYetBl_BHgN8biTyTut-tw8G13YttgxSKtJsJLeM,560 +openai/types/beta/file_chunking_strategy_param.py,sha256=P0x4I2hB_ylbSxFFEmRqgwto3HQQsHIokX3U0is_a9s,498 +openai/types/beta/file_search_tool.py,sha256=5aNU8RZj-UNdmuqqpjCXNaa1pI9GzSP5qCPtvVSJ1oQ,1769 +openai/types/beta/file_search_tool_param.py,sha256=o6sWPrzRYY8wtNaVuF8h3D1sAQV3N0L3dbdiiaMisW0,1765 openai/types/beta/function_tool.py,sha256=oYGJfcfPpUohKw2ikgshDjOI1HXCK-5pAWyegYNezeU,397 -openai/types/beta/function_tool_param.py,sha256=T_k2OX1OULgkrHHXw0rY_J-O0y5qA0lM-B58C64YyfM,453 -openai/types/beta/thread.py,sha256=wd00j3ogUpOa_O0Sf1m6H4f8t1Nf05DKWiK_4m33O6s,2013 -openai/types/beta/thread_create_and_run_params.py,sha256=ZvuTJnslOhwzzEy946ICkftcopChwylK_KKLEHF3I3w,14477 -openai/types/beta/thread_create_params.py,sha256=0MlmA3nacpq1k7I-gxKvMes0Yo683grly1St4qUlOpQ,6215 +openai/types/beta/function_tool_param.py,sha256=hCclpGO4Re-TxiGy_QxX75g1kcN6_ElubicO6SdJ_YI,471 +openai/types/beta/other_file_chunking_strategy_object.py,sha256=hJz1OeSkvvcWJVftPfvz2pB5ujdawWEEa3v38E6tt7g,311 +openai/types/beta/realtime/__init__.py,sha256=OJOsvJMLlDqJEJClien1XwN8K6vhnyVtNgN1qolZeW0,6167 +openai/types/beta/realtime/conversation_created_event.py,sha256=U4-nesN8rAep2_25E2DrkXUMafQejj3NE_0llXKj5Y8,752 +openai/types/beta/realtime/conversation_item.py,sha256=av6WCjWVuRxBjccmxv4j26cd3TCKURj2a7cf8uS3P3s,2297 +openai/types/beta/realtime/conversation_item_content.py,sha256=S3Gqx5VBji5S64d0Rqq-r4HbUddSl_CT0xLZRfAjgmQ,930 +openai/types/beta/realtime/conversation_item_content_param.py,sha256=bIS_29Z_odnBth8as5pDmUFf1UNwJ9gfFCYlz8ZdRRQ,852 +openai/types/beta/realtime/conversation_item_create_event.py,sha256=PNdOLjWMB2uc0tCm7QdWANXt7FWqKpgocnej2OiEjxw,976 +openai/types/beta/realtime/conversation_item_create_event_param.py,sha256=L9e8U-3LITXlBuJ_FQfGhSDX3Jj7R3uWN1UiG7qDTec,996 +openai/types/beta/realtime/conversation_item_created_event.py,sha256=DIeG7YQ5HdKrnbnorklB1Zfsz42yRdPKDOx5TPzfvw0,722 +openai/types/beta/realtime/conversation_item_delete_event.py,sha256=p-O6R1Ku5pxZvaxhSi4YTPqLXS1SHhdLGgJuPQyPcHY,549 +openai/types/beta/realtime/conversation_item_delete_event_param.py,sha256=a17h8Hd8MxUbXT6NQg8YpTr1ICt1ztRecpfukHw4g34,569 +openai/types/beta/realtime/conversation_item_deleted_event.py,sha256=uWHSqX5ig550romSdhtROwrdQmdeN31Oz1Vpr9IuQFI,492 +openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py,sha256=7tX1hI3g0SbrXGHcaC_Y1xAzhsoziReYwlqyA8ycB3E,764 +openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py,sha256=xYNSBIyERQJ4P-5YoFF1VptfPa8JnJ0sWaH6LGsPow0,1077 +openai/types/beta/realtime/conversation_item_param.py,sha256=x12A5-yjNWodFNJEnbHKY1WJzSzX9s7EQr2c5FuYKBQ,2177 +openai/types/beta/realtime/conversation_item_truncate_event.py,sha256=1c2_BamaTkgD26eyGZJU5xwbz7lRHupqU2HqcK0VniI,943 +openai/types/beta/realtime/conversation_item_truncate_event_param.py,sha256=hSnVOSMMtLf16nn4ISHkevYCfEsiN9kNcgxXRtHa8Kc,983 +openai/types/beta/realtime/conversation_item_truncated_event.py,sha256=K4S35U85J-UNRba9nkm-7G1ReZu8gA8Sa1z0-Vlozc0,704 +openai/types/beta/realtime/error_event.py,sha256=goNkorKXUHKiYVsVunEsnaRa6_3dsDKVtrxXQtzZCmk,877 
+openai/types/beta/realtime/input_audio_buffer_append_event.py,sha256=lTKWd_WFbtDAy6AdaCjeQYBV0dgHuVNNt_PbrtPB8tg,662 +openai/types/beta/realtime/input_audio_buffer_append_event_param.py,sha256=XmN2bE6jBRrkKGVPJdnPjJql5dqMPqwbmFnxo-z22JE,682 +openai/types/beta/realtime/input_audio_buffer_clear_event.py,sha256=7AfCQfMxZQ-UoQXF9edYKw5GcTELPcfvvJWWpuLS41c,489 +openai/types/beta/realtime/input_audio_buffer_clear_event_param.py,sha256=y-zfWqJsh1n6r2i0MgLDpnNC4g1dq3GCS66Twfkng38,499 +openai/types/beta/realtime/input_audio_buffer_cleared_event.py,sha256=j9gpm7aGVmrUt48wqtvBMN8NOgtvqHciegjXjOnWm7A,429 +openai/types/beta/realtime/input_audio_buffer_commit_event.py,sha256=SLZR2xxRd6uO3IQL6-LuozkjROXiGyblKoHYQjwXk4I,493 +openai/types/beta/realtime/input_audio_buffer_commit_event_param.py,sha256=B8agXC-rUl-D-RijJ5MeTLgw43qVYzmf2_2oAVokhLY,503 +openai/types/beta/realtime/input_audio_buffer_committed_event.py,sha256=wXMxuXLw1jmT4e-FmTp6rSxcSc_4l55zO3gT7jI1Mp4,628 +openai/types/beta/realtime/input_audio_buffer_speech_started_event.py,sha256=NVp60RUsLFtte9Ilknmu_5lRk2dZp_1fXCgGHd4EvSM,861 +openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py,sha256=gszRuYQtAW8upIhd7CJZ7pxboDk-K7sqidjqxgf47q4,779 +openai/types/beta/realtime/rate_limits_updated_event.py,sha256=kBnf_p-49Q_LNdJsj0R1Szi8R4TGYAAJ_KifLuuyFZw,949 +openai/types/beta/realtime/realtime_client_event.py,sha256=TD_qJi1hNgvurWTUzG-xb27thuvUT2-2AK_pouAY3vc,1249 +openai/types/beta/realtime/realtime_client_event_param.py,sha256=qNStVbW_imzF0F8qfEHHE07AZoPIQLvjcTw9mXu4mFY,1294 +openai/types/beta/realtime/realtime_connect_params.py,sha256=AvTypkFCYmDn9qMeektVqij6cqzgovr3PpgpMalJoJ4,290 +openai/types/beta/realtime/realtime_response.py,sha256=C-3ZTF_gy40eT1eaeWIfpBS3pQC5lv3XNM_mqiLtTWg,1505 +openai/types/beta/realtime/realtime_response_status.py,sha256=gU-59Pr_58TRfMZqFzdCloc53e1qOnU4aaHY3yURUK8,1326 +openai/types/beta/realtime/realtime_response_usage.py,sha256=6XOFjCjPWioHoICZ0Q8KXuUzktQugx6WuTz0O5UvzZg,1541 +openai/types/beta/realtime/realtime_server_event.py,sha256=j8s9jdl5cARv3fVM5jEjo04f83FmNELPRS_lq5Ao_Q0,3512 +openai/types/beta/realtime/response_audio_delta_event.py,sha256=UjbnK4u_WSNTOColZj8SmJgHnAc2H8iRXD76ZnPbz7E,742 +openai/types/beta/realtime/response_audio_done_event.py,sha256=1XEWBPh1JiOgyr6V03mRt_3sLm0YFUq5ft1AhfFlNEg,679 +openai/types/beta/realtime/response_audio_transcript_delta_event.py,sha256=HEVNQ_R2_Nyo6BvNvsliMnN__b17eVd2Jx5udRHg0Hg,773 +openai/types/beta/realtime/response_audio_transcript_done_event.py,sha256=Cn5l4mJnKK3LeSN9qFL4LLqs1WOWg4kt1SaYThB-5c0,787 +openai/types/beta/realtime/response_cancel_event.py,sha256=EKx8IZUISJHdl-_3tCdHtz2BINQ85Tq_ocadnsEGPSk,637 +openai/types/beta/realtime/response_cancel_event_param.py,sha256=nidzBL83liHwyImiNGiz9Ad0V34EtFAQDw1utqcF6ns,630 +openai/types/beta/realtime/response_content_part_added_event.py,sha256=a8-rm1NAwX685fk7GdT6Xi0Yr-JfeAkyUr94-RoFe34,1232 +openai/types/beta/realtime/response_content_part_done_event.py,sha256=jO2TZygxPabbnEG9E1AfNP-JYJv1QtCMnCzgcZ_3n18,1190 +openai/types/beta/realtime/response_create_event.py,sha256=a6oMZOu4htnwQhrroU5LOXqDpbWMvK7PZpznlQcgkqs,4423 +openai/types/beta/realtime/response_create_event_param.py,sha256=jRpgQznlLqgQrR3QyBhv8x1tZVUJaF3t549eaankq5c,4268 +openai/types/beta/realtime/response_created_event.py,sha256=zZtHx-1YjehXxX6aNE88SFINDaKOBzpzejo6sTNjq9g,506 +openai/types/beta/realtime/response_done_event.py,sha256=_yUPoECCli89iHLtV3NQkXQOW6Lc1JlxVPFw04ziBGY,494 
+openai/types/beta/realtime/response_function_call_arguments_delta_event.py,sha256=Yh2mQZDucfnTLiO8LRyG9r7zeS1sjwLcMF1JPMdTFJc,793 +openai/types/beta/realtime/response_function_call_arguments_done_event.py,sha256=kxSPK6nbNWL6pxveY7zaNGgCkCXqyBFJPVYJrw9cbOw,793 +openai/types/beta/realtime/response_output_item_added_event.py,sha256=-_BZjvAqcgv3NIz-EMhvYMxIwvcXTt68FVNp0pw09dI,713 +openai/types/beta/realtime/response_output_item_done_event.py,sha256=0ClNVMZmeIxKghlEid9VGoWiZ97wp00hIdNnev4qBD8,709 +openai/types/beta/realtime/response_text_delta_event.py,sha256=B1yyuc6iMOMoG5Wh6W5KoQNYtVD1vEm2cKqHnl2CuFQ,721 +openai/types/beta/realtime/response_text_done_event.py,sha256=mPgVG6nWxwkZ3aZOX-JkVF7CpaWP5-bvtbxFrr4fK7g,724 +openai/types/beta/realtime/session.py,sha256=OqdK0L7ugOYV0PT2XlixRERimneHIF7-oHUh1JWtK70,5388 +openai/types/beta/realtime/session_create_params.py,sha256=ALVf2hYtcdZjl2A5LJyjiyPfqSFEAeTIVkELbeSaH-g,5161 +openai/types/beta/realtime/session_create_response.py,sha256=iyovJfORab-aDJJKE8PN--VQeCBR3VlnyV1tf4qE-K0,5411 +openai/types/beta/realtime/session_created_event.py,sha256=rTElnBlE7z1htmkdmpdPN4q_dUYS6Su4BkmsqO65hUc,489 +openai/types/beta/realtime/session_update_event.py,sha256=VwRvNgu-otI5_0xnXso1gqlCEWFnqzrGq9-Kar_o71Q,5751 +openai/types/beta/realtime/session_update_event_param.py,sha256=NmDaFhVTohrHi-yRd1x883NUjGH3N6ZWyfpfJ0tEpTQ,5573 +openai/types/beta/realtime/session_updated_event.py,sha256=HyR-Pz3U9finVO-bUCvnmeqsANw-fceNvVqEIF6ey10,489 +openai/types/beta/static_file_chunking_strategy.py,sha256=nHaLv70q1rencY2u8mqS7mW7X7enzHrc-zM9mg22dHw,597 +openai/types/beta/static_file_chunking_strategy_object.py,sha256=aOPxudte299F0j3bzniXcKJ7j-w4ZfQpgFHTa3CFyZ8,425 +openai/types/beta/static_file_chunking_strategy_param.py,sha256=kCMmgyOxO0XIF2wjCWjUXtyn9S6q_7mNmyUCauqrjsg,692 +openai/types/beta/thread.py,sha256=9wxx6M26S7cilx5SKWjZnkHc7g222AIOhikd0WTJfwI,2014 +openai/types/beta/thread_create_and_run_params.py,sha256=NHkj-IMm2WEqH82i9zxqgJqYkOVCBVXSpZcpl-SVznY,13175 +openai/types/beta/thread_create_params.py,sha256=U0gNXfSltPqYF3GIGQ7dloolkz6nzuDimXF-V9wjzvo,4970 openai/types/beta/thread_deleted.py,sha256=MaYG_jZIjSiB9h_ZBiTtpMsRSwFKkCY83ziM5GO_oUk,292 -openai/types/beta/thread_update_params.py,sha256=RYsR88YHwReKLiLqnLlnWiReiVIGlEGvVV9-g_wptgM,1750 +openai/types/beta/thread_update_params.py,sha256=olIjwn1eD0H2AkjdDZC38lPdT5dp2ORSjavPA7pB_08,1751 openai/types/beta/threads/__init__.py,sha256=0WsJo0tXp08CgayozR7Tqc3b8sqzotWzvBun19CEIWc,3066 openai/types/beta/threads/annotation.py,sha256=Ce3Y0mSodmYRkoqyhtyIdep6WfWew6KJJgtrENOnfek,462 openai/types/beta/threads/annotation_delta.py,sha256=iNsE-1Gn1yU0TlTHoxqKbOvPRUxWuXsF72qY_mMnWGY,510 @@ -481,40 +652,42 @@ openai/types/beta/threads/image_url_content_block_param.py,sha256=RWzo5KkBiwvgJS openai/types/beta/threads/image_url_delta.py,sha256=MXCp-OmuNT4njbWA9DWAbocP7pD3VpdcUy2wgeOjwm4,582 openai/types/beta/threads/image_url_delta_block.py,sha256=Jjdfub4g9ceNKF8GuuTIghOmYba2vEeX3320mg5PWIA,484 openai/types/beta/threads/image_url_param.py,sha256=VRLaxZf-wxnvAOcKGwyF_o6KEvwktBfE3B6KmYE5LZo,602 -openai/types/beta/threads/message.py,sha256=r3Lj8coZqEoz-2nT11yivi7K25CcUskbNSo4x7sWXPg,3294 +openai/types/beta/threads/message.py,sha256=aGWe0kiNv5sXUYheJ0o1KpTds4oTaeDmqot1PMStJCE,3295 openai/types/beta/threads/message_content.py,sha256=b8IC_EG28hcXk28z09EABfJwPkYZ7U-lTp_9ykdoxvU,630 openai/types/beta/threads/message_content_delta.py,sha256=o4Edlx9BtdH2Z4OMwGWWXex8wiijknNRihJ-wu8PDUQ,615 
openai/types/beta/threads/message_content_part_param.py,sha256=RXrnoDP2-UMQHoR2jJvaT3JHrCeffLi6WzXzH05cDGI,550 -openai/types/beta/threads/message_create_params.py,sha256=Qs7Gxs8ZKwzk_7ZhJOwj4KiHkidSmy5qc_Dam0P8F5E,1956 +openai/types/beta/threads/message_create_params.py,sha256=WYfc_-kc7lxcxdpwKCVT2Ei-5Jl_132uqOHMtXL92OE,1957 openai/types/beta/threads/message_deleted.py,sha256=DNnrSfGZ3kWEazmo4mVTdLhiKlIHxs-D8Ef5sNdHY1o,303 openai/types/beta/threads/message_delta.py,sha256=-kaRyvnIA8Yr2QV5jKRn15BU2Ni068a_WtWJ4PqlLfE,570 openai/types/beta/threads/message_delta_event.py,sha256=7SpE4Dd3Lrc_cm97SzBwZzGGhfLqiFViDeTRQz-5YmQ,579 -openai/types/beta/threads/message_list_params.py,sha256=LXqc3deSkKO6VN337OlQ4fzG7dfgBE7Iv_CLzZHhbhw,1294 -openai/types/beta/threads/message_update_params.py,sha256=bw6_U-vZA4c9_CDmeGOh7IEPIm8BU3BBOKtxnii0LKA,629 +openai/types/beta/threads/message_list_params.py,sha256=iuwzDccnViooUxHlq-WoE1FEJArNy5-zrYCoaNgVS8k,1296 +openai/types/beta/threads/message_update_params.py,sha256=jTM_WDKDuPVJKlNKlT6J_UqQjgM2vrrD03ZhvHI5bSY,630 openai/types/beta/threads/refusal_content_block.py,sha256=qB9jrS2Wv9UQ7XXaIVKe62dTAU1WOnN3qenR_E43mhg,310 openai/types/beta/threads/refusal_delta_block.py,sha256=ZhgFC8KqA9LIwo_CQIX-w3VVg3Vj0h71xC1Hh1bwmnU,423 openai/types/beta/threads/required_action_function_tool_call.py,sha256=XsR4OBbxI-RWteLvhcLEDBan6eUUGvhLORFRKjPbsLg,888 -openai/types/beta/threads/run.py,sha256=_yc01xJzML8ekoB5mX9HGxlJUJmPaiukg0rpuQAK3Rc,8211 -openai/types/beta/threads/run_create_params.py,sha256=4wxYnpiZfeUfkW79Y3_qLJgqEHnY2b1_SXvyLwaD_FQ,9163 -openai/types/beta/threads/run_list_params.py,sha256=73poqeRcb5TEsIVn7OzJ_g9OajNokEzpCVLzVNKZmPk,1208 +openai/types/beta/threads/run.py,sha256=GR469hvbAlWTHL17MieCYxQfASyxaY1ZOe6Qbf0ORMI,8218 +openai/types/beta/threads/run_create_params.py,sha256=KgltVibs_KnKsL3UaZyVJgb-6aUxct7CXUtqMdkTXTM,9670 +openai/types/beta/threads/run_list_params.py,sha256=TgepSLrupUUtuQV2kbVcoGH1YA0FVUX9ESkszKuwyHY,1210 openai/types/beta/threads/run_status.py,sha256=OU1hzoyYXaRJ3lupX4YcZ-HZkTpctNE4tzAcp6X8Q9U,351 -openai/types/beta/threads/run_submit_tool_outputs_params.py,sha256=aDrg0FZZoJKaPVQzcFjUg4ZKaeW8KF6UJBxhJEIjC2I,1630 -openai/types/beta/threads/run_update_params.py,sha256=76dWMNa3zCUliemCdwWv6p07GNeMYCdZoJs9KNbdZSE,621 -openai/types/beta/threads/runs/__init__.py,sha256=uhxk5F1_5c5wg2_p70AjlOy9cE3Ga8-ILn4Ep-gcls4,1515 +openai/types/beta/threads/run_submit_tool_outputs_params.py,sha256=cKiyD374BsZN_Oih5o5n5gOf_DYsxErVrbgxveNhmPI,1643 +openai/types/beta/threads/run_update_params.py,sha256=EDYJO3YuH1IKjfR1xAaBtWFonNnyXJDYAnlaMnwyXo8,622 +openai/types/beta/threads/runs/__init__.py,sha256=mg_roY9yL1bClJ8isizkQgHOAkN17iSdVr2m65iyBrs,1653 openai/types/beta/threads/runs/code_interpreter_logs.py,sha256=7wXZpUE9I-oZJ0K3mFG0Nwmfm2bKGiSpWJyBeo7txwo,482 openai/types/beta/threads/runs/code_interpreter_output_image.py,sha256=8o99k0ZHMHpqH0taXkOkYR9WaDUpCN-G0Ifd5XsJpb8,613 openai/types/beta/threads/runs/code_interpreter_tool_call.py,sha256=ekiIuH1kVCN51hCzY3AYr5i3_a4vlgUiZHJ59pl17oY,1810 openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py,sha256=Qr2cen-bKyXTW2NDEUHnmJRE0jY-nkLcnO4NzCbBPDo,1479 -openai/types/beta/threads/runs/file_search_tool_call.py,sha256=PPxrJP3r4RWFTeE5mU-9SbFz37JmKHOGfsxlZGydyW0,522 +openai/types/beta/threads/runs/file_search_tool_call.py,sha256=XBgsM_USVr3ZrwTZx4L1-YG94Qv8c8GXI19ZHtDrZq8,1897 openai/types/beta/threads/runs/file_search_tool_call_delta.py,sha256=Gx8c7GSgGYuOvGadcAr3ZIspEFMZS3e2OY7vBo_MYnM,655 
openai/types/beta/threads/runs/function_tool_call.py,sha256=aOq5yOtKOi6C5Q1FIQRxqtJJR1AcSW_K5PvRiKISNCI,920 openai/types/beta/threads/runs/function_tool_call_delta.py,sha256=VFRtCJkj4PHX97upM1cXpJAk9-JvJSgyngie06fBIjQ,1076 openai/types/beta/threads/runs/message_creation_step_details.py,sha256=tRFMNF2Rf4DekVliUKkoujItiOjjAE9EG9bbxJvpVPA,506 -openai/types/beta/threads/runs/run_step.py,sha256=NA3QgTsa646h3LYV13K3CdpsEQHaaD1QN2zT7STo6zo,3468 +openai/types/beta/threads/runs/run_step.py,sha256=L_CiwlW9y7NEOTumv1RyoQrQ_oCaNowRmraUHiAgJEc,3469 openai/types/beta/threads/runs/run_step_delta.py,sha256=FNYDTddRrTO3PT_fgi7AsJ1PeMtyWsVzcxoihjbBzAw,663 openai/types/beta/threads/runs/run_step_delta_event.py,sha256=rkDyvHSXt-hc1LngB41f9vglkn6t03kS62bsn0iGaxU,585 openai/types/beta/threads/runs/run_step_delta_message_delta.py,sha256=UIo6oPH8STLjPHiWL-A4CtKfYe49uptvIAHWNnZ3Ums,564 -openai/types/beta/threads/runs/step_list_params.py,sha256=2vMPFMElvK135ncP9ch6kUnzPGOSIPT3Eio18jJhAqk,1250 +openai/types/beta/threads/runs/run_step_include.py,sha256=u-9Cw1hruRiWr70f_hw4XG0w1cwOAYfRJYKva2dEacs,264 +openai/types/beta/threads/runs/step_list_params.py,sha256=zorF5juogCzLMsZLjzMZTs_iIBcPj9WUur5HcrXuH8M,1752 +openai/types/beta/threads/runs/step_retrieve_params.py,sha256=aJ7l8RDJLPyEmqjfO4XsTV54VZOOqyb_gKSUvqp33ZI,815 openai/types/beta/threads/runs/tool_call.py,sha256=1rwq4IbLgjQAQ-ORXYkNpmJyi9SREDnqA57nJbj_NiU,537 openai/types/beta/threads/runs/tool_call_delta.py,sha256=t5wF8ndW3z99lHF981FL-IN5xXBS9p7eonH9bxvKu_c,600 openai/types/beta/threads/runs/tool_call_delta_object.py,sha256=eK20VsIswEyT48XbkGu60HUrE7OD3fhpn1fbXrVauM4,615 @@ -524,82 +697,94 @@ openai/types/beta/threads/text_content_block.py,sha256=pdGlKYM1IF9PjTvxjxo1oDg1X openai/types/beta/threads/text_content_block_param.py,sha256=feQr0muF845tc1q3FJrzgYOhXeuKLU3x1x5DGFTN2Q0,407 openai/types/beta/threads/text_delta.py,sha256=2EFeQCkg_cc8nYEJ6BtYAA3_TqgMTbmEXoMvLjzaB34,389 openai/types/beta/threads/text_delta_block.py,sha256=pkHkVBgNsmHi9JURzs5ayPqxQXSkex3F0jH0MqJXik0,448 -openai/types/beta/vector_store.py,sha256=zaSaSUpStD3iuyas9f7VQCNF1byxnXRz_5q36eizNGE,2353 -openai/types/beta/vector_store_create_params.py,sha256=Q-pD0r2gg0PpTQdrltvQ-YdG8dDF48i8EoBsr0PRPWM,2509 +openai/types/beta/vector_store.py,sha256=R8M70uuGWVKt4t0ef__Py-MPw33Ljx4sh5ddihJMbIU,2354 +openai/types/beta/vector_store_create_params.py,sha256=rvvYUSDBbc5L6PAiMGSFQD85ugyR9mLdvZMxjap0fnk,1600 openai/types/beta/vector_store_deleted.py,sha256=Yq0E1orRLShseLwZ1deiBdDEUgEw_tcYVxGYa5gbIrM,308 -openai/types/beta/vector_store_list_params.py,sha256=8iUgSgs_TeehprKjtTLWOGeH_R8LbDdLkdwMq9xVpSA,1224 -openai/types/beta/vector_store_update_params.py,sha256=AHlOV4f36UWAH4k7XKlGa51Mfao2f7339qI3fskWbIk,1114 +openai/types/beta/vector_store_list_params.py,sha256=KeSeQaEdqO2EiPEVtq1Nun-uRRdkfwW0P8aHeCmL5zA,1226 +openai/types/beta/vector_store_update_params.py,sha256=6OEP1IvilrGoPhHQPXOMQA0TwmCubeo7rB_ik5GQSrY,1115 openai/types/beta/vector_stores/__init__.py,sha256=gXfm8V5Ad0iueaC_VoHDUQvSdwSfBzk2cQNwZldvY0s,671 -openai/types/beta/vector_stores/file_batch_create_params.py,sha256=c3Syo18qBuM1NYOopZN0CLigi864uo9jyFa4W6lH4i4,1922 -openai/types/beta/vector_stores/file_batch_list_files_params.py,sha256=6c_KvnlFV0vkFid_thhyEK6HC6F1ixbDh2roExL_-qk,1449 -openai/types/beta/vector_stores/file_create_params.py,sha256=6gCvIuEgvaPIIGS8IHwWQVTfwPgDN2Mt5zrDZY9z-4M,1890 -openai/types/beta/vector_stores/file_list_params.py,sha256=UC6NzZQ79tInL8xV3pMm66IFWsIT9PW_BhSbQLm4ar4,1383 
-openai/types/beta/vector_stores/vector_store_file.py,sha256=TxefqQwU1vrJdINGdv91EQjyGZ-5eBqjAcUJAPvVETM,2730 +openai/types/beta/vector_stores/file_batch_create_params.py,sha256=lV4t5kikvEhl431RZgGDyQdFKTl-zXI-Q7YnbM0Qmv8,798 +openai/types/beta/vector_stores/file_batch_list_files_params.py,sha256=FPpQvCQI2skyLB8YCuwdCj7RbO9ba1UjaHAtvrWxAbs,1451 +openai/types/beta/vector_stores/file_create_params.py,sha256=kwSqe-le2UaYrcXGPxlP41QhH2OGvLXBbntAGlmK288,748 +openai/types/beta/vector_stores/file_list_params.py,sha256=AIzmNH1oFuy-qlpRhj9eXu9yyTA-2z_IppLYFclMtZw,1385 +openai/types/beta/vector_stores/vector_store_file.py,sha256=X8aQg4jYlK7iQumxn7B-eammIKVjUbu4lapPeq9jDWo,1788 openai/types/beta/vector_stores/vector_store_file_batch.py,sha256=ubvj8z95EOdRGAp0rgI94g5uFQx0ob8hLgwOWHKda4E,1457 openai/types/beta/vector_stores/vector_store_file_deleted.py,sha256=37J7oL2WYCgOd7Rhg2jX6IavaZT63vgUf3u6LC6C3Hs,322 -openai/types/chat/__init__.py,sha256=epD7g5z--KfkvxuhuvFS1uXFrlrV3djgoR-ORTYkbjI,3050 +openai/types/chat/__init__.py,sha256=coi_C98uX9XhThMVJ0GgjPVpzOYOMgj-ZmCWulEE3EA,3849 openai/types/chat/chat_completion.py,sha256=MaTVOMwtbzqGyHgyP4DP41ESEDKhv_XOM8L_fx3uoQE,2689 -openai/types/chat/chat_completion_assistant_message_param.py,sha256=0m5WjA97DuxiGGvUyJQnlkf1SqLEr2Ce-kUTBvtLbBc,2114 +openai/types/chat/chat_completion_assistant_message_param.py,sha256=E6ZrsjEN_JHOHO-wC7Uk90Fa7Qz7bfgx8jea0z6g30s,2421 +openai/types/chat/chat_completion_audio.py,sha256=vzWeaAAAbomkvbFksXQu6qpw1RVJiuFytJZswO6h6vI,656 +openai/types/chat/chat_completion_audio_param.py,sha256=MnY4PNK8-OOaODkHNhBbSbzH4HmqykKvwftsOjVpOAE,801 openai/types/chat/chat_completion_chunk.py,sha256=aQXFY4gq9YEIrr7YBM68D5XyWGT9kKo0JO8n-55IjEA,5032 -openai/types/chat/chat_completion_content_part_image_param.py,sha256=ODHcWpe8TIXZQHXHhEEacrRHm_TCaFWZnml-bD85XiU,797 -openai/types/chat/chat_completion_content_part_param.py,sha256=8hoTnNqerHjaHGMFU8CvhjVbH8yChXEYxs3jLWKfod8,543 +openai/types/chat/chat_completion_content_part_image_param.py,sha256=Gqv98qyD8jB81THZp49c8v2tHrId_iQp4NzciT9SKI0,797 +openai/types/chat/chat_completion_content_part_input_audio_param.py,sha256=r1EXNEtjJo5oJ9AnP3omaJzACE1gSfdmob5Q0HKsOm4,704 +openai/types/chat/chat_completion_content_part_param.py,sha256=7lCk-fZB5iT5keHLWw9eM-Hd5jsnPh2IIHICIUpoEXk,686 openai/types/chat/chat_completion_content_part_refusal_param.py,sha256=TV1vu-IgrvKa5IBlPSIdBxUaW8g1zDhMOOBOEmhU2w0,467 openai/types/chat/chat_completion_content_part_text_param.py,sha256=4IpiXMKM9AuTyop5PRptPBbBhh9s93xy2vjg4Yw6NIw,429 +openai/types/chat/chat_completion_developer_message_param.py,sha256=OCFKdTWkff94VtgY7AaDUUFiZLT8LBn7WWxjbcIq2OM,830 openai/types/chat/chat_completion_function_call_option_param.py,sha256=M-IqWHyBLkvYBcwFxxp4ydCIxbPDaMlNl4bik9UoFd4,365 openai/types/chat/chat_completion_function_message_param.py,sha256=jIaZbBHHbt4v4xHCIyvYtYLst_X4jOznRjYNcTf0MF0,591 -openai/types/chat/chat_completion_message.py,sha256=CYVebAMTUfREmvkykqXSNE6tGzEJu1QzClZ_ZgFD73s,1371 -openai/types/chat/chat_completion_message_param.py,sha256=RFer4ZYXxVed9F0ulkqi0xNy_eOhp63Y-0oN24dhVBI,889 +openai/types/chat/chat_completion_message.py,sha256=AH7JpjgKfphxBRJyI4PhwHCMREy_-D-a4_4u4NHjSfc,1674 +openai/types/chat/chat_completion_message_param.py,sha256=aLrz_cX_CYymFdW9cMIPZpv0Z4zM50RECV3SH6QNZsc,1019 openai/types/chat/chat_completion_message_tool_call.py,sha256=XlIe2vhSYvrt8o8Yol5AQqnacI1xHqpEIV26G4oNrZY,900 openai/types/chat/chat_completion_message_tool_call_param.py,sha256=XNhuUpGr5qwVTo0K8YavJwleHYSdwN_urK51eKlqC24,1009 
+openai/types/chat/chat_completion_modality.py,sha256=8Ga0kruwJc43WD2OIqNudn7KrVRTPDQaalVkh_8bp9I,236 openai/types/chat/chat_completion_named_tool_choice_param.py,sha256=JsxfSJYpOmF7zIreQ0JrXRSLp07OGCBSycRRcF6OZmg,569 +openai/types/chat/chat_completion_prediction_content_param.py,sha256=Xw4K_4F379LsXENOpZvREDn55cCnbmZ69xa4fw9w3bg,868 +openai/types/chat/chat_completion_reasoning_effort.py,sha256=Bs4xRaukXpM-_NW-QSKKnUyIPDw1ffSqnWaHU-rMdIE,258 openai/types/chat/chat_completion_role.py,sha256=Rdzg4deI1uZmqgkwnMrLHvbV2fPRqKcHLQrVmKVk9Dw,262 openai/types/chat/chat_completion_stream_options_param.py,sha256=7-R2mYh7dbtX9qDOL3UkeyVH6FNWC_4aTCLtHYObMbs,628 openai/types/chat/chat_completion_system_message_param.py,sha256=WYtzmsNP8ZI3Ie8cd-oU7RuNoaBF6-bBR3mOzST9hMw,815 openai/types/chat/chat_completion_token_logprob.py,sha256=6-ipUFfsXMf5L7FDFi127NaVkDtmEooVgGBF6Ts965A,1769 openai/types/chat/chat_completion_tool_choice_option_param.py,sha256=ef71WSM9HMQhIQUocRgVJUVW-bSRwK2_1NjFSB5TPiI,472 openai/types/chat/chat_completion_tool_message_param.py,sha256=5K7jfKpwTuKNi1PTFabq_LHH-7wun8CUsLDh90U8zQE,730 -openai/types/chat/chat_completion_tool_param.py,sha256=sve2G1DayUs-1CMzXK1x104r8KTa5K62CZdxoyLmFlk,485 +openai/types/chat/chat_completion_tool_param.py,sha256=J9r2TAWygkIBDInWEKx29gBE0wiCgc7HpXFyQhxSkAU,503 openai/types/chat/chat_completion_user_message_param.py,sha256=mik-MRkwb543C5FSJ52LtTkeA2E_HdLUgtoHEdO73XQ,792 -openai/types/chat/completion_create_params.py,sha256=WT1UacPa7U_YlOgwClm_xBuoPdwApX-VRQmTsKFdwx4,11205 +openai/types/chat/completion_create_params.py,sha256=CGwTjckVhpxaQfA9zRKmrMCHvnYk-eaPFVmSVoA5Nls,13926 openai/types/chat/parsed_chat_completion.py,sha256=KwcwCtj0yexl6gB7yuOnyETRW-uUvNRYbVzPMkwCe5Q,1437 openai/types/chat/parsed_function_tool_call.py,sha256=hJzcKOpzf1tnXC6RGbPhaeCawq8EFdnLK_MfRITkW1U,920 -openai/types/chat_model.py,sha256=sR4hutPtTSFNrs3l5Rfj_lkDz_t11z1BjeBDKf9C4kg,755 +openai/types/chat_model.py,sha256=k9Ic_l5usRyY6xSHnqe4dBMKM5R4klTGuANg6z88WFk,1107 openai/types/completion.py,sha256=yuYVEVkJcMVUINNLglkxOJqCx097HKCYFeJun3Js73A,1172 openai/types/completion_choice.py,sha256=PUk77T3Cp34UJSXoMfSzTKGWDK0rQQwq84X_PSlOUJo,965 -openai/types/completion_create_params.py,sha256=mEyR068kk36ZylY4d1K3sqnucpUz9fAqEyoEwmW3DtQ,7567 -openai/types/completion_usage.py,sha256=9m5PmCyfVy93ZfIszGpcG1gdcEaSr22HJgaTJ_ImVrs,435 +openai/types/completion_create_params.py,sha256=TWNRWlGAcvirzY3Piy6AeYKyNxG7ktmtwjS27Q4bTi8,7535 +openai/types/completion_usage.py,sha256=uf5n0vzlCkGAU67BBn_h7yhjd_G4OHpQbJnvzz0eO2A,1735 openai/types/create_embedding_response.py,sha256=lTAu_Pym76kFljDnnDRoDB2GNQSzWmwwlqf5ff7FNPM,798 openai/types/embedding.py,sha256=2pV6RTSf5UV6E86Xeud5ZwmjQjMS93m_4LrQ0GN3fho,637 -openai/types/embedding_create_params.py,sha256=3p7U8i2uG1SCpELbn_IeDMLkFe-vv7cyB5dx-_4U8iU,1885 +openai/types/embedding_create_params.py,sha256=C9Tm1C_m96QtjyNc8fiy6wzs9HkM2GUF8CSTSS6V7ks,1850 +openai/types/embedding_model.py,sha256=0dDL87len4vZ4DR6eCp7JZJCJpgwWphRmJhMK3Se8f4,281 openai/types/file_content.py,sha256=qLlM4J8kgu1BfrtlmYftPsQVCJu4VqYeiS1T28u8EQ8,184 -openai/types/file_create_params.py,sha256=f8-xfoAlZPl5FuOz0h5sJTdAoBuJEIXVz_iyL9iTCbg,926 +openai/types/file_create_params.py,sha256=N1I3rER1se27usx46fhkvdtn-blJ6Y9ECT7Wwzve37Q,913 openai/types/file_deleted.py,sha256=H_r9U7XthT5xHAo_4ay1EGGkc21eURt8MkkIBRYiQcw,277 -openai/types/file_list_params.py,sha256=VhZbSrCO0fYnUTgPE_nuBy-3A5MjpXiBtI-BahAc5SY,310 +openai/types/file_list_params.py,sha256=TmmqvM7droAJ49YlgpeFzrhPv5uVkSZDxqlG6hhumPo,960 
openai/types/file_object.py,sha256=ESuRYCTLbDtHxyuhzybKTF_TztIcq_F7TzCTQ6JToE0,1309 +openai/types/file_purpose.py,sha256=o1TzR-41XsNsQ0791GTGPe3DLkU9FEODucKdP6Q6sPc,243 openai/types/fine_tuning/__init__.py,sha256=SZvjq_22oY9E4zcnrvVd0ul9U4sk_IBeOd0MsNALu5s,806 -openai/types/fine_tuning/fine_tuning_job.py,sha256=YOcsIJZPPAqOnQudOkS_Am-peQuHyyvcMWVDxFvJdEA,3861 -openai/types/fine_tuning/fine_tuning_job_event.py,sha256=oCkO0yImLZnZQLeU4GH6YyUlDG25pzs41SCWWB-sd_o,374 +openai/types/fine_tuning/fine_tuning_job.py,sha256=bu-afb1RZqgNmpUQ7MoXymTjFs3i5JSsBLMV4TKHhi8,6473 +openai/types/fine_tuning/fine_tuning_job_event.py,sha256=POxSD7-WxAtJV2KuEpA9EmZi7W_u0PikOUtUzxIXii4,854 openai/types/fine_tuning/fine_tuning_job_integration.py,sha256=c3Uy7RMVJ32Xlat-6s9eG-5vZLl4w66COXc0B3pWk4g,242 openai/types/fine_tuning/fine_tuning_job_wandb_integration.py,sha256=YnBeiz14UuhUSpnD0KBj5V143qLvJbDIMcUVWOCBLXY,1026 openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py,sha256=7vEc2uEV2c_DENBjhq0Qy5X8B-rzxsKvGECjnvF1Wdw,804 -openai/types/fine_tuning/job_create_params.py,sha256=Qhclk88QYCmmeBsPzfKBjNHHR5juW6-sUcRZDoSQH94,4693 +openai/types/fine_tuning/job_create_params.py,sha256=TwQlyQrZfxrgqD7nmJDWE8pwklsdUUmkYaitvB7LY34,7222 openai/types/fine_tuning/job_list_events_params.py,sha256=4xOED4H2ky2mI9sIDytjmfJz5bNAdNWb70WIb_0bBWs,400 openai/types/fine_tuning/job_list_params.py,sha256=yjxaEnESVTRpJ9ItvjKq30KcD_xz_trqKMIxG2eAriE,396 openai/types/fine_tuning/jobs/__init__.py,sha256=nuWhOUsmsoVKTKMU35kknmr8sfpTF-kkIzyuOlRbJj0,295 openai/types/fine_tuning/jobs/checkpoint_list_params.py,sha256=XoDLkkKCWmf5an5rnoVEpNK8mtQHq1fHw9EqmezfrXM,415 openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py,sha256=Z_sUhebJY9nWSssZU7QoOJwe5sez76sCAuVeSO63XhY,1347 openai/types/image.py,sha256=9No-8GHesOUbjchemY1jqtMwh_s22oBmLVFlLn2KoQo,607 -openai/types/image_create_variation_params.py,sha256=9FuF7N6Ju7BusvbQnMY5ddqHN_YInHkUlqaiVstxwYs,1477 -openai/types/image_edit_params.py,sha256=LvbWaTXVG_yneNnnpkNAocImIhqR-0jaHrRDlj7Tl2I,1837 -openai/types/image_generate_params.py,sha256=S1aA2trSzhLl2OXaFHtQiuJz6P7F_IIzPIswbvUYCjU,2132 +openai/types/image_create_variation_params.py,sha256=PvvPvHXvz0etrRrzVIyvRjvDvNbjGspPu85hOq2fLII,1477 +openai/types/image_edit_params.py,sha256=cxpBybs5peY0DJMTWHgoIx3dWIXj0Y0YmvgxrjGmWjo,1837 +openai/types/image_generate_params.py,sha256=bD2AEIetbt37YDp65vEFfGxkLndOFCwhzJol1I63wfA,2132 openai/types/image_model.py,sha256=W4YchkhJT2wZdlNDUpVkEKg8zdDDfp9S3oTf4D8Wr8g,219 openai/types/images_response.py,sha256=EJ4qxYZ8CPGh2SZdRsyw6I0FnUvlgwxwc4NgPovJrvk,274 openai/types/model.py,sha256=DMw8KwQx8B6S6sAI038D0xdzkmYdY5-r0oMhCUG4l6w,532 openai/types/model_deleted.py,sha256=tXZybg03DunoOSYvwhT7zKj7KTN42R0VEs_-3PRliMo,229 -openai/types/moderation.py,sha256=ihR2jzld_BfOaHW1_6A2csTInEaJvAl5nPxuh_jegY4,3933 -openai/types/moderation_create_params.py,sha256=TADBGDorBDzcTzkylSB2eoN4cvRmZ0ADN00DzPdI1IA,948 +openai/types/moderation.py,sha256=6CZmxhZiafnT50gKa7BeybrTSoYfCAk7wvD5CQHvBP0,6789 +openai/types/moderation_create_params.py,sha256=EaZ2cej25g5WbRB2kIY7JFCXQPKSQQ95iyoUAAelGr4,992 openai/types/moderation_create_response.py,sha256=e6SVfWX2_JX25Za0C6KojcnbMTtDB2A7cjUm6cFMKcs,484 -openai/types/moderation_model.py,sha256=zak2cYrNYevj0TItwsa2inX8NhQS0rUJ2Duhsbl7PxU,257 +openai/types/moderation_image_url_input_param.py,sha256=t1r9WD3c-CK2Al1lpB4-DjfzLFSwgETR0g8nsRdoL0Y,622 +openai/types/moderation_model.py,sha256=BFeqSyel2My2WKC6MCa_mAIHJx4uXU3-p8UNudJANeM,319 
+openai/types/moderation_multi_modal_input_param.py,sha256=RFdiEPsakWIscutX896ir5_rnEA2TLX5xQkjO5QR2vs,483 +openai/types/moderation_text_input_param.py,sha256=ardCbBcdaULf8bkFuzkSKukV9enrINSjNWvb7m0LjZg,406 openai/types/shared/__init__.py,sha256=34RJ2IUXj0f3B73a6rqeHILu8AH5-sC8npTbEx_bnk8,551 openai/types/shared/error_object.py,sha256=G7SGPZ9Qw3gewTKbi3fK69eM6L2Ur0C2D57N8iEapJA,305 openai/types/shared/function_definition.py,sha256=8a5uHoIKrkrwTgfwTyE9ly4PgsZ3iLA_yRUAjubTb7Y,1447 @@ -608,15 +793,16 @@ openai/types/shared/response_format_json_object.py,sha256=15KTCXJ0o1W4c5V1vAcOQA openai/types/shared/response_format_json_schema.py,sha256=rZS7diOPeqK48O_R6OYMJ6AtSGy_88PKTxzha6_56Fo,1399 openai/types/shared/response_format_text.py,sha256=GX0u_40OLmDdSyawDrUcUk4jcrz1qWsKmmAMP4AD7hc,318 openai/types/shared_params/__init__.py,sha256=GcNBmK_EPlGE-xPFmSQjlOq7SuNYd2nwDswX4ExHwoU,498 -openai/types/shared_params/function_definition.py,sha256=JM6luT50hXSYChpFmO5aI1XI3E8uhdibYpQrL3NzKvc,1479 +openai/types/shared_params/function_definition.py,sha256=ciMXqn1tFXnp1tg9weJW0uvtyvMLrnph3WXMg4IG1Vk,1482 openai/types/shared_params/function_parameters.py,sha256=UvxKz_3b9b5ECwWr8RFrIH511htbU2JZsp9Z9BMkF-o,272 openai/types/shared_params/response_format_json_object.py,sha256=QT4uJCK7RzN3HK17eGjEo36jLKOIBBNGjiX-zIa9iT4,390 openai/types/shared_params/response_format_json_schema.py,sha256=Uu2ioeSbI64bm-jJ61OY8Lr3PpofTR4d2LNBcaYxlec,1360 openai/types/shared_params/response_format_text.py,sha256=SjHeZAfgM1-HXAoKLrkiH-VZEnQ73XPTk_RgtJmEbU4,364 openai/types/upload.py,sha256=mEeQTGS0uqFkxbDpJzgBUvuDhGVPw9cQxhRJjPBVeLo,1186 openai/types/upload_complete_params.py,sha256=7On-iVAlA9p_nksLSFPBPR4QbB0xEtAW-skyh7S9gR0,504 -openai/types/upload_create_params.py,sha256=RGqqr4UwsQJiB4i_uSm9CcPok8XYPX7c18C95gATS2o,901 +openai/types/upload_create_params.py,sha256=ZiZr1yC6g2VqL7KEnw7lhE4kZvU-F3DfTAc2TPk-XBo,889 openai/types/uploads/__init__.py,sha256=fDsmd3L0nIWbFldbViOLvcQavsFA4SL3jsXDfAueAck,242 openai/types/uploads/part_create_params.py,sha256=pBByUzngaj70ov1knoSo_gpeBjaWP9D5EdiHwiG4G7U,362 openai/types/uploads/upload_part.py,sha256=U9953cr9lJJLWEfhTiwHphRzLKARq3gWAWqrjxbhTR4,590 +openai/types/websocket_connection_options.py,sha256=4cAWpv1KKp_9pvnez7pGYzO3s8zh1WvX2xpBhpe-96k,1840 openai/version.py,sha256=cjbXKO8Ut3aiv4YlQnugff7AdC48MpSndcx96q88Yb8,62 diff --git a/portkey_ai/_vendor/openai-1.40.1.dist-info/REQUESTED b/portkey_ai/_vendor/openai-1.58.1.dist-info/REQUESTED similarity index 100% rename from portkey_ai/_vendor/openai-1.40.1.dist-info/REQUESTED rename to portkey_ai/_vendor/openai-1.58.1.dist-info/REQUESTED diff --git a/portkey_ai/_vendor/openai-1.40.1.dist-info/WHEEL b/portkey_ai/_vendor/openai-1.58.1.dist-info/WHEEL similarity index 67% rename from portkey_ai/_vendor/openai-1.40.1.dist-info/WHEEL rename to portkey_ai/_vendor/openai-1.58.1.dist-info/WHEEL index cdd68a49..12228d41 100644 --- a/portkey_ai/_vendor/openai-1.40.1.dist-info/WHEEL +++ b/portkey_ai/_vendor/openai-1.58.1.dist-info/WHEEL @@ -1,4 +1,4 @@ Wheel-Version: 1.0 -Generator: hatchling 1.25.0 +Generator: hatchling 1.27.0 Root-Is-Purelib: true Tag: py3-none-any diff --git a/portkey_ai/_vendor/openai-1.40.1.dist-info/entry_points.txt b/portkey_ai/_vendor/openai-1.58.1.dist-info/entry_points.txt similarity index 100% rename from portkey_ai/_vendor/openai-1.40.1.dist-info/entry_points.txt rename to portkey_ai/_vendor/openai-1.58.1.dist-info/entry_points.txt diff --git a/portkey_ai/_vendor/openai-1.40.1.dist-info/licenses/LICENSE 
b/portkey_ai/_vendor/openai-1.58.1.dist-info/licenses/LICENSE similarity index 100% rename from portkey_ai/_vendor/openai-1.40.1.dist-info/licenses/LICENSE rename to portkey_ai/_vendor/openai-1.58.1.dist-info/licenses/LICENSE diff --git a/portkey_ai/_vendor/openai/__init__.py b/portkey_ai/_vendor/openai/__init__.py index 3c1ebb57..21c60f7e 100644 --- a/portkey_ai/_vendor/openai/__init__.py +++ b/portkey_ai/_vendor/openai/__init__.py @@ -6,7 +6,7 @@ from typing_extensions import override from . import types -from ._types import NOT_GIVEN, NoneType, NotGiven, Transport, ProxiesTypes +from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes from ._utils import file_from_path from ._client import Client, OpenAI, Stream, Timeout, Transport, AsyncClient, AsyncOpenAI, AsyncStream, RequestOptions from ._models import BaseModel @@ -43,6 +43,7 @@ "ProxiesTypes", "NotGiven", "NOT_GIVEN", + "Omit", "OpenAIError", "APIError", "APIStatusError", diff --git a/portkey_ai/_vendor/openai/_base_client.py b/portkey_ai/_vendor/openai/_base_client.py index 21053e28..cceec903 100644 --- a/portkey_ai/_vendor/openai/_base_client.py +++ b/portkey_ai/_vendor/openai/_base_client.py @@ -1,5 +1,6 @@ from __future__ import annotations +import sys import json import time import uuid @@ -61,7 +62,7 @@ HttpxRequestFiles, ModelBuilderProtocol, ) -from ._utils import is_dict, is_list, asyncify, is_given, lru_cache, is_mapping +from ._utils import SensitiveHeadersFilter, is_dict, is_list, asyncify, is_given, lru_cache, is_mapping from ._compat import model_copy, model_dump from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type from ._response import ( @@ -89,6 +90,7 @@ from ._legacy_response import LegacyAPIResponse log: logging.Logger = logging.getLogger(__name__) +log.addFilter(SensitiveHeadersFilter()) # TODO: make base page type vars covariant SyncPageT = TypeVar("SyncPageT", bound="BaseSyncPage[Any]") @@ -143,6 +145,12 @@ def __init__( self.url = url self.params = params + @override + def __repr__(self) -> str: + if self.url: + return f"{self.__class__.__name__}(url={self.url})" + return f"{self.__class__.__name__}(params={self.params})" + class BasePage(GenericModel, Generic[_T]): """ @@ -400,14 +408,7 @@ def _make_status_error( ) -> _exceptions.APIStatusError: raise NotImplementedError() - def _remaining_retries( - self, - remaining_retries: Optional[int], - options: FinalRequestOptions, - ) -> int: - return remaining_retries if remaining_retries is not None else options.get_max_retries(self.max_retries) - - def _build_headers(self, options: FinalRequestOptions) -> httpx.Headers: + def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0) -> httpx.Headers: custom_headers = options.headers or {} headers_dict = _merge_mappings(self.default_headers, custom_headers) self._validate_headers(headers_dict, custom_headers) @@ -419,6 +420,11 @@ def _build_headers(self, options: FinalRequestOptions) -> httpx.Headers: if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers: headers[idempotency_header] = options.idempotency_key or self._idempotency_key() + # Don't set the retry count header if it was already set or removed by the caller. We check + # `custom_headers`, which can contain `Omit()`, instead of `headers` to account for the removal case. 
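+        # (A header value of `Omit()` removes the header entirely, so callers can opt out of sending a retry count.)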
+ if "x-stainless-retry-count" not in (header.lower() for header in custom_headers): + headers["x-stainless-retry-count"] = str(retries_taken) + return headers def _prepare_url(self, url: str) -> URL: @@ -440,6 +446,8 @@ def _make_sse_decoder(self) -> SSEDecoder | SSEBytesDecoder: def _build_request( self, options: FinalRequestOptions, + *, + retries_taken: int = 0, ) -> httpx.Request: if log.isEnabledFor(logging.DEBUG): log.debug("Request options: %s", model_dump(options, exclude_unset=True)) @@ -455,7 +463,7 @@ def _build_request( else: raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`") - headers = self._build_headers(options) + headers = self._build_headers(options, retries_taken=retries_taken) params = _merge_mappings(self.default_query, options.params) content_type = headers.get("Content-Type") files = options.files @@ -489,12 +497,17 @@ def _build_request( if not files: files = cast(HttpxRequestFiles, ForceMultipartDict()) + prepared_url = self._prepare_url(options.url) + if "_" in prepared_url.host: + # work around https://github.com/encode/httpx/discussions/2880 + kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")} + # TODO: report this error to httpx return self._client.build_request( # pyright: ignore[reportUnknownMemberType] headers=headers, timeout=self.timeout if isinstance(options.timeout, NotGiven) else options.timeout, method=options.method, - url=self._prepare_url(options.url), + url=prepared_url, # the `Query` type that we use is incompatible with qs' # `Params` type as it needs to be typed as `Mapping[str, object]` # so that passing a `TypedDict` doesn't cause an error. @@ -684,7 +697,8 @@ def _calculate_retry_timeout( if retry_after is not None and 0 < retry_after <= 60: return retry_after - nb_retries = max_retries - remaining_retries + # Also cap retry count to 1000 to avoid any potential overflows with `pow` + nb_retries = min(max_retries - remaining_retries, 1000) # Apply exponential backoff, but not more than the max. 
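         # With the library defaults this works out to roughly 0.5s, 1s, 2s, ... per attempt, up to MAX_RETRY_DELAY.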
sleep_seconds = min(INITIAL_RETRY_DELAY * pow(2.0, nb_retries), MAX_RETRY_DELAY) @@ -849,7 +863,7 @@ def __init__( timeout=cast(Timeout, timeout), limits=limits, follow_redirects=True, - **kwargs, # type: ignore + **kwargs, # type: ignore ) def is_closed(self) -> bool: @@ -935,12 +949,17 @@ def request( stream: bool = False, stream_cls: type[_StreamT] | None = None, ) -> ResponseT | _StreamT: + if remaining_retries is not None: + retries_taken = options.get_max_retries(self.max_retries) - remaining_retries + else: + retries_taken = 0 + return self._request( cast_to=cast_to, options=options, stream=stream, stream_cls=stream_cls, - remaining_retries=remaining_retries, + retries_taken=retries_taken, ) def _request( @@ -948,7 +967,7 @@ def _request( *, cast_to: Type[ResponseT], options: FinalRequestOptions, - remaining_retries: int | None, + retries_taken: int, stream: bool, stream_cls: type[_StreamT] | None, ) -> ResponseT | _StreamT: @@ -960,8 +979,8 @@ def _request( cast_to = self._maybe_override_cast_to(cast_to, options) options = self._prepare_options(options) - retries = self._remaining_retries(remaining_retries, options) - request = self._build_request(options) + remaining_retries = options.get_max_retries(self.max_retries) - retries_taken + request = self._build_request(options, retries_taken=retries_taken) self._prepare_request(request) kwargs: HttpxSendArgs = {} @@ -979,11 +998,11 @@ def _request( except httpx.TimeoutException as err: log.debug("Encountered httpx.TimeoutException", exc_info=True) - if retries > 0: + if remaining_retries > 0: return self._retry_request( input_options, cast_to, - retries, + retries_taken=retries_taken, stream=stream, stream_cls=stream_cls, response_headers=None, @@ -994,11 +1013,11 @@ def _request( except Exception as err: log.debug("Encountered Exception", exc_info=True) - if retries > 0: + if remaining_retries > 0: return self._retry_request( input_options, cast_to, - retries, + retries_taken=retries_taken, stream=stream, stream_cls=stream_cls, response_headers=None, @@ -1022,13 +1041,13 @@ def _request( except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code log.debug("Encountered httpx.HTTPStatusError", exc_info=True) - if retries > 0 and self._should_retry(err.response): + if remaining_retries > 0 and self._should_retry(err.response): err.response.close() return self._retry_request( input_options, cast_to, - retries, - err.response.headers, + retries_taken=retries_taken, + response_headers=err.response.headers, stream=stream, stream_cls=stream_cls, ) @@ -1047,26 +1066,26 @@ def _request( response=response, stream=stream, stream_cls=stream_cls, - retries_taken=options.get_max_retries(self.max_retries) - retries, + retries_taken=retries_taken, ) def _retry_request( self, options: FinalRequestOptions, cast_to: Type[ResponseT], - remaining_retries: int, - response_headers: httpx.Headers | None, *, + retries_taken: int, + response_headers: httpx.Headers | None, stream: bool, stream_cls: type[_StreamT] | None, ) -> ResponseT | _StreamT: - remaining = remaining_retries - 1 - if remaining == 1: + remaining_retries = options.get_max_retries(self.max_retries) - retries_taken + if remaining_retries == 1: log.debug("1 retry left") else: - log.debug("%i retries left", remaining) + log.debug("%i retries left", remaining_retries) - timeout = self._calculate_retry_timeout(remaining, options, response_headers) + timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers) log.info("Retrying request to %s in %f seconds", 
options.url, timeout) # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a @@ -1076,7 +1095,7 @@ def _retry_request( return self._request( options=options, cast_to=cast_to, - remaining_retries=remaining, + retries_taken=retries_taken + 1, stream=stream, stream_cls=stream_cls, ) @@ -1427,7 +1446,7 @@ def __init__( timeout=cast(Timeout, timeout), limits=limits, follow_redirects=True, - **kwargs, # type: ignore + **kwargs, # type: ignore ) def is_closed(self) -> bool: @@ -1510,12 +1529,17 @@ async def request( stream_cls: type[_AsyncStreamT] | None = None, remaining_retries: Optional[int] = None, ) -> ResponseT | _AsyncStreamT: + if remaining_retries is not None: + retries_taken = options.get_max_retries(self.max_retries) - remaining_retries + else: + retries_taken = 0 + return await self._request( cast_to=cast_to, options=options, stream=stream, stream_cls=stream_cls, - remaining_retries=remaining_retries, + retries_taken=retries_taken, ) async def _request( @@ -1525,7 +1549,7 @@ async def _request( *, stream: bool, stream_cls: type[_AsyncStreamT] | None, - remaining_retries: int | None, + retries_taken: int, ) -> ResponseT | _AsyncStreamT: if self._platform is None: # `get_platform` can make blocking IO calls so we @@ -1540,8 +1564,8 @@ async def _request( cast_to = self._maybe_override_cast_to(cast_to, options) options = await self._prepare_options(options) - retries = self._remaining_retries(remaining_retries, options) - request = self._build_request(options) + remaining_retries = options.get_max_retries(self.max_retries) - retries_taken + request = self._build_request(options, retries_taken=retries_taken) await self._prepare_request(request) kwargs: HttpxSendArgs = {} @@ -1557,11 +1581,11 @@ async def _request( except httpx.TimeoutException as err: log.debug("Encountered httpx.TimeoutException", exc_info=True) - if retries > 0: + if remaining_retries > 0: return await self._retry_request( input_options, cast_to, - retries, + retries_taken=retries_taken, stream=stream, stream_cls=stream_cls, response_headers=None, @@ -1572,11 +1596,11 @@ async def _request( except Exception as err: log.debug("Encountered Exception", exc_info=True) - if retries > 0: + if remaining_retries > 0: return await self._retry_request( input_options, cast_to, - retries, + retries_taken=retries_taken, stream=stream, stream_cls=stream_cls, response_headers=None, @@ -1594,13 +1618,13 @@ async def _request( except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code log.debug("Encountered httpx.HTTPStatusError", exc_info=True) - if retries > 0 and self._should_retry(err.response): + if remaining_retries > 0 and self._should_retry(err.response): await err.response.aclose() return await self._retry_request( input_options, cast_to, - retries, - err.response.headers, + retries_taken=retries_taken, + response_headers=err.response.headers, stream=stream, stream_cls=stream_cls, ) @@ -1619,26 +1643,26 @@ async def _request( response=response, stream=stream, stream_cls=stream_cls, - retries_taken=options.get_max_retries(self.max_retries) - retries, + retries_taken=retries_taken, ) async def _retry_request( self, options: FinalRequestOptions, cast_to: Type[ResponseT], - remaining_retries: int, - response_headers: httpx.Headers | None, *, + retries_taken: int, + response_headers: httpx.Headers | None, stream: bool, stream_cls: type[_AsyncStreamT] | None, ) -> ResponseT | _AsyncStreamT: - remaining = remaining_retries - 1 - if remaining == 1: + remaining_retries = 
options.get_max_retries(self.max_retries) - retries_taken + if remaining_retries == 1: log.debug("1 retry left") else: - log.debug("%i retries left", remaining) + log.debug("%i retries left", remaining_retries) - timeout = self._calculate_retry_timeout(remaining, options, response_headers) + timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers) log.info("Retrying request to %s in %f seconds", options.url, timeout) await anyio.sleep(timeout) @@ -1646,7 +1670,7 @@ async def _retry_request( return await self._request( options=options, cast_to=cast_to, - remaining_retries=remaining, + retries_taken=retries_taken + 1, stream=stream, stream_cls=stream_cls, ) @@ -2016,7 +2040,6 @@ def get_python_version() -> str: def get_architecture() -> Arch: try: - python_bitness, _ = platform.architecture() machine = platform.machine().lower() except Exception: return "unknown" @@ -2032,7 +2055,7 @@ def get_architecture() -> Arch: return "x64" # TODO: untested - if python_bitness == "32bit": + if sys.maxsize <= 2**32: return "x32" if machine: diff --git a/portkey_ai/_vendor/openai/_client.py b/portkey_ai/_vendor/openai/_client.py index a1f7dde4..c784694f 100644 --- a/portkey_ai/_vendor/openai/_client.py +++ b/portkey_ai/_vendor/openai/_client.py @@ -8,7 +8,7 @@ import httpx -from . import resources, _exceptions +from . import _exceptions from ._qs import Querystring from ._types import ( NOT_GIVEN, @@ -25,6 +25,7 @@ get_async_library, ) from ._version import __version__ +from .resources import files, images, models, batches, embeddings, completions, moderations from ._streaming import Stream as Stream, AsyncStream as AsyncStream from ._exceptions import OpenAIError, APIStatusError from ._base_client import ( @@ -32,33 +33,28 @@ SyncAPIClient, AsyncAPIClient, ) +from .resources.beta import beta +from .resources.chat import chat +from .resources.audio import audio +from .resources.uploads import uploads +from .resources.fine_tuning import fine_tuning -__all__ = [ - "Timeout", - "Transport", - "ProxiesTypes", - "RequestOptions", - "resources", - "OpenAI", - "AsyncOpenAI", - "Client", - "AsyncClient", -] +__all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "OpenAI", "AsyncOpenAI", "Client", "AsyncClient"] class OpenAI(SyncAPIClient): - completions: resources.Completions - chat: resources.Chat - embeddings: resources.Embeddings - files: resources.Files - images: resources.Images - audio: resources.Audio - moderations: resources.Moderations - models: resources.Models - fine_tuning: resources.FineTuning - beta: resources.Beta - batches: resources.Batches - uploads: resources.Uploads # TODO + completions: completions.Completions + chat: chat.Chat + embeddings: embeddings.Embeddings + files: files.Files + images: images.Images + audio: audio.Audio + moderations: moderations.Moderations + models: models.Models + fine_tuning: fine_tuning.FineTuning + beta: beta.Beta + batches: batches.Batches + uploads: uploads.Uploads with_raw_response: OpenAIWithRawResponse with_streaming_response: OpenAIWithStreamedResponse @@ -67,6 +63,14 @@ class OpenAI(SyncAPIClient): organization: str | None project: str | None + websocket_base_url: str | httpx.URL | None + """Base URL for WebSocket connections. + + If not specified, the default base URL will be used, with 'wss://' replacing the + 'http://' or 'https://' scheme. 
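The hunks above invert the retry bookkeeping: instead of passing the number of retries *remaining* through each recursive call, the client now threads through the number of retries already *taken* and recomputes the remainder from the request options at each step (the counter is also handed to `_build_request`, as shown above). A minimal standalone sketch of the arithmetic, with hypothetical names, not the vendored code:

```python
# Hedged sketch of the retries_taken accounting introduced above.
def remaining_retries(max_retries: int, retries_taken: int) -> int:
    # mirrors `options.get_max_retries(self.max_retries) - retries_taken`
    return max_retries - retries_taken

max_retries, retries_taken = 2, 0
while remaining_retries(max_retries, retries_taken) > 0:
    # each failed attempt re-enters the request path with `retries_taken + 1`
    retries_taken += 1
assert remaining_retries(max_retries, retries_taken) == 0
```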
For example: 'http://example.com' becomes + 'wss://example.com' + """ + def __init__( self, *, @@ -74,6 +78,7 @@ def __init__( organization: str | None = None, project: str | None = None, base_url: str | httpx.URL | None = None, + websocket_base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, @@ -115,6 +120,8 @@ def __init__( project = os.environ.get("OPENAI_PROJECT_ID") self.project = project + self.websocket_base_url = websocket_base_url + if base_url is None: base_url = os.environ.get("OPENAI_BASE_URL") if base_url is None: @@ -133,18 +140,18 @@ def __init__( self._default_stream_cls = Stream - self.completions = resources.Completions(self) - self.chat = resources.Chat(self) - self.embeddings = resources.Embeddings(self) - self.files = resources.Files(self) - self.images = resources.Images(self) - self.audio = resources.Audio(self) - self.moderations = resources.Moderations(self) - self.models = resources.Models(self) - self.fine_tuning = resources.FineTuning(self) - self.beta = resources.Beta(self) - self.batches = resources.Batches(self) - self.uploads = resources.Uploads(self) + self.completions = completions.Completions(self) + self.chat = chat.Chat(self) + self.embeddings = embeddings.Embeddings(self) + self.files = files.Files(self) + self.images = images.Images(self) + self.audio = audio.Audio(self) + self.moderations = moderations.Moderations(self) + self.models = models.Models(self) + self.fine_tuning = fine_tuning.FineTuning(self) + self.beta = beta.Beta(self) + self.batches = batches.Batches(self) + self.uploads = uploads.Uploads(self) self.with_raw_response = OpenAIWithRawResponse(self) self.with_streaming_response = OpenAIWithStreamedResponse(self) @@ -176,6 +183,7 @@ def copy( api_key: str | None = None, organization: str | None = None, project: str | None = None, + websocket_base_url: str | httpx.URL | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.Client | None = None, @@ -212,6 +220,7 @@ def copy( api_key=api_key or self.api_key, organization=organization or self.organization, project=project or self.project, + websocket_base_url=websocket_base_url or self.websocket_base_url, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, @@ -261,18 +270,18 @@ def _make_status_error( class AsyncOpenAI(AsyncAPIClient): - completions: resources.AsyncCompletions - chat: resources.AsyncChat - embeddings: resources.AsyncEmbeddings - files: resources.AsyncFiles - images: resources.AsyncImages - audio: resources.AsyncAudio - moderations: resources.AsyncModerations - models: resources.AsyncModels - fine_tuning: resources.AsyncFineTuning - beta: resources.AsyncBeta - batches: resources.AsyncBatches - uploads: resources.AsyncUploads + completions: completions.AsyncCompletions + chat: chat.AsyncChat + embeddings: embeddings.AsyncEmbeddings + files: files.AsyncFiles + images: images.AsyncImages + audio: audio.AsyncAudio + moderations: moderations.AsyncModerations + models: models.AsyncModels + fine_tuning: fine_tuning.AsyncFineTuning + beta: beta.AsyncBeta + batches: batches.AsyncBatches + uploads: uploads.AsyncUploads with_raw_response: AsyncOpenAIWithRawResponse with_streaming_response: AsyncOpenAIWithStreamedResponse @@ -281,6 +290,14 @@ class AsyncOpenAI(AsyncAPIClient): organization: str | None 
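Per the docstring above, the new `websocket_base_url` option falls back to the HTTP base URL with its scheme swapped to `wss://`. A hedged sketch of that derivation using httpx (hypothetical helper, not the vendored implementation):

```python
from __future__ import annotations

import httpx

def derive_websocket_base_url(base_url: str | httpx.URL) -> httpx.URL:
    # documented default: reuse the base URL, replacing the scheme with "wss"
    return httpx.URL(base_url).copy_with(scheme="wss")

assert str(derive_websocket_base_url("http://example.com")) == "wss://example.com"
assert str(derive_websocket_base_url("https://api.openai.com/v1")) == "wss://api.openai.com/v1"
```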
project: str | None + websocket_base_url: str | httpx.URL | None + """Base URL for WebSocket connections. + + If not specified, the default base URL will be used, with 'wss://' replacing the + 'http://' or 'https://' scheme. For example: 'http://example.com' becomes + 'wss://example.com' + """ + def __init__( self, *, @@ -288,6 +305,7 @@ def __init__( organization: str | None = None, project: str | None = None, base_url: str | httpx.URL | None = None, + websocket_base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, @@ -329,6 +347,8 @@ def __init__( project = os.environ.get("OPENAI_PROJECT_ID") self.project = project + self.websocket_base_url = websocket_base_url + if base_url is None: base_url = os.environ.get("OPENAI_BASE_URL") if base_url is None: @@ -347,18 +367,18 @@ def __init__( self._default_stream_cls = AsyncStream - self.completions = resources.AsyncCompletions(self) - self.chat = resources.AsyncChat(self) - self.embeddings = resources.AsyncEmbeddings(self) - self.files = resources.AsyncFiles(self) - self.images = resources.AsyncImages(self) - self.audio = resources.AsyncAudio(self) - self.moderations = resources.AsyncModerations(self) - self.models = resources.AsyncModels(self) - self.fine_tuning = resources.AsyncFineTuning(self) - self.beta = resources.AsyncBeta(self) - self.batches = resources.AsyncBatches(self) - self.uploads = resources.AsyncUploads(self) + self.completions = completions.AsyncCompletions(self) + self.chat = chat.AsyncChat(self) + self.embeddings = embeddings.AsyncEmbeddings(self) + self.files = files.AsyncFiles(self) + self.images = images.AsyncImages(self) + self.audio = audio.AsyncAudio(self) + self.moderations = moderations.AsyncModerations(self) + self.models = models.AsyncModels(self) + self.fine_tuning = fine_tuning.AsyncFineTuning(self) + self.beta = beta.AsyncBeta(self) + self.batches = batches.AsyncBatches(self) + self.uploads = uploads.AsyncUploads(self) self.with_raw_response = AsyncOpenAIWithRawResponse(self) self.with_streaming_response = AsyncOpenAIWithStreamedResponse(self) @@ -390,6 +410,7 @@ def copy( api_key: str | None = None, organization: str | None = None, project: str | None = None, + websocket_base_url: str | httpx.URL | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.AsyncClient | None = None, @@ -426,6 +447,7 @@ def copy( api_key=api_key or self.api_key, organization=organization or self.organization, project=project or self.project, + websocket_base_url=websocket_base_url or self.websocket_base_url, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, @@ -476,66 +498,66 @@ def _make_status_error( class OpenAIWithRawResponse: def __init__(self, client: OpenAI) -> None: - self.completions = resources.CompletionsWithRawResponse(client.completions) - self.chat = resources.ChatWithRawResponse(client.chat) - self.embeddings = resources.EmbeddingsWithRawResponse(client.embeddings) - self.files = resources.FilesWithRawResponse(client.files) - self.images = resources.ImagesWithRawResponse(client.images) - self.audio = resources.AudioWithRawResponse(client.audio) - self.moderations = resources.ModerationsWithRawResponse(client.moderations) - self.models = resources.ModelsWithRawResponse(client.models) - self.fine_tuning = 
resources.FineTuningWithRawResponse(client.fine_tuning) - self.beta = resources.BetaWithRawResponse(client.beta) - self.batches = resources.BatchesWithRawResponse(client.batches) - self.uploads = resources.UploadsWithRawResponse(client.uploads) + self.completions = completions.CompletionsWithRawResponse(client.completions) + self.chat = chat.ChatWithRawResponse(client.chat) + self.embeddings = embeddings.EmbeddingsWithRawResponse(client.embeddings) + self.files = files.FilesWithRawResponse(client.files) + self.images = images.ImagesWithRawResponse(client.images) + self.audio = audio.AudioWithRawResponse(client.audio) + self.moderations = moderations.ModerationsWithRawResponse(client.moderations) + self.models = models.ModelsWithRawResponse(client.models) + self.fine_tuning = fine_tuning.FineTuningWithRawResponse(client.fine_tuning) + self.beta = beta.BetaWithRawResponse(client.beta) + self.batches = batches.BatchesWithRawResponse(client.batches) + self.uploads = uploads.UploadsWithRawResponse(client.uploads) class AsyncOpenAIWithRawResponse: def __init__(self, client: AsyncOpenAI) -> None: - self.completions = resources.AsyncCompletionsWithRawResponse(client.completions) - self.chat = resources.AsyncChatWithRawResponse(client.chat) - self.embeddings = resources.AsyncEmbeddingsWithRawResponse(client.embeddings) - self.files = resources.AsyncFilesWithRawResponse(client.files) - self.images = resources.AsyncImagesWithRawResponse(client.images) - self.audio = resources.AsyncAudioWithRawResponse(client.audio) - self.moderations = resources.AsyncModerationsWithRawResponse(client.moderations) - self.models = resources.AsyncModelsWithRawResponse(client.models) - self.fine_tuning = resources.AsyncFineTuningWithRawResponse(client.fine_tuning) - self.beta = resources.AsyncBetaWithRawResponse(client.beta) - self.batches = resources.AsyncBatchesWithRawResponse(client.batches) - self.uploads = resources.AsyncUploadsWithRawResponse(client.uploads) + self.completions = completions.AsyncCompletionsWithRawResponse(client.completions) + self.chat = chat.AsyncChatWithRawResponse(client.chat) + self.embeddings = embeddings.AsyncEmbeddingsWithRawResponse(client.embeddings) + self.files = files.AsyncFilesWithRawResponse(client.files) + self.images = images.AsyncImagesWithRawResponse(client.images) + self.audio = audio.AsyncAudioWithRawResponse(client.audio) + self.moderations = moderations.AsyncModerationsWithRawResponse(client.moderations) + self.models = models.AsyncModelsWithRawResponse(client.models) + self.fine_tuning = fine_tuning.AsyncFineTuningWithRawResponse(client.fine_tuning) + self.beta = beta.AsyncBetaWithRawResponse(client.beta) + self.batches = batches.AsyncBatchesWithRawResponse(client.batches) + self.uploads = uploads.AsyncUploadsWithRawResponse(client.uploads) class OpenAIWithStreamedResponse: def __init__(self, client: OpenAI) -> None: - self.completions = resources.CompletionsWithStreamingResponse(client.completions) - self.chat = resources.ChatWithStreamingResponse(client.chat) - self.embeddings = resources.EmbeddingsWithStreamingResponse(client.embeddings) - self.files = resources.FilesWithStreamingResponse(client.files) - self.images = resources.ImagesWithStreamingResponse(client.images) - self.audio = resources.AudioWithStreamingResponse(client.audio) - self.moderations = resources.ModerationsWithStreamingResponse(client.moderations) - self.models = resources.ModelsWithStreamingResponse(client.models) - self.fine_tuning = resources.FineTuningWithStreamingResponse(client.fine_tuning) - 
self.beta = resources.BetaWithStreamingResponse(client.beta) - self.batches = resources.BatchesWithStreamingResponse(client.batches) - self.uploads = resources.UploadsWithStreamingResponse(client.uploads) + self.completions = completions.CompletionsWithStreamingResponse(client.completions) + self.chat = chat.ChatWithStreamingResponse(client.chat) + self.embeddings = embeddings.EmbeddingsWithStreamingResponse(client.embeddings) + self.files = files.FilesWithStreamingResponse(client.files) + self.images = images.ImagesWithStreamingResponse(client.images) + self.audio = audio.AudioWithStreamingResponse(client.audio) + self.moderations = moderations.ModerationsWithStreamingResponse(client.moderations) + self.models = models.ModelsWithStreamingResponse(client.models) + self.fine_tuning = fine_tuning.FineTuningWithStreamingResponse(client.fine_tuning) + self.beta = beta.BetaWithStreamingResponse(client.beta) + self.batches = batches.BatchesWithStreamingResponse(client.batches) + self.uploads = uploads.UploadsWithStreamingResponse(client.uploads) class AsyncOpenAIWithStreamedResponse: def __init__(self, client: AsyncOpenAI) -> None: - self.completions = resources.AsyncCompletionsWithStreamingResponse(client.completions) - self.chat = resources.AsyncChatWithStreamingResponse(client.chat) - self.embeddings = resources.AsyncEmbeddingsWithStreamingResponse(client.embeddings) - self.files = resources.AsyncFilesWithStreamingResponse(client.files) - self.images = resources.AsyncImagesWithStreamingResponse(client.images) - self.audio = resources.AsyncAudioWithStreamingResponse(client.audio) - self.moderations = resources.AsyncModerationsWithStreamingResponse(client.moderations) - self.models = resources.AsyncModelsWithStreamingResponse(client.models) - self.fine_tuning = resources.AsyncFineTuningWithStreamingResponse(client.fine_tuning) - self.beta = resources.AsyncBetaWithStreamingResponse(client.beta) - self.batches = resources.AsyncBatchesWithStreamingResponse(client.batches) - self.uploads = resources.AsyncUploadsWithStreamingResponse(client.uploads) + self.completions = completions.AsyncCompletionsWithStreamingResponse(client.completions) + self.chat = chat.AsyncChatWithStreamingResponse(client.chat) + self.embeddings = embeddings.AsyncEmbeddingsWithStreamingResponse(client.embeddings) + self.files = files.AsyncFilesWithStreamingResponse(client.files) + self.images = images.AsyncImagesWithStreamingResponse(client.images) + self.audio = audio.AsyncAudioWithStreamingResponse(client.audio) + self.moderations = moderations.AsyncModerationsWithStreamingResponse(client.moderations) + self.models = models.AsyncModelsWithStreamingResponse(client.models) + self.fine_tuning = fine_tuning.AsyncFineTuningWithStreamingResponse(client.fine_tuning) + self.beta = beta.AsyncBetaWithStreamingResponse(client.beta) + self.batches = batches.AsyncBatchesWithStreamingResponse(client.batches) + self.uploads = uploads.AsyncUploadsWithStreamingResponse(client.uploads) Client = OpenAI diff --git a/portkey_ai/_vendor/openai/_compat.py b/portkey_ai/_vendor/openai/_compat.py index c0dd8c1e..87fc3707 100644 --- a/portkey_ai/_vendor/openai/_compat.py +++ b/portkey_ai/_vendor/openai/_compat.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload from datetime import date, datetime -from typing_extensions import Self +from typing_extensions import Self, Literal import pydantic from pydantic.fields import FieldInfo @@ -133,15 +133,20 @@ def model_json(model: pydantic.BaseModel, *, 
indent: int | None = None) -> str: def model_dump( model: pydantic.BaseModel, *, - exclude: IncEx = None, + exclude: IncEx | None = None, exclude_unset: bool = False, exclude_defaults: bool = False, + warnings: bool = True, + mode: Literal["json", "python"] = "python", ) -> dict[str, Any]: - if PYDANTIC_V2: + if PYDANTIC_V2 or hasattr(model, "model_dump"): return model.model_dump( + mode=mode, exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, + # warnings are not supported in Pydantic v1 + warnings=warnings if PYDANTIC_V2 else True, ) return cast( "dict[str, Any]", @@ -221,9 +226,6 @@ def __set_name__(self, owner: type[Any], name: str) -> None: ... # __set__ is not defined at runtime, but @cached_property is designed to be settable def __set__(self, instance: object, value: _T) -> None: ... else: - try: - from functools import cached_property as cached_property - except ImportError: - from cached_property import cached_property as cached_property + from functools import cached_property as cached_property typed_cached_property = cached_property diff --git a/portkey_ai/_vendor/openai/_exceptions.py b/portkey_ai/_vendor/openai/_exceptions.py index 3a87c92b..e326ed95 100644 --- a/portkey_ai/_vendor/openai/_exceptions.py +++ b/portkey_ai/_vendor/openai/_exceptions.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Any, Optional, cast +from typing import TYPE_CHECKING, Any, Optional, cast from typing_extensions import Literal import httpx @@ -10,6 +10,9 @@ from ._utils import is_dict from ._models import construct_type +if TYPE_CHECKING: + from .types.chat import ChatCompletion + __all__ = [ "BadRequestError", "AuthenticationError", @@ -88,11 +91,7 @@ def __init__(self, message: str, *, response: httpx.Response, body: object | Non class APIConnectionError(APIError): - LOCALHOST_CONNECTION_ERROR = """Could not instantiate the Portkey client. \ -You can either add a valid `api_key` parameter (from https://app.portkey.ai/api-keys) \ -or check the `base_url` parameter in the Portkey client, for your AI Gateway's instance's URL. -""" - def __init__(self, *, message: str = LOCALHOST_CONNECTION_ERROR, request: httpx.Request) -> None: + def __init__(self, *, message: str = "Connection error.", request: httpx.Request) -> None: super().__init__(message, request, body=None) @@ -134,10 +133,20 @@ class InternalServerError(APIStatusError): class LengthFinishReasonError(OpenAIError): - def __init__(self) -> None: - super().__init__( - f"Could not parse response content as the length limit was reached", - ) + completion: ChatCompletion + """The completion that caused this error. + + Note: this will *not* be a complete `ChatCompletion` object when streaming as `usage` + will not be included. 
+ """ + + def __init__(self, *, completion: ChatCompletion) -> None: + msg = "Could not parse response content as the length limit was reached" + if completion.usage: + msg += f" - {completion.usage}" + + super().__init__(msg) + self.completion = completion class ContentFilterFinishReasonError(OpenAIError): diff --git a/portkey_ai/_vendor/openai/_legacy_response.py b/portkey_ai/_vendor/openai/_legacy_response.py index c42fb8b8..7a14f27a 100644 --- a/portkey_ai/_vendor/openai/_legacy_response.py +++ b/portkey_ai/_vendor/openai/_legacy_response.py @@ -24,8 +24,8 @@ import pydantic from ._types import NoneType -from ._utils import is_given, extract_type_arg, is_annotated_type -from ._models import BaseModel, is_basemodel +from ._utils import is_given, extract_type_arg, is_annotated_type, is_type_alias_type +from ._models import BaseModel, is_basemodel, add_request_id from ._constants import RAW_RESPONSE_HEADER from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type from ._exceptions import APIResponseValidationError @@ -138,8 +138,11 @@ class MyModel(BaseModel): if is_given(self._options.post_parser): parsed = self._options.post_parser(parsed) + if isinstance(parsed, BaseModel): + add_request_id(parsed, self.request_id) + self._parsed_by_type[cache_key] = parsed - return parsed + return cast(R, parsed) @property def headers(self) -> httpx.Headers: @@ -192,9 +195,15 @@ def elapsed(self) -> datetime.timedelta: return self.http_response.elapsed def _parse(self, *, to: type[_T] | None = None) -> R | _T: + cast_to = to if to is not None else self._cast_to + + # unwrap `TypeAlias('Name', T)` -> `T` + if is_type_alias_type(cast_to): + cast_to = cast_to.__value__ # type: ignore[unreachable] + # unwrap `Annotated[T, ...]` -> `T` - if to and is_annotated_type(to): - to = extract_type_arg(to, 0) + if cast_to and is_annotated_type(cast_to): + cast_to = extract_type_arg(cast_to, 0) if self._stream: if to: @@ -230,18 +239,12 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: return cast( R, stream_cls( - cast_to=self._cast_to, + cast_to=cast_to, response=self.http_response, client=cast(Any, self._client), ), ) - cast_to = to if to is not None else self._cast_to - - # unwrap `Annotated[T, ...]` -> `T` - if is_annotated_type(cast_to): - cast_to = extract_type_arg(cast_to, 0) - if cast_to is NoneType: return cast(R, None) @@ -255,6 +258,9 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: if cast_to == float: return cast(R, float(response.text)) + if cast_to == bool: + return cast(R, response.text.lower() == "true") + origin = get_origin(cast_to) or cast_to if inspect.isclass(origin) and issubclass(origin, HttpxBinaryResponseContent): diff --git a/portkey_ai/_vendor/openai/_models.py b/portkey_ai/_vendor/openai/_models.py index 5148d5a7..2f67e5eb 100644 --- a/portkey_ai/_vendor/openai/_models.py +++ b/portkey_ai/_vendor/openai/_models.py @@ -2,7 +2,7 @@ import os import inspect -from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, cast +from typing import TYPE_CHECKING, Any, Type, Tuple, Union, Generic, TypeVar, Callable, Optional, cast from datetime import date, datetime from typing_extensions import ( Unpack, @@ -10,6 +10,7 @@ ClassVar, Protocol, Required, + Sequence, ParamSpec, TypedDict, TypeGuard, @@ -37,6 +38,7 @@ PropertyInfo, is_list, is_given, + json_safe, lru_cache, is_mapping, parse_date, @@ -45,6 +47,7 @@ strip_not_given, extract_type_arg, is_annotated_type, + is_type_alias_type, strip_annotated_type, ) from 
._compat import ( @@ -72,6 +75,8 @@ P = ParamSpec("P") +ReprArgs = Sequence[Tuple[Optional[str], Any]] + @runtime_checkable class _ConfigProtocol(Protocol): @@ -94,6 +99,28 @@ def model_fields_set(self) -> set[str]: class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] extra: Any = pydantic.Extra.allow # type: ignore + @override + def __repr_args__(self) -> ReprArgs: + # we don't want these attributes to be included when something like `rich.print` is used + return [arg for arg in super().__repr_args__() if arg[0] not in {"_request_id", "__exclude_fields__"}] + + if TYPE_CHECKING: + _request_id: Optional[str] = None + """The ID of the request, returned via the X-Request-ID header. Useful for debugging requests and reporting issues to OpenAI. + + This will **only** be set for the top-level response object, it will not be defined for nested objects. For example: + + ```py + completion = await client.chat.completions.create(...) + completion._request_id # req_id_xxx + completion.usage._request_id # raises `AttributeError` + ``` + + Note: unlike other properties that use an `_` prefix, this property + *is* public. Unless documented otherwise, all other `_` prefix properties, + methods and modules are *private*. + """ + def to_dict( self, *, @@ -176,7 +203,7 @@ def __str__(self) -> str: # Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836. @classmethod @override - def construct( + def construct( # pyright: ignore[reportIncompatibleMethodOverride] cls: Type[ModelT], _fields_set: set[str] | None = None, **values: object, @@ -248,8 +275,8 @@ def model_dump( self, *, mode: Literal["json", "python"] | str = "python", - include: IncEx = None, - exclude: IncEx = None, + include: IncEx | None = None, + exclude: IncEx | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, @@ -279,8 +306,8 @@ def model_dump( Returns: A dictionary representation of the model. 
""" - if mode != "python": - raise ValueError("mode is only supported in Pydantic v2") + if mode not in {"json", "python"}: + raise ValueError("mode must be either 'json' or 'python'") if round_trip != False: raise ValueError("round_trip is only supported in Pydantic v2") if warnings != True: @@ -289,7 +316,7 @@ def model_dump( raise ValueError("context is only supported in Pydantic v2") if serialize_as_any != False: raise ValueError("serialize_as_any is only supported in Pydantic v2") - return super().dict( # pyright: ignore[reportDeprecated] + dumped = super().dict( # pyright: ignore[reportDeprecated] include=include, exclude=exclude, by_alias=by_alias, @@ -298,13 +325,15 @@ def model_dump( exclude_none=exclude_none, ) + return cast(dict[str, Any], json_safe(dumped)) if mode == "json" else dumped + @override def model_dump_json( self, *, indent: int | None = None, - include: IncEx = None, - exclude: IncEx = None, + include: IncEx | None = None, + exclude: IncEx | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, @@ -380,6 +409,8 @@ def is_basemodel(type_: type) -> bool: def is_basemodel_type(type_: type) -> TypeGuard[type[BaseModel] | type[GenericModel]]: origin = get_origin(type_) or type_ + if not inspect.isclass(origin): + return False return issubclass(origin, BaseModel) or issubclass(origin, GenericModel) @@ -423,6 +454,8 @@ def construct_type(*, value: object, type_: object) -> object: # we allow `object` as the input type because otherwise, passing things like # `Literal['value']` will be reported as a type error by type checkers type_ = cast("type[object]", type_) + if is_type_alias_type(type_): + type_ = type_.__value__ # type: ignore[unreachable] # unwrap `Annotated[T, ...]` -> `T` if is_annotated_type(type_): @@ -660,6 +693,21 @@ def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None: setattr(typ, "__pydantic_config__", config) # noqa: B010 +def add_request_id(obj: BaseModel, request_id: str | None) -> None: + obj._request_id = request_id + + # in Pydantic v1, using setattr like we do above causes the attribute + # to be included when serializing the model which we don't want in this + # case so we need to explicitly exclude it + if not PYDANTIC_V2: + try: + exclude_fields = obj.__exclude_fields__ # type: ignore + except AttributeError: + cast(Any, obj).__exclude_fields__ = {"_request_id", "__exclude_fields__"} + else: + cast(Any, obj).__exclude_fields__ = {*(exclude_fields or {}), "_request_id", "__exclude_fields__"} + + # our use of subclasssing here causes weirdness for type checkers, # so we just pretend that we don't subclass if TYPE_CHECKING: diff --git a/portkey_ai/_vendor/openai/_response.py b/portkey_ai/_vendor/openai/_response.py index f9d91786..15274465 100644 --- a/portkey_ai/_vendor/openai/_response.py +++ b/portkey_ai/_vendor/openai/_response.py @@ -25,8 +25,8 @@ import pydantic from ._types import NoneType -from ._utils import is_given, extract_type_arg, is_annotated_type, extract_type_var_from_base -from ._models import BaseModel, is_basemodel +from ._utils import is_given, extract_type_arg, is_annotated_type, is_type_alias_type, extract_type_var_from_base +from ._models import BaseModel, is_basemodel, add_request_id from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type from ._exceptions import OpenAIError, APIResponseValidationError @@ -126,9 +126,15 @@ def __repr__(self) -> str: ) def 
_parse(self, *, to: type[_T] | None = None) -> R | _T: + cast_to = to if to is not None else self._cast_to + + # unwrap `TypeAlias('Name', T)` -> `T` + if is_type_alias_type(cast_to): + cast_to = cast_to.__value__ # type: ignore[unreachable] + # unwrap `Annotated[T, ...]` -> `T` - if to and is_annotated_type(to): - to = extract_type_arg(to, 0) + if cast_to and is_annotated_type(cast_to): + cast_to = extract_type_arg(cast_to, 0) if self._is_sse_stream: if to: @@ -164,18 +170,12 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: return cast( R, stream_cls( - cast_to=self._cast_to, + cast_to=cast_to, response=self.http_response, client=cast(Any, self._client), ), ) - cast_to = to if to is not None else self._cast_to - - # unwrap `Annotated[T, ...]` -> `T` - if is_annotated_type(cast_to): - cast_to = extract_type_arg(cast_to, 0) - if cast_to is NoneType: return cast(R, None) @@ -192,6 +192,9 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: if cast_to == float: return cast(R, float(response.text)) + if cast_to == bool: + return cast(R, response.text.lower() == "true") + origin = get_origin(cast_to) or cast_to # handle the legacy binary response case @@ -315,8 +318,11 @@ class MyModel(BaseModel): if is_given(self._options.post_parser): parsed = self._options.post_parser(parsed) + if isinstance(parsed, BaseModel): + add_request_id(parsed, self.request_id) + self._parsed_by_type[cache_key] = parsed - return parsed + return cast(R, parsed) def read(self) -> bytes: """Read and return the binary response content.""" @@ -419,8 +425,11 @@ class MyModel(BaseModel): if is_given(self._options.post_parser): parsed = self._options.post_parser(parsed) + if isinstance(parsed, BaseModel): + add_request_id(parsed, self.request_id) + self._parsed_by_type[cache_key] = parsed - return parsed + return cast(R, parsed) async def read(self) -> bytes: """Read and return the binary response content.""" diff --git a/portkey_ai/_vendor/openai/_types.py b/portkey_ai/_vendor/openai/_types.py index 5611b2d3..a5cf207a 100644 --- a/portkey_ai/_vendor/openai/_types.py +++ b/portkey_ai/_vendor/openai/_types.py @@ -16,7 +16,7 @@ Optional, Sequence, ) -from typing_extensions import Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable +from typing_extensions import Set, Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable import httpx import pydantic @@ -194,8 +194,8 @@ def get(self, __key: str) -> str | None: ... 
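Both `_legacy_response.py` and `_response.py` gain the same two parsing tweaks shown above: `TypeAliasType` targets are unwrapped to their underlying value before casting, and a `bool` cast is now parsed from the raw body text. The boolean branch, restated standalone:

```python
# Standalone restatement of the new `cast_to == bool` branch: the raw body is
# compared case-insensitively against "true".
def parse_bool_body(text: str) -> bool:
    return text.lower() == "true"

assert parse_bool_body("True") is True
assert parse_bool_body("FALSE") is False  # any other body parses as False
```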
StrBytesIntFloat = Union[str, bytes, int, float] # Note: copied from Pydantic -# https://github.com/pydantic/pydantic/blob/32ea570bf96e84234d2992e1ddf40ab8a565925a/pydantic/main.py#L49 -IncEx: TypeAlias = "set[int] | set[str] | dict[int, Any] | dict[str, Any] | None" +# https://github.com/pydantic/pydantic/blob/6f31f8f68ef011f84357330186f603ff295312fd/pydantic/main.py#L79 +IncEx: TypeAlias = Union[Set[int], Set[str], Mapping[int, Union["IncEx", bool]], Mapping[str, Union["IncEx", bool]]] PostParser = Callable[[Any], Any] diff --git a/portkey_ai/_vendor/openai/_utils/__init__.py b/portkey_ai/_vendor/openai/_utils/__init__.py index 3efe66c8..af2c9bb7 100644 --- a/portkey_ai/_vendor/openai/_utils/__init__.py +++ b/portkey_ai/_vendor/openai/_utils/__init__.py @@ -1,3 +1,4 @@ +from ._logs import SensitiveHeadersFilter as SensitiveHeadersFilter from ._sync import asyncify as asyncify from ._proxy import LazyProxy as LazyProxy from ._utils import ( @@ -6,6 +7,7 @@ is_list as is_list, is_given as is_given, is_tuple as is_tuple, + json_safe as json_safe, lru_cache as lru_cache, is_mapping as is_mapping, is_tuple_t as is_tuple_t, @@ -38,6 +40,7 @@ is_iterable_type as is_iterable_type, is_required_type as is_required_type, is_annotated_type as is_annotated_type, + is_type_alias_type as is_type_alias_type, strip_annotated_type as strip_annotated_type, extract_type_var_from_base as extract_type_var_from_base, ) diff --git a/portkey_ai/_vendor/openai/_utils/_logs.py b/portkey_ai/_vendor/openai/_utils/_logs.py index e5113fd8..37694693 100644 --- a/portkey_ai/_vendor/openai/_utils/_logs.py +++ b/portkey_ai/_vendor/openai/_utils/_logs.py @@ -1,10 +1,16 @@ import os import logging +from typing_extensions import override + +from ._utils import is_dict logger: logging.Logger = logging.getLogger("openai") httpx_logger: logging.Logger = logging.getLogger("httpx") +SENSITIVE_HEADERS = {"api-key", "authorization"} + + def _basic_config() -> None: # e.g. 
[2023-10-05 14:12:26 - openai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" logging.basicConfig( @@ -23,3 +29,14 @@ def setup_logging() -> None: _basic_config() logger.setLevel(logging.INFO) httpx_logger.setLevel(logging.INFO) + + +class SensitiveHeadersFilter(logging.Filter): + @override + def filter(self, record: logging.LogRecord) -> bool: + if is_dict(record.args) and "headers" in record.args and is_dict(record.args["headers"]): + headers = record.args["headers"] = {**record.args["headers"]} + for header in headers: + if str(header).lower() in SENSITIVE_HEADERS: + headers[header] = "<redacted>" + return True diff --git a/portkey_ai/_vendor/openai/_utils/_reflection.py b/portkey_ai/_vendor/openai/_utils/_reflection.py index 89aa712a..bdaca29e 100644 --- a/portkey_ai/_vendor/openai/_utils/_reflection.py +++ b/portkey_ai/_vendor/openai/_utils/_reflection.py @@ -15,6 +15,7 @@ def assert_signatures_in_sync( check_func: Callable[..., Any], *, exclude_params: set[str] = set(), + description: str = "", ) -> None: """Ensure that the signature of the second function matches the first.""" @@ -39,4 +40,6 @@ def assert_signatures_in_sync( continue if errors: - raise AssertionError(f"{len(errors)} errors encountered when comparing signatures:\n\n" + "\n\n".join(errors)) + raise AssertionError( + f"{len(errors)} errors encountered when comparing signatures{description}:\n\n" + "\n\n".join(errors) + ) diff --git a/portkey_ai/_vendor/openai/_utils/_sync.py b/portkey_ai/_vendor/openai/_utils/_sync.py index d0d81033..5d9e2c2a 100644 --- a/portkey_ai/_vendor/openai/_utils/_sync.py +++ b/portkey_ai/_vendor/openai/_utils/_sync.py @@ -1,56 +1,61 @@ from __future__ import annotations +import sys +import asyncio import functools -from typing import TypeVar, Callable, Awaitable +import contextvars +from typing import Any, TypeVar, Callable, Awaitable from typing_extensions import ParamSpec -import anyio -import anyio.to_thread - -from ._reflection import function_has_argument - T_Retval = TypeVar("T_Retval") T_ParamSpec = ParamSpec("T_ParamSpec") -# copied from `asyncer`, https://github.com/tiangolo/asyncer -def asyncify( - function: Callable[T_ParamSpec, T_Retval], - *, - cancellable: bool = False, - limiter: anyio.CapacityLimiter | None = None, -) -> Callable[T_ParamSpec, Awaitable[T_Retval]]: - """ - Take a blocking function and create an async one that receives the same - positional and keyword arguments, and that when called, calls the original function - in a worker thread using `anyio.to_thread.run_sync()`. Internally, - `asyncer.asyncify()` uses the same `anyio.to_thread.run_sync()`, but it supports - keyword arguments additional to positional arguments and it adds better support for - autocompletion and inline errors for the arguments of the function called and the - return value. +if sys.version_info >= (3, 9): + to_thread = asyncio.to_thread +else: + # backport of https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread + # for Python 3.8 support + async def to_thread( + func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs + ) -> Any: + """Asynchronously run function *func* in a separate thread. - If the `cancellable` option is enabled and the task waiting for its completion is - cancelled, the thread will still run its course but its return value (or any raised - exception) will be ignored. + Any *args and **kwargs supplied for this function are directly passed + to *func*.
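`SensitiveHeadersFilter` (added above) redacts `authorization` and `api-key` values whenever a log record's args carry a headers mapping. A hedged wiring sketch, assuming the vendored export path:

```python
import logging

from portkey_ai._vendor.openai._utils import SensitiveHeadersFilter  # assumed path

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("openai")
logger.addFilter(SensitiveHeadersFilter())

# Shaped like the client's own request logging: the filter copies the headers
# dict and swaps the secret for "<redacted>" before the handler sees it.
logger.debug("Request options: %s", {"headers": {"Authorization": "Bearer sk-secret"}})
```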
Also, the current :class:`contextvars.Context` is propagated, + allowing context variables from the main thread to be accessed in the + separate thread. - Use it like this: + Returns a coroutine that can be awaited to get the eventual result of *func*. + """ + loop = asyncio.events.get_running_loop() + ctx = contextvars.copy_context() + func_call = functools.partial(ctx.run, func, *args, **kwargs) + return await loop.run_in_executor(None, func_call) + + +# inspired by `asyncer`, https://github.com/tiangolo/asyncer +def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]: + """ + Take a blocking function and create an async one that receives the same + positional and keyword arguments. For Python version 3.9 and above, it uses + asyncio.to_thread to run the function in a separate thread. For Python version + 3.8, it uses a locally defined copy of the asyncio.to_thread function which was + introduced in Python 3.9. - ```Python - def do_work(arg1, arg2, kwarg1="", kwarg2="") -> str: - # Do work - return "Some result" + Usage: + ```python + def blocking_func(arg1, arg2, kwarg1=None): + # blocking code + return result - result = await to_thread.asyncify(do_work)("spam", "ham", kwarg1="a", kwarg2="b") - print(result) + result = asyncify(blocking_func)(arg1, arg2, kwarg1=value1) ``` ## Arguments `function`: a blocking regular callable (e.g. a function) - `cancellable`: `True` to allow cancellation of the operation - `limiter`: capacity limiter to use to limit the total amount of threads running - (if omitted, the default limiter is used) ## Return @@ -60,22 +65,6 @@ def do_work(arg1, arg2, kwarg1="", kwarg2="") -> str: """ async def wrapper(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval: - partial_f = functools.partial(function, *args, **kwargs) - - # In `v4.1.0` anyio added the `abandon_on_cancel` argument and deprecated the old - # `cancellable` argument, so we need to use the new `abandon_on_cancel` to avoid - # surfacing deprecation warnings. - if function_has_argument(anyio.to_thread.run_sync, "abandon_on_cancel"): - return await anyio.to_thread.run_sync( - partial_f, - abandon_on_cancel=cancellable, - limiter=limiter, - ) - - return await anyio.to_thread.run_sync( - partial_f, - cancellable=cancellable, - limiter=limiter, - ) + return await to_thread(function, *args, **kwargs) return wrapper diff --git a/portkey_ai/_vendor/openai/_utils/_transform.py b/portkey_ai/_vendor/openai/_utils/_transform.py index 47e262a5..a6b62cad 100644 --- a/portkey_ai/_vendor/openai/_utils/_transform.py +++ b/portkey_ai/_vendor/openai/_utils/_transform.py @@ -173,6 +173,11 @@ def _transform_recursive( # Iterable[T] or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) ): + # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually + # intended as an iterable, so we don't transform it.
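The rewritten `asyncify` drops the anyio-specific options and simply defers to `asyncio.to_thread` (or the 3.8 backport defined above). A hedged usage sketch, assuming the vendored export path:

```python
import asyncio
import time

from portkey_ai._vendor.openai._utils import asyncify  # assumed path

def blocking_add(a: int, b: int, *, delay: float = 0.0) -> int:
    time.sleep(delay)  # stands in for blocking I/O
    return a + b

async def main() -> None:
    # the wrapped call runs in a worker thread, keeping the event loop responsive
    total = await asyncify(blocking_add)(1, 2, delay=0.01)
    assert total == 3

asyncio.run(main())
```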
+ if isinstance(data, dict): + return cast(object, data) + inner_type = extract_type_arg(stripped_type, 0) return [_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data] @@ -186,7 +191,7 @@ def _transform_recursive( return data if isinstance(data, pydantic.BaseModel): - return model_dump(data, exclude_unset=True) + return model_dump(data, exclude_unset=True, mode="json") annotated_type = _get_annotated_type(annotation) if annotated_type is None: @@ -311,6 +316,11 @@ async def _async_transform_recursive( # Iterable[T] or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) ): + # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually + # intended as an iterable, so we don't transform it. + if isinstance(data, dict): + return cast(object, data) + inner_type = extract_type_arg(stripped_type, 0) return [await _async_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data] @@ -324,7 +334,7 @@ async def _async_transform_recursive( return data if isinstance(data, pydantic.BaseModel): - return model_dump(data, exclude_unset=True) + return model_dump(data, exclude_unset=True, mode="json") annotated_type = _get_annotated_type(annotation) if annotated_type is None: diff --git a/portkey_ai/_vendor/openai/_utils/_typing.py b/portkey_ai/_vendor/openai/_utils/_typing.py index c036991f..278749b1 100644 --- a/portkey_ai/_vendor/openai/_utils/_typing.py +++ b/portkey_ai/_vendor/openai/_utils/_typing.py @@ -1,8 +1,17 @@ from __future__ import annotations +import sys +import typing +import typing_extensions from typing import Any, TypeVar, Iterable, cast from collections import abc as _c_abc -from typing_extensions import Required, Annotated, get_args, get_origin +from typing_extensions import ( + TypeIs, + Required, + Annotated, + get_args, + get_origin, +) from .._types import InheritsGeneric from .._compat import is_union as _is_union @@ -36,6 +45,26 @@ def is_typevar(typ: type) -> bool: return type(typ) == TypeVar # type: ignore +_TYPE_ALIAS_TYPES: tuple[type[typing_extensions.TypeAliasType], ...] = (typing_extensions.TypeAliasType,) +if sys.version_info >= (3, 12): + _TYPE_ALIAS_TYPES = (*_TYPE_ALIAS_TYPES, typing.TypeAliasType) + + +def is_type_alias_type(tp: Any, /) -> TypeIs[typing_extensions.TypeAliasType]: + """Return whether the provided argument is an instance of `TypeAliasType`. + + ```python + type Int = int + is_type_alias_type(Int) + # > True + Str = TypeAliasType("Str", str) + is_type_alias_type(Str) + # > True + ``` + """ + return isinstance(tp, _TYPE_ALIAS_TYPES) + + # Extracts T from Annotated[T, ...] 
or from Required[Annotated[T, ...]] def strip_annotated_type(typ: type) -> type: if is_required_type(typ) or is_annotated_type(typ): diff --git a/portkey_ai/_vendor/openai/_utils/_utils.py b/portkey_ai/_vendor/openai/_utils/_utils.py index 2fc5a1c6..e5811bba 100644 --- a/portkey_ai/_vendor/openai/_utils/_utils.py +++ b/portkey_ai/_vendor/openai/_utils/_utils.py @@ -16,6 +16,7 @@ overload, ) from pathlib import Path +from datetime import date, datetime from typing_extensions import TypeGuard import sniffio @@ -363,12 +364,13 @@ def file_from_path(path: str) -> FileTypes: def get_required_header(headers: HeadersLike, header: str) -> str: lower_header = header.lower() - if isinstance(headers, Mapping): - for k, v in headers.items(): + if is_mapping_t(headers): + # mypy doesn't understand the type narrowing here + for k, v in headers.items(): # type: ignore if k.lower() == lower_header and isinstance(v, str): return v - """ to deal with the case where the header looks like Stainless-Event-Id """ + # to deal with the case where the header looks like Stainless-Event-Id intercaps_header = re.sub(r"([^\w])(\w)", lambda pat: pat.group(1) + pat.group(2).upper(), header.capitalize()) for normalized_header in [header, lower_header, header.upper(), intercaps_header]: @@ -394,3 +396,19 @@ def lru_cache(*, maxsize: int | None = 128) -> Callable[[CallableT], CallableT]: maxsize=maxsize, ) return cast(Any, wrapper) # type: ignore[no-any-return] + + +def json_safe(data: object) -> object: + """Translates a mapping / sequence recursively in the same fashion + as `pydantic` v2's `model_dump(mode="json")`. + """ + if is_mapping(data): + return {json_safe(key): json_safe(value) for key, value in data.items()} + + if is_iterable(data) and not isinstance(data, (str, bytes, bytearray)): + return [json_safe(item) for item in data] + + if isinstance(data, (datetime, date)): + return data.isoformat() + + return data diff --git a/portkey_ai/_vendor/openai/_version.py b/portkey_ai/_vendor/openai/_version.py index f88b8dea..c08e68e1 100644 --- a/portkey_ai/_vendor/openai/_version.py +++ b/portkey_ai/_vendor/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.40.1" # x-release-please-version +__version__ = "1.58.1" # x-release-please-version diff --git a/portkey_ai/_vendor/openai/cli/_cli.py b/portkey_ai/_vendor/openai/cli/_cli.py index f3177938..aa3cd7d1 100644 --- a/portkey_ai/_vendor/openai/cli/_cli.py +++ b/portkey_ai/_vendor/openai/cli/_cli.py @@ -15,7 +15,6 @@ from .. 
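`json_safe` (added above, and what the Pydantic v1 `model_dump(mode="json")` shim feeds its output through) recursively converts mappings and non-string iterables and ISO-formats date values. Its expected behaviour as a sketch, assuming the vendored export path:

```python
from datetime import date, datetime

from portkey_ai._vendor.openai._utils import json_safe  # assumed path

payload = {
    "created": datetime(2024, 1, 1, 12, 30),
    "dates": (date(2024, 1, 1), date(2024, 1, 2)),  # non-str iterables become lists
}
assert json_safe(payload) == {
    "created": "2024-01-01T12:30:00",
    "dates": ["2024-01-01", "2024-01-02"],
}
```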
import _ApiType, __version__ from ._api import register_commands from ._utils import can_use_http2 -from .._types import ProxiesDict from ._errors import CLIError, display_error from .._compat import PYDANTIC_V2, ConfigDict, model_parse from .._models import BaseModel @@ -167,17 +166,17 @@ def _main() -> None: if args.verbosity != 0: sys.stderr.write("Warning: --verbosity isn't supported yet\n") - proxies: ProxiesDict = {} + proxies: dict[str, httpx.BaseTransport] = {} if args.proxy is not None: for proxy in args.proxy: key = "https://" if proxy.startswith("https") else "http://" if key in proxies: raise CLIError(f"Multiple {key} proxies given - only the last one would be used") - proxies[key] = proxy + proxies[key] = httpx.HTTPTransport(proxy=httpx.Proxy(httpx.URL(proxy))) http_client = httpx.Client( - proxies=proxies or None, + mounts=proxies or None, http2=can_use_http2(), ) openai.http_client = http_client diff --git a/portkey_ai/_vendor/openai/cli/_tools/migrate.py b/portkey_ai/_vendor/openai/cli/_tools/migrate.py index 7c10bb7f..841b7775 100644 --- a/portkey_ai/_vendor/openai/cli/_tools/migrate.py +++ b/portkey_ai/_vendor/openai/cli/_tools/migrate.py @@ -2,7 +2,6 @@ import os import sys -import json import shutil import tarfile import platform @@ -85,14 +84,16 @@ def install() -> Path: if sys.platform == "win32": raise CLIError("Windows is not supported yet in the migration CLI") - platform = "macos" if sys.platform == "darwin" else "linux" + _debug("Using Grit installer from GitHub") + + platform = "apple-darwin" if sys.platform == "darwin" else "unknown-linux-gnu" dir_name = _cache_dir() / "openai-python" install_dir = dir_name / ".install" target_dir = install_dir / "bin" - target_path = target_dir / "marzano" - temp_file = target_dir / "marzano.tmp" + target_path = target_dir / "grit" + temp_file = target_dir / "grit.tmp" if target_path.exists(): _debug(f"{target_path} already exists") @@ -109,27 +110,14 @@ def install() -> Path: arch = _get_arch() _debug(f"Using architecture {arch}") - file_name = f"marzano-{platform}-{arch}" - meta_url = f"https://api.keygen.sh/v1/accounts/{KEYGEN_ACCOUNT}/artifacts/{file_name}" + file_name = f"grit-{arch}-{platform}" + download_url = f"https://github.com/getgrit/gritql/releases/latest/download/{file_name}.tar.gz" - sys.stdout.write(f"Retrieving Grit CLI metadata from {meta_url}\n") + sys.stdout.write(f"Downloading Grit CLI from {download_url}\n") with httpx.Client() as client: - response = client.get(meta_url) # pyright: ignore[reportUnknownMemberType] - - data = response.json() - errors = data.get("errors") - if errors: - for error in errors: - sys.stdout.write(f"{error}\n") - - raise CLIError("Could not locate Grit CLI binary - see above errors") - - write_manifest(install_dir, data["data"]["relationships"]["release"]["data"]["id"]) - - link = data["data"]["links"]["redirect"] - _debug(f"Redirect URL {link}") - - download_response = client.get(link) # pyright: ignore[reportUnknownMemberType] + download_response = client.get(download_url, follow_redirects=True) + if download_response.status_code != 200: + raise CLIError(f"Failed to download Grit CLI from {download_url}") with open(temp_file, "wb") as file: for chunk in download_response.iter_bytes(): file.write(chunk) @@ -143,8 +131,7 @@ def install() -> Path: else: archive.extractall(unpacked_dir) - for item in unpacked_dir.iterdir(): - item.rename(target_dir / item.name) + _move_files_recursively(unpacked_dir, target_dir) shutil.rmtree(unpacked_dir) os.remove(temp_file) @@ -155,30 +142,23 
@@ def install() -> Path: return target_path +def _move_files_recursively(source_dir: Path, target_dir: Path) -> None: + for item in source_dir.iterdir(): + if item.is_file(): + item.rename(target_dir / item.name) + elif item.is_dir(): + _move_files_recursively(item, target_dir) + + def _get_arch() -> str: architecture = platform.machine().lower() - # Map the architecture names to Node.js equivalents + # Map the architecture names to Grit equivalents arch_map = { - "x86_64": "x64", - "amd64": "x64", - "armv7l": "arm", - "aarch64": "arm64", + "x86_64": "x86_64", + "amd64": "x86_64", + "armv7l": "aarch64", + "arm64": "aarch64", } return arch_map.get(architecture, architecture) - - -def write_manifest(install_path: Path, release: str) -> None: - manifest = { - "installPath": str(install_path), - "binaries": { - "marzano": { - "name": "marzano", - "release": release, - }, - }, - } - manifest_path = Path(install_path) / "manifests.json" - with open(manifest_path, "w") as f: - json.dump(manifest, f, indent=2) diff --git a/portkey_ai/_vendor/openai/lib/_parsing/_completions.py b/portkey_ai/_vendor/openai/lib/_parsing/_completions.py index f9d1d6b3..f1fa9f2b 100644 --- a/portkey_ai/_vendor/openai/lib/_parsing/_completions.py +++ b/portkey_ai/_vendor/openai/lib/_parsing/_completions.py @@ -9,9 +9,9 @@ from .._tools import PydanticFunctionTool from ..._types import NOT_GIVEN, NotGiven from ..._utils import is_dict, is_given -from ..._compat import model_parse_json +from ..._compat import PYDANTIC_V2, model_parse_json from ..._models import construct_type_unchecked -from .._pydantic import to_strict_json_schema +from .._pydantic import is_basemodel_type, to_strict_json_schema, is_dataclass_like_type from ...types.chat import ( ParsedChoice, ChatCompletion, @@ -69,7 +69,7 @@ def parse_chat_completion( choices: list[ParsedChoice[ResponseFormatT]] = [] for choice in chat_completion.choices: if choice.finish_reason == "length": - raise LengthFinishReasonError() + raise LengthFinishReasonError(completion=chat_completion) if choice.finish_reason == "content_filter": raise ContentFilterFinishReasonError() @@ -216,14 +216,16 @@ def is_parseable_tool(input_tool: ChatCompletionToolParam) -> bool: return cast(FunctionDefinition, input_fn).get("strict") or False -def is_basemodel_type(typ: type) -> TypeGuard[type[pydantic.BaseModel]]: - return issubclass(typ, pydantic.BaseModel) - - def _parse_content(response_format: type[ResponseFormatT], content: str) -> ResponseFormatT: if is_basemodel_type(response_format): return cast(ResponseFormatT, model_parse_json(response_format, content)) + if is_dataclass_like_type(response_format): + if not PYDANTIC_V2: + raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {response_format}") + + return pydantic.TypeAdapter(response_format).validate_json(content) + raise TypeError(f"Unable to automatically parse response format type {response_format}") @@ -241,14 +243,22 @@ def type_to_response_format_param( # can only be a `type` response_format = cast(type, response_format) - if not is_basemodel_type(response_format): + json_schema_type: type[pydantic.BaseModel] | pydantic.TypeAdapter[Any] | None = None + + if is_basemodel_type(response_format): + name = response_format.__name__ + json_schema_type = response_format + elif is_dataclass_like_type(response_format): + name = response_format.__name__ + json_schema_type = pydantic.TypeAdapter(response_format) + else: raise TypeError(f"Unsupported response_format type - {response_format}") return { "type": 
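The `is_dataclass_like_type` branch above means Pydantic v2 dataclasses (anything carrying `__pydantic_config__`) can now be parsed as response formats via `pydantic.TypeAdapter`. A minimal Pydantic v2 sketch of that validation path, with illustrative names:

```python
import pydantic

@pydantic.dataclasses.dataclass
class CalendarEvent:
    name: str
    day: str

# Mirrors the new `_parse_content` branch: a TypeAdapter validates the raw
# JSON content against the dataclass (Pydantic v2 only).
adapter = pydantic.TypeAdapter(CalendarEvent)
event = adapter.validate_json('{"name": "standup", "day": "Monday"}')
assert event == CalendarEvent(name="standup", day="Monday")
```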
"json_schema", "json_schema": { - "schema": to_strict_json_schema(response_format), - "name": response_format.__name__, + "schema": to_strict_json_schema(json_schema_type), + "name": name, "strict": True, }, } diff --git a/portkey_ai/_vendor/openai/lib/_pydantic.py b/portkey_ai/_vendor/openai/lib/_pydantic.py index 967ad5de..22c7a1f3 100644 --- a/portkey_ai/_vendor/openai/lib/_pydantic.py +++ b/portkey_ai/_vendor/openai/lib/_pydantic.py @@ -1,21 +1,34 @@ from __future__ import annotations -from typing import Any +import inspect +from typing import Any, TypeVar from typing_extensions import TypeGuard import pydantic +from .._types import NOT_GIVEN from .._utils import is_dict as _is_dict, is_list -from .._compat import model_json_schema +from .._compat import PYDANTIC_V2, model_json_schema +_T = TypeVar("_T") -def to_strict_json_schema(model: type[pydantic.BaseModel]) -> dict[str, Any]: - return _ensure_strict_json_schema(model_json_schema(model), path=()) + +def to_strict_json_schema(model: type[pydantic.BaseModel] | pydantic.TypeAdapter[Any]) -> dict[str, Any]: + if inspect.isclass(model) and is_basemodel_type(model): + schema = model_json_schema(model) + elif PYDANTIC_V2 and isinstance(model, pydantic.TypeAdapter): + schema = model.json_schema() + else: + raise TypeError(f"Non BaseModel types are only supported with Pydantic v2 - {model}") + + return _ensure_strict_json_schema(schema, path=(), root=schema) def _ensure_strict_json_schema( json_schema: object, + *, path: tuple[str, ...], + root: dict[str, object], ) -> dict[str, Any]: """Mutates the given JSON schema to ensure it conforms to the `strict` standard that the API expects. @@ -23,6 +36,16 @@ def _ensure_strict_json_schema( if not is_dict(json_schema): raise TypeError(f"Expected {json_schema} to be a dictionary; path={path}") + defs = json_schema.get("$defs") + if is_dict(defs): + for def_name, def_schema in defs.items(): + _ensure_strict_json_schema(def_schema, path=(*path, "$defs", def_name), root=root) + + definitions = json_schema.get("definitions") + if is_dict(definitions): + for definition_name, definition_schema in definitions.items(): + _ensure_strict_json_schema(definition_schema, path=(*path, "definitions", definition_name), root=root) + typ = json_schema.get("type") if typ == "object" and "additionalProperties" not in json_schema: json_schema["additionalProperties"] = False @@ -33,7 +56,7 @@ def _ensure_strict_json_schema( if is_dict(properties): json_schema["required"] = [prop for prop in properties.keys()] json_schema["properties"] = { - key: _ensure_strict_json_schema(prop_schema, path=(*path, "properties", key)) + key: _ensure_strict_json_schema(prop_schema, path=(*path, "properties", key), root=root) for key, prop_schema in properties.items() } @@ -41,31 +64,87 @@ def _ensure_strict_json_schema( # { 'type': 'array', 'items': {...} } items = json_schema.get("items") if is_dict(items): - json_schema["items"] = _ensure_strict_json_schema(items, path=(*path, "items")) + json_schema["items"] = _ensure_strict_json_schema(items, path=(*path, "items"), root=root) # unions any_of = json_schema.get("anyOf") if is_list(any_of): json_schema["anyOf"] = [ - _ensure_strict_json_schema(variant, path=(*path, "anyOf", str(i))) for i, variant in enumerate(any_of) + _ensure_strict_json_schema(variant, path=(*path, "anyOf", str(i)), root=root) + for i, variant in enumerate(any_of) ] # intersections all_of = json_schema.get("allOf") if is_list(all_of): - json_schema["allOf"] = [ - _ensure_strict_json_schema(entry, path=(*path, 
"anyOf", str(i))) for i, entry in enumerate(all_of) - ] - - defs = json_schema.get("$defs") - if is_dict(defs): - for def_name, def_schema in defs.items(): - _ensure_strict_json_schema(def_schema, path=(*path, "$defs", def_name)) + if len(all_of) == 1: + json_schema.update(_ensure_strict_json_schema(all_of[0], path=(*path, "allOf", "0"), root=root)) + json_schema.pop("allOf") + else: + json_schema["allOf"] = [ + _ensure_strict_json_schema(entry, path=(*path, "allOf", str(i)), root=root) + for i, entry in enumerate(all_of) + ] + + # strip `None` defaults as there's no meaningful distinction here + # the schema will still be `nullable` and the model will default + # to using `None` anyway + if json_schema.get("default", NOT_GIVEN) is None: + json_schema.pop("default") + + # we can't use `$ref`s if there are also other properties defined, e.g. + # `{"$ref": "...", "description": "my description"}` + # + # so we unravel the ref + # `{"type": "string", "description": "my description"}` + ref = json_schema.get("$ref") + if ref and has_more_than_n_keys(json_schema, 1): + assert isinstance(ref, str), f"Received non-string $ref - {ref}" + + resolved = resolve_ref(root=root, ref=ref) + if not is_dict(resolved): + raise ValueError(f"Expected `$ref: {ref}` to resolved to a dictionary but got {resolved}") + + # properties from the json schema take priority over the ones on the `$ref` + json_schema.update({**resolved, **json_schema}) + json_schema.pop("$ref") return json_schema +def resolve_ref(*, root: dict[str, object], ref: str) -> object: + if not ref.startswith("#/"): + raise ValueError(f"Unexpected $ref format {ref!r}; Does not start with #/") + + path = ref[2:].split("/") + resolved = root + for key in path: + value = resolved[key] + assert is_dict(value), f"encountered non-dictionary entry while resolving {ref} - {resolved}" + resolved = value + + return resolved + + +def is_basemodel_type(typ: type) -> TypeGuard[type[pydantic.BaseModel]]: + return issubclass(typ, pydantic.BaseModel) + + +def is_dataclass_like_type(typ: type) -> bool: + """Returns True if the given type likely used `@pydantic.dataclass`""" + return hasattr(typ, "__pydantic_config__") + + def is_dict(obj: object) -> TypeGuard[dict[str, object]]: # just pretend that we know there are only `str` keys # as that check is not worth the performance cost return _is_dict(obj) + + +def has_more_than_n_keys(obj: dict[str, object], n: int) -> bool: + i = 0 + for _ in obj.keys(): + i += 1 + if i > n: + return True + return False diff --git a/portkey_ai/_vendor/openai/lib/azure.py b/portkey_ai/_vendor/openai/lib/azure.py index ef64137d..13d9f318 100644 --- a/portkey_ai/_vendor/openai/lib/azure.py +++ b/portkey_ai/_vendor/openai/lib/azure.py @@ -53,13 +53,15 @@ class BaseAzureClient(BaseClient[_HttpxClientT, _DefaultStreamT]): def _build_request( self, options: FinalRequestOptions, + *, + retries_taken: int = 0, ) -> httpx.Request: if options.url in _deployments_endpoints and is_mapping(options.json_data): model = options.json_data.get("model") if model is not None and not "/deployments" in str(self.base_url): options.url = f"/deployments/{model}{options.url}" - return super()._build_request(options) + return super()._build_request(options, retries_taken=retries_taken) class AzureOpenAI(BaseAzureClient[httpx.Client, Stream[Any]], OpenAI): @@ -74,6 +76,7 @@ def __init__( azure_ad_token: str | None = None, azure_ad_token_provider: AzureADTokenProvider | None = None, organization: str | None = None, + websocket_base_url: str | httpx.URL | None = 
None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, @@ -92,6 +95,7 @@ def __init__( azure_ad_token: str | None = None, azure_ad_token_provider: AzureADTokenProvider | None = None, organization: str | None = None, + websocket_base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, @@ -110,6 +114,7 @@ def __init__( azure_ad_token: str | None = None, azure_ad_token_provider: AzureADTokenProvider | None = None, organization: str | None = None, + websocket_base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, @@ -129,6 +134,7 @@ def __init__( azure_ad_token_provider: AzureADTokenProvider | None = None, organization: str | None = None, project: str | None = None, + websocket_base_url: str | httpx.URL | None = None, base_url: str | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -191,9 +197,9 @@ def __init__( ) if azure_deployment is not None: - base_url = f"{azure_endpoint}/openai/deployments/{azure_deployment}" + base_url = f"{azure_endpoint.rstrip('/')}/openai/deployments/{azure_deployment}" else: - base_url = f"{azure_endpoint}/openai" + base_url = f"{azure_endpoint.rstrip('/')}/openai" else: if azure_endpoint is not None: raise ValueError("base_url and azure_endpoint are mutually exclusive") @@ -212,6 +218,7 @@ def __init__( default_headers=default_headers, default_query=default_query, http_client=http_client, + websocket_base_url=websocket_base_url, _strict_response_validation=_strict_response_validation, ) self._api_version = api_version @@ -225,6 +232,7 @@ def copy( api_key: str | None = None, organization: str | None = None, project: str | None = None, + websocket_base_url: str | httpx.URL | None = None, api_version: str | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AzureADTokenProvider | None = None, @@ -245,6 +253,7 @@ def copy( api_key=api_key, organization=organization, project=project, + websocket_base_url=websocket_base_url, base_url=base_url, timeout=timeout, http_client=http_client, @@ -312,6 +321,7 @@ def __init__( azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = None, project: str | None = None, + websocket_base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, @@ -331,6 +341,7 @@ def __init__( azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = None, project: str | None = None, + websocket_base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, @@ -350,6 +361,7 @@ def __init__( azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = None, project: str | None = None, + websocket_base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, @@ -370,6 +382,7 @@ def __init__( organization: str | None = None, project: str | None = 
None, base_url: str | None = None, + websocket_base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, @@ -431,9 +444,9 @@ def __init__( ) if azure_deployment is not None: - base_url = f"{azure_endpoint}/openai/deployments/{azure_deployment}" + base_url = f"{azure_endpoint.rstrip('/')}/openai/deployments/{azure_deployment}" else: - base_url = f"{azure_endpoint}/openai" + base_url = f"{azure_endpoint.rstrip('/')}/openai" else: if azure_endpoint is not None: raise ValueError("base_url and azure_endpoint are mutually exclusive") @@ -452,6 +465,7 @@ def __init__( default_headers=default_headers, default_query=default_query, http_client=http_client, + websocket_base_url=websocket_base_url, _strict_response_validation=_strict_response_validation, ) self._api_version = api_version @@ -465,6 +479,7 @@ def copy( api_key: str | None = None, organization: str | None = None, project: str | None = None, + websocket_base_url: str | httpx.URL | None = None, api_version: str | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, @@ -485,6 +500,7 @@ def copy( api_key=api_key, organization=organization, project=project, + websocket_base_url=websocket_base_url, base_url=base_url, timeout=timeout, http_client=http_client, diff --git a/portkey_ai/_vendor/openai/lib/streaming/_assistants.py b/portkey_ai/_vendor/openai/lib/streaming/_assistants.py index 7445f9a9..6efb3ca3 100644 --- a/portkey_ai/_vendor/openai/lib/streaming/_assistants.py +++ b/portkey_ai/_vendor/openai/lib/streaming/_assistants.py @@ -8,6 +8,7 @@ import httpx from ..._utils import is_dict, is_list, consume_sync_iterator, consume_async_iterator +from ..._compat import model_dump from ..._models import construct_type from ..._streaming import Stream, AsyncStream from ...types.beta import AssistantStreamEvent @@ -242,7 +243,7 @@ def on_text_delta(self, delta: TextDelta, snapshot: Text) -> None: on_text_delta(TextDelta(value=" solution"), Text(value="The solution")), on_text_delta(TextDelta(value=" to"), Text(value="The solution to")), on_text_delta(TextDelta(value=" the"), Text(value="The solution to the")), - on_text_delta(TextDelta(value=" equation"), Text(value="The solution to the equivalent")), + on_text_delta(TextDelta(value=" equation"), Text(value="The solution to the equation")), """ def on_text_done(self, text: Text) -> None: @@ -906,11 +907,11 @@ def accumulate_run_step( merged = accumulate_delta( cast( "dict[object, object]", - snapshot.model_dump(exclude_unset=True), + model_dump(snapshot, exclude_unset=True, warnings=False), ), cast( "dict[object, object]", - data.delta.model_dump(exclude_unset=True), + model_dump(data.delta, exclude_unset=True, warnings=False), ), ) run_step_snapshots[snapshot.id] = cast(RunStep, construct_type(type_=RunStep, value=merged)) @@ -948,7 +949,7 @@ def accumulate_event( construct_type( # mypy doesn't allow Content for some reason type_=cast(Any, MessageContent), - value=content_delta.model_dump(exclude_unset=True), + value=model_dump(content_delta, exclude_unset=True, warnings=False), ), ), ) @@ -957,11 +958,11 @@ def accumulate_event( merged = accumulate_delta( cast( "dict[object, object]", - block.model_dump(exclude_unset=True), + model_dump(block, exclude_unset=True, warnings=False), ), cast( "dict[object, object]", - content_delta.model_dump(exclude_unset=True), + model_dump(content_delta, exclude_unset=True, 
warnings=False), ), ) current_message_snapshot.content[content_delta.index] = cast( diff --git a/portkey_ai/_vendor/openai/lib/streaming/chat/__init__.py b/portkey_ai/_vendor/openai/lib/streaming/chat/__init__.py index 5881c39b..dfa3f3f2 100644 --- a/portkey_ai/_vendor/openai/lib/streaming/chat/__init__.py +++ b/portkey_ai/_vendor/openai/lib/streaming/chat/__init__.py @@ -21,6 +21,7 @@ from ._completions import ( ChatCompletionStream as ChatCompletionStream, AsyncChatCompletionStream as AsyncChatCompletionStream, + ChatCompletionStreamState as ChatCompletionStreamState, ChatCompletionStreamManager as ChatCompletionStreamManager, AsyncChatCompletionStreamManager as AsyncChatCompletionStreamManager, ) diff --git a/portkey_ai/_vendor/openai/lib/streaming/chat/_completions.py b/portkey_ai/_vendor/openai/lib/streaming/chat/_completions.py index 342a5e2b..21460913 100644 --- a/portkey_ai/_vendor/openai/lib/streaming/chat/_completions.py +++ b/portkey_ai/_vendor/openai/lib/streaming/chat/_completions.py @@ -23,7 +23,7 @@ FunctionToolCallArgumentsDeltaEvent, ) from .._deltas import accumulate_delta -from ...._types import NOT_GIVEN, NotGiven +from ...._types import NOT_GIVEN, IncEx, NotGiven from ...._utils import is_given, consume_sync_iterator, consume_async_iterator from ...._compat import model_dump from ...._models import build, construct_type @@ -287,11 +287,31 @@ async def __aexit__( class ChatCompletionStreamState(Generic[ResponseFormatT]): + """Helper class for manually accumulating `ChatCompletionChunk`s into a final `ChatCompletion` object. + + This is useful in cases where you can't always use the `.stream()` method, e.g. + + ```py + from openai.lib.streaming.chat import ChatCompletionStreamState + + state = ChatCompletionStreamState() + + stream = client.chat.completions.create(..., stream=True) + for chunk in stream: + state.handle_chunk(chunk) + + # can also access the accumulated `ChatCompletion` mid-stream + state.current_completion_snapshot + + print(state.get_final_completion()) + ``` + """ + def __init__( self, *, - input_tools: Iterable[ChatCompletionToolParam] | NotGiven, - response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, + input_tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, + response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven = NOT_GIVEN, ) -> None: self.__current_completion_snapshot: ParsedChatCompletionSnapshot | None = None self.__choice_event_states: list[ChoiceEventState] = [] @@ -301,6 +321,11 @@ def __init__( self._rich_response_format: type | NotGiven = response_format if inspect.isclass(response_format) else NOT_GIVEN def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]: + """Parse the final completion object. + + Note this does not provide any guarantees that the stream has actually finished; you must + only call this method when the stream is finished.
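To make the caveat above concrete, here is a minimal, self-contained sketch of the manual-accumulation flow the new docstring describes (the model name and prompt are illustrative, not taken from the diff):

```python
from openai import OpenAI
from openai.lib.streaming.chat import ChatCompletionStreamState

client = OpenAI()
# Both constructor arguments now default to NOT_GIVEN, so no-arg
# construction works, as the new docstring shows.
state = ChatCompletionStreamState()

stream = client.chat.completions.create(
    messages=[{"role": "user", "content": "Say this is a test"}],
    model="gpt-4o",  # illustrative model choice
    stream=True,
)
for chunk in stream:
    state.handle_chunk(chunk)

# The stream is fully consumed at this point, so per the note above it is
# now safe to parse the final completion.
completion = state.get_final_completion()
print(completion.choices[0].message.content)
```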
+ """ return parse_chat_completion( chat_completion=self.current_completion_snapshot, response_format=self._rich_response_format, @@ -312,8 +337,8 @@ def current_completion_snapshot(self) -> ParsedChatCompletionSnapshot: assert self.__current_completion_snapshot is not None return self.__current_completion_snapshot - def handle_chunk(self, chunk: ChatCompletionChunk) -> list[ChatCompletionStreamEvent[ResponseFormatT]]: - """Accumulate a new chunk into the snapshot and returns a list of events to yield.""" + def handle_chunk(self, chunk: ChatCompletionChunk) -> Iterable[ChatCompletionStreamEvent[ResponseFormatT]]: + """Accumulate a new chunk into the snapshot and returns an iterable of events to yield.""" self.__current_completion_snapshot = self._accumulate_chunk(chunk) return self._build_events( @@ -352,13 +377,17 @@ def _accumulate_chunk(self, chunk: ChatCompletionChunk) -> ParsedChatCompletionS # we don't want to serialise / deserialise our custom properties # as they won't appear in the delta and we don't want to have to # continuosly reparse the content - exclude={ - "parsed": True, - "tool_calls": { - idx: {"function": {"parsed_arguments": True}} - for idx, _ in enumerate(choice_snapshot.message.tool_calls or []) + exclude=cast( + # cast required as mypy isn't smart enough to infer `True` here to `Literal[True]` + IncEx, + { + "parsed": True, + "tool_calls": { + idx: {"function": {"parsed_arguments": True}} + for idx, _ in enumerate(choice_snapshot.message.tool_calls or []) + }, }, - }, + ), ), ), cast("dict[object, object]", choice.delta.to_dict()), @@ -394,7 +423,9 @@ def _accumulate_chunk(self, chunk: ChatCompletionChunk) -> ParsedChatCompletionS if has_parseable_input(response_format=self._response_format, input_tools=self._input_tools): if choice.finish_reason == "length": - raise LengthFinishReasonError() + # at the time of writing, `.usage` will always be `None` but + # we include it here in case that is changed in the future + raise LengthFinishReasonError(completion=completion_snapshot) if choice.finish_reason == "content_filter": raise ContentFilterFinishReasonError() diff --git a/portkey_ai/_vendor/openai/resources/audio/audio.py b/portkey_ai/_vendor/openai/resources/audio/audio.py index 537ad573..18bd7b81 100644 --- a/portkey_ai/_vendor/openai/resources/audio/audio.py +++ b/portkey_ai/_vendor/openai/resources/audio/audio.py @@ -47,10 +47,21 @@ def speech(self) -> Speech: @cached_property def with_raw_response(self) -> AudioWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AudioWithRawResponse(self) @cached_property def with_streaming_response(self) -> AudioWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AudioWithStreamingResponse(self) @@ -69,10 +80,21 @@ def speech(self) -> AsyncSpeech: @cached_property def with_raw_response(self) -> AsyncAudioWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncAudioWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncAudioWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncAudioWithStreamingResponse(self) diff --git a/portkey_ai/_vendor/openai/resources/audio/speech.py b/portkey_ai/_vendor/openai/resources/audio/speech.py index a0df9ec4..09faaddd 100644 --- a/portkey_ai/_vendor/openai/resources/audio/speech.py +++ b/portkey_ai/_vendor/openai/resources/audio/speech.py @@ -31,10 +31,21 @@ class Speech(SyncAPIResource): @cached_property def with_raw_response(self) -> SpeechWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return SpeechWithRawResponse(self) @cached_property def with_streaming_response(self) -> SpeechWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return SpeechWithStreamingResponse(self) def create( @@ -59,13 +70,13 @@ def create( input: The text to generate audio for. The maximum length is 4096 characters. model: - One of the available [TTS models](https://platform.openai.com/docs/models/tts): + One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1` or `tts-1-hd` voice: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the - [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options). + [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`. @@ -104,10 +115,21 @@ def create( class AsyncSpeech(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncSpeechWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncSpeechWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncSpeechWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncSpeechWithStreamingResponse(self) async def create( @@ -132,13 +154,13 @@ async def create( input: The text to generate audio for. The maximum length is 4096 characters. model: - One of the available [TTS models](https://platform.openai.com/docs/models/tts): + One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1` or `tts-1-hd` voice: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. 
Previews of the voices are available in the - [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options). + [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`. diff --git a/portkey_ai/_vendor/openai/resources/audio/transcriptions.py b/portkey_ai/_vendor/openai/resources/audio/transcriptions.py index 1ee96241..8b5f4404 100644 --- a/portkey_ai/_vendor/openai/resources/audio/transcriptions.py +++ b/portkey_ai/_vendor/openai/resources/audio/transcriptions.py @@ -2,12 +2,14 @@ from __future__ import annotations -from typing import List, Union, Mapping, cast -from typing_extensions import Literal +import logging +from typing import TYPE_CHECKING, List, Union, Mapping, cast +from typing_extensions import Literal, overload, assert_never import httpx from ... import _legacy_response +from ...types import AudioResponseFormat from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes from ..._utils import ( extract_files, @@ -22,19 +24,91 @@ from ..._base_client import make_request_options from ...types.audio_model import AudioModel from ...types.audio.transcription import Transcription +from ...types.audio_response_format import AudioResponseFormat +from ...types.audio.transcription_verbose import TranscriptionVerbose __all__ = ["Transcriptions", "AsyncTranscriptions"] +log: logging.Logger = logging.getLogger("openai.audio.transcriptions") + class Transcriptions(SyncAPIResource): @cached_property def with_raw_response(self) -> TranscriptionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return TranscriptionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> TranscriptionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return TranscriptionsWithStreamingResponse(self) + @overload + def create( + self, + *, + file: FileTypes, + model: Union[str, AudioModel], + response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN, + language: str | NotGiven = NOT_GIVEN, + prompt: str | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Transcription: ... 
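The overload stack that begins above (and continues below) is what lets a type checker narrow the return type of `create()` from the `response_format` literal. A hedged sketch of the effect for a caller, assuming a local `speech.mp3` file and the `whisper-1` model:

```python
from openai import OpenAI

client = OpenAI()

with open("speech.mp3", "rb") as f:
    # Matches the Literal["verbose_json"] overload, so the result is typed
    # as TranscriptionVerbose and fields like .duration type-check.
    verbose = client.audio.transcriptions.create(
        file=f, model="whisper-1", response_format="verbose_json"
    )
print(verbose.duration, verbose.text)

with open("speech.mp3", "rb") as f:
    # Matches the Literal["text", "srt", "vtt"] overload: a plain str.
    srt = client.audio.transcriptions.create(
        file=f, model="whisper-1", response_format="srt"
    )
print(srt.splitlines()[0])
```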
+ + @overload + def create( + self, + *, + file: FileTypes, + model: Union[str, AudioModel], + response_format: Literal["verbose_json"], + language: str | NotGiven = NOT_GIVEN, + prompt: str | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> TranscriptionVerbose: ... + + @overload + def create( + self, + *, + file: FileTypes, + model: Union[str, AudioModel], + response_format: Literal["text", "srt", "vtt"], + language: str | NotGiven = NOT_GIVEN, + prompt: str | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> str: ... + def create( self, *, @@ -42,7 +116,7 @@ def create( model: Union[str, AudioModel], language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, + response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -51,7 +125,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Transcription: + ) -> Transcription | TranscriptionVerbose | str: """ Transcribes audio into the input language. @@ -69,11 +143,11 @@ def create( prompt: An optional text to guide the model's style or continue a previous audio segment. The - [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) should match the audio language. - response_format: The format of the transcript output, in one of these options: `json`, `text`, - `srt`, `verbose_json`, or `vtt`. + response_format: The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and @@ -111,34 +185,65 @@ def create( # sent to the server will contain a `boundary` parameter, e.g. 
# multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( + return self._post( # type: ignore[return-value] "/audio/transcriptions", body=maybe_transform(body, transcription_create_params.TranscriptionCreateParams), files=files, options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=Transcription, + cast_to=_get_response_format_type(response_format), ) class AsyncTranscriptions(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncTranscriptionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncTranscriptionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncTranscriptionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncTranscriptionsWithStreamingResponse(self) + @overload + async def create( + self, + *, + file: FileTypes, + model: Union[str, AudioModel], + response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN, + language: str | NotGiven = NOT_GIVEN, + prompt: str | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Transcription: ... + + @overload async def create( self, *, file: FileTypes, model: Union[str, AudioModel], + response_format: Literal["verbose_json"], language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -147,7 +252,44 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Transcription: + ) -> TranscriptionVerbose: ... + + @overload + async def create( + self, + *, + file: FileTypes, + model: Union[str, AudioModel], + response_format: Literal["text", "srt", "vtt"], + language: str | NotGiven = NOT_GIVEN, + prompt: str | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> str: ... + + async def create( + self, + *, + file: FileTypes, + model: Union[str, AudioModel], + language: str | NotGiven = NOT_GIVEN, + prompt: str | NotGiven = NOT_GIVEN, + response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Transcription | TranscriptionVerbose | str: """ Transcribes audio into the input language. @@ -165,11 +307,11 @@ async def create( prompt: An optional text to guide the model's style or continue a previous audio segment. The - [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) should match the audio language. - response_format: The format of the transcript output, in one of these options: `json`, `text`, - `srt`, `verbose_json`, or `vtt`. + response_format: The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and @@ -214,7 +356,7 @@ async def create( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=Transcription, + cast_to=_get_response_format_type(response_format), ) @@ -252,3 +394,22 @@ def __init__(self, transcriptions: AsyncTranscriptions) -> None: self.create = async_to_streamed_response_wrapper( transcriptions.create, ) + + +def _get_response_format_type( + response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven, +) -> type[Transcription | TranscriptionVerbose | str]: + if isinstance(response_format, NotGiven) or response_format is None: # pyright: ignore[reportUnnecessaryComparison] + return Transcription + + if response_format == "json": + return Transcription + elif response_format == "verbose_json": + return TranscriptionVerbose + elif response_format == "srt" or response_format == "text" or response_format == "vtt": + return str + elif TYPE_CHECKING: # type: ignore[unreachable] + assert_never(response_format) + else: + log.warning("Unexpected audio response format: %s", response_format) + return Transcription diff --git a/portkey_ai/_vendor/openai/resources/audio/translations.py b/portkey_ai/_vendor/openai/resources/audio/translations.py index ed97ccf8..a2d28afa 100644 --- a/portkey_ai/_vendor/openai/resources/audio/translations.py +++ b/portkey_ai/_vendor/openai/resources/audio/translations.py @@ -2,11 +2,14 @@ from __future__ import annotations -from typing import Union, Mapping, cast +import logging +from typing import TYPE_CHECKING, Union, Mapping, cast +from typing_extensions import Literal, overload, assert_never import httpx from ...
import _legacy_response +from ...types import AudioResponseFormat from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes from ..._utils import ( extract_files, @@ -21,26 +24,76 @@ from ..._base_client import make_request_options from ...types.audio_model import AudioModel from ...types.audio.translation import Translation +from ...types.audio_response_format import AudioResponseFormat +from ...types.audio.translation_verbose import TranslationVerbose __all__ = ["Translations", "AsyncTranslations"] +log: logging.Logger = logging.getLogger("openai.audio.translations") + class Translations(SyncAPIResource): @cached_property def with_raw_response(self) -> TranslationsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return TranslationsWithRawResponse(self) @cached_property def with_streaming_response(self) -> TranslationsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return TranslationsWithStreamingResponse(self) + @overload + def create( + self, + *, + file: FileTypes, + model: Union[str, AudioModel], + response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN, + prompt: str | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Translation: ... + + @overload + def create( + self, + *, + file: FileTypes, + model: Union[str, AudioModel], + response_format: Literal["verbose_json"], + prompt: str | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> TranslationVerbose: ... + + @overload def create( self, *, file: FileTypes, model: Union[str, AudioModel], + response_format: Literal["text", "srt", "vtt"], prompt: str | NotGiven = NOT_GIVEN, - response_format: str | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -48,7 +101,23 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Translation: + ) -> str: ...
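The translations resource mirrors the same literal-driven overloads that transcriptions gained above; a minimal usage sketch under the same assumptions (a local audio file and the `whisper-1` model):

```python
from openai import OpenAI

client = OpenAI()

with open("speech.mp3", "rb") as f:
    # response_format="text" selects the plain-str overload declared above.
    english_text = client.audio.translations.create(
        file=f,
        model="whisper-1",
        response_format="text",
    )
print(english_text)
```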
+ + def create( + self, + *, + file: FileTypes, + model: Union[str, AudioModel], + prompt: str | NotGiven = NOT_GIVEN, + response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Translation | TranslationVerbose | str: """ Translates audio into English. @@ -61,11 +130,11 @@ def create( prompt: An optional text to guide the model's style or continue a previous audio segment. The - [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) should be in English. - response_format: The format of the transcript output, in one of these options: `json`, `text`, - `srt`, `verbose_json`, or `vtt`. + response_format: The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and @@ -95,33 +164,45 @@ def create( # sent to the server will contain a `boundary` parameter, e.g. # multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( + return self._post( # type: ignore[return-value] "/audio/translations", body=maybe_transform(body, translation_create_params.TranslationCreateParams), files=files, options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=Translation, + cast_to=_get_response_format_type(response_format), ) class AsyncTranslations(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncTranslationsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncTranslationsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncTranslationsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncTranslationsWithStreamingResponse(self) + @overload async def create( self, *, file: FileTypes, model: Union[str, AudioModel], + response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, - response_format: str | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -129,7 +210,57 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Translation: + ) -> Translation: ... 
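Because the `.with_raw_response` docstrings added throughout these files are terse, here is a short sketch of the pattern they point at, based on the README section they link to (the resource, file name, and header shown are illustrative):

```python
from openai import OpenAI

client = OpenAI()

with open("speech.mp3", "rb") as f:
    raw = client.audio.translations.with_raw_response.create(
        file=f,
        model="whisper-1",
    )

print(raw.headers.get("x-request-id"))  # HTTP-level access to the raw response
translation = raw.parse()  # then parse into the usual `Translation` object
print(translation.text)
```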
+ + @overload + async def create( + self, + *, + file: FileTypes, + model: Union[str, AudioModel], + response_format: Literal["verbose_json"], + prompt: str | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> TranslationVerbose: ... + + @overload + async def create( + self, + *, + file: FileTypes, + model: Union[str, AudioModel], + response_format: Literal["text", "srt", "vtt"], + prompt: str | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> str: ... + + async def create( + self, + *, + file: FileTypes, + model: Union[str, AudioModel], + prompt: str | NotGiven = NOT_GIVEN, + response_format: Union[AudioResponseFormat, NotGiven] = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Translation | TranslationVerbose | str: """ Translates audio into English. @@ -142,11 +273,11 @@ async def create( prompt: An optional text to guide the model's style or continue a previous audio segment. The - [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) should be in English. - response_format: The format of the transcript output, in one of these options: `json`, `text`, - `srt`, `verbose_json`, or `vtt`. + response_format: The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and @@ -183,7 +314,7 @@ async def create( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=Translation, + cast_to=_get_response_format_type(response_format), ) @@ -221,3 +352,22 @@ def __init__(self, translations: AsyncTranslations) -> None: self.create = async_to_streamed_response_wrapper( translations.create, ) + + +def _get_response_format_type( + response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven, +) -> type[Translation | TranslationVerbose | str]: + if isinstance(response_format, NotGiven) or response_format is None: # pyright: ignore[reportUnnecessaryComparison] + return Translation + + if response_format == "json": + return Translation + elif response_format == "verbose_json": + return TranslationVerbose + elif response_format == "srt" or response_format == "text" or response_format == "vtt": + return str + elif TYPE_CHECKING: # type: ignore[unreachable] + assert_never(response_format) + else: + log.warning("Unexpected audio response format: %s", response_format) + return Translation diff --git a/portkey_ai/_vendor/openai/resources/batches.py b/portkey_ai/_vendor/openai/resources/batches.py index 7152fac6..7cab7578 100644 --- a/portkey_ai/_vendor/openai/resources/batches.py +++ b/portkey_ai/_vendor/openai/resources/batches.py @@ -30,10 +30,21 @@ class Batches(SyncAPIResource): @cached_property def with_raw_response(self) -> BatchesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return BatchesWithRawResponse(self) @cached_property def with_streaming_response(self) -> BatchesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return BatchesWithStreamingResponse(self) def create( @@ -70,7 +81,7 @@ def create( Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 - requests, and can be up to 100 MB in size. + requests, and can be up to 200 MB in size. metadata: Optional custom metadata for the batch. @@ -224,10 +235,21 @@ def cancel( class AsyncBatches(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncBatchesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncBatchesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncBatchesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncBatchesWithStreamingResponse(self) async def create( @@ -264,7 +286,7 @@ async def create( Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 - requests, and can be up to 100 MB in size. + requests, and can be up to 200 MB in size. metadata: Optional custom metadata for the batch. diff --git a/portkey_ai/_vendor/openai/resources/beta/assistants.py b/portkey_ai/_vendor/openai/resources/beta/assistants.py index 441390d2..7df212f1 100644 --- a/portkey_ai/_vendor/openai/resources/beta/assistants.py +++ b/portkey_ai/_vendor/openai/resources/beta/assistants.py @@ -35,10 +35,21 @@ class Assistants(SyncAPIResource): @cached_property def with_raw_response(self) -> AssistantsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AssistantsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AssistantsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AssistantsWithStreamingResponse(self) def create( @@ -68,8 +79,8 @@ def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. description: The description of the assistant. The maximum length is 512 characters. @@ -78,22 +89,22 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. name: The name of the assistant. The maximum length is 256 characters. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. 
**Important:** when using JSON mode, you **must** also instruct the model to @@ -222,28 +233,28 @@ def update( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. name: The name of the assistant. The maximum length is 256 characters. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -333,8 +344,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. @@ -410,10 +421,21 @@ def delete( class AsyncAssistants(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncAssistantsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncAssistantsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncAssistantsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncAssistantsWithStreamingResponse(self) async def create( @@ -443,8 +465,8 @@ async def create( model: ID of the model to use. 
You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. description: The description of the assistant. The maximum length is 512 characters. @@ -453,22 +475,22 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. name: The name of the assistant. The maximum length is 256 characters. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -597,28 +619,28 @@ async def update( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. name: The name of the assistant. The maximum length is 256 characters. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
- Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -708,8 +730,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. diff --git a/portkey_ai/_vendor/openai/resources/beta/beta.py b/portkey_ai/_vendor/openai/resources/beta/beta.py index 479c97c4..1ffa6c8e 100644 --- a/portkey_ai/_vendor/openai/resources/beta/beta.py +++ b/portkey_ai/_vendor/openai/resources/beta/beta.py @@ -2,14 +2,6 @@ from __future__ import annotations -from .threads import ( - Threads, - AsyncThreads, - ThreadsWithRawResponse, - AsyncThreadsWithRawResponse, - ThreadsWithStreamingResponse, - AsyncThreadsWithStreamingResponse, -) from ..._compat import cached_property from .chat.chat import Chat, AsyncChat from .assistants import ( @@ -21,7 +13,23 @@ AsyncAssistantsWithStreamingResponse, ) from ..._resource import SyncAPIResource, AsyncAPIResource -from .vector_stores import ( +from .threads.threads import ( + Threads, + AsyncThreads, + ThreadsWithRawResponse, + AsyncThreadsWithRawResponse, + ThreadsWithStreamingResponse, + AsyncThreadsWithStreamingResponse, +) +from .realtime.realtime import ( + Realtime, + AsyncRealtime, + RealtimeWithRawResponse, + AsyncRealtimeWithRawResponse, + RealtimeWithStreamingResponse, + AsyncRealtimeWithStreamingResponse, +) +from .vector_stores.vector_stores import ( VectorStores, AsyncVectorStores, VectorStoresWithRawResponse, @@ -29,8 +37,6 @@ VectorStoresWithStreamingResponse, AsyncVectorStoresWithStreamingResponse, ) -from .threads.threads import Threads, AsyncThreads -from .vector_stores.vector_stores import VectorStores, AsyncVectorStores __all__ = ["Beta", "AsyncBeta"] @@ -40,6 +46,10 @@ class Beta(SyncAPIResource): def chat(self) -> Chat: return Chat(self._client) + @cached_property + def realtime(self) -> Realtime: + return Realtime(self._client) + @cached_property def vector_stores(self) -> VectorStores: return VectorStores(self._client) @@ -54,10 +64,21 @@ def threads(self) -> Threads: @cached_property def with_raw_response(self) -> BetaWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return BetaWithRawResponse(self) @cached_property def with_streaming_response(self) -> BetaWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return BetaWithStreamingResponse(self) @@ -66,6 +87,10 @@ class AsyncBeta(AsyncAPIResource): def chat(self) -> AsyncChat: return AsyncChat(self._client) + @cached_property + def realtime(self) -> AsyncRealtime: + return AsyncRealtime(self._client) + @cached_property def vector_stores(self) -> AsyncVectorStores: return AsyncVectorStores(self._client) @@ -80,10 +105,21 @@ def threads(self) -> AsyncThreads: @cached_property def with_raw_response(self) -> AsyncBetaWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncBetaWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncBetaWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncBetaWithStreamingResponse(self) @@ -91,6 +127,10 @@ class BetaWithRawResponse: def __init__(self, beta: Beta) -> None: self._beta = beta + @cached_property + def realtime(self) -> RealtimeWithRawResponse: + return RealtimeWithRawResponse(self._beta.realtime) + @cached_property def vector_stores(self) -> VectorStoresWithRawResponse: return VectorStoresWithRawResponse(self._beta.vector_stores) @@ -108,6 +148,10 @@ class AsyncBetaWithRawResponse: def __init__(self, beta: AsyncBeta) -> None: self._beta = beta + @cached_property + def realtime(self) -> AsyncRealtimeWithRawResponse: + return AsyncRealtimeWithRawResponse(self._beta.realtime) + @cached_property def vector_stores(self) -> AsyncVectorStoresWithRawResponse: return AsyncVectorStoresWithRawResponse(self._beta.vector_stores) @@ -125,6 +169,10 @@ class BetaWithStreamingResponse: def __init__(self, beta: Beta) -> None: self._beta = beta + @cached_property + def realtime(self) -> RealtimeWithStreamingResponse: + return RealtimeWithStreamingResponse(self._beta.realtime) + @cached_property def vector_stores(self) -> VectorStoresWithStreamingResponse: return VectorStoresWithStreamingResponse(self._beta.vector_stores) @@ -142,6 +190,10 @@ class AsyncBetaWithStreamingResponse: def __init__(self, beta: AsyncBeta) -> None: self._beta = beta + @cached_property + def realtime(self) -> AsyncRealtimeWithStreamingResponse: + return AsyncRealtimeWithStreamingResponse(self._beta.realtime) + @cached_property def vector_stores(self) -> AsyncVectorStoresWithStreamingResponse: return AsyncVectorStoresWithStreamingResponse(self._beta.vector_stores) diff --git a/portkey_ai/_vendor/openai/resources/beta/chat/completions.py b/portkey_ai/_vendor/openai/resources/beta/chat/completions.py index 88ea2c05..48cb13f7 100644 --- a/portkey_ai/_vendor/openai/resources/beta/chat/completions.py +++ b/portkey_ai/_vendor/openai/resources/beta/chat/completions.py @@ -2,16 +2,24 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional +from typing import Dict, List, Type, Union, Iterable, Optional, cast from functools import partial from typing_extensions import Literal import httpx +from .... 
 from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
 from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
 from ...._streaming import Stream
-from ....types.chat import completion_create_params
+from ....types.chat import (
+    ChatCompletionReasoningEffort,
+    completion_create_params,
+)
+from ...._base_client import make_request_options
 from ....lib._parsing import (
     ResponseFormatT,
     validate_input_tools as _validate_input_tools,
@@ -20,35 +28,65 @@
 )
 from ....types.chat_model import ChatModel
 from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
+from ....types.chat.chat_completion import ChatCompletion
 from ....types.chat.chat_completion_chunk import ChatCompletionChunk
 from ....types.chat.parsed_chat_completion import ParsedChatCompletion
+from ....types.chat.chat_completion_modality import ChatCompletionModality
 from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
+from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
 from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
 from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
 from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
 
 __all__ = ["Completions", "AsyncCompletions"]
 
 
 class Completions(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> CompletionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return the
+        raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return CompletionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
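+
+        A usage sketch (the schema type `MySchema` and the header read here are
+        illustrative):
+
+        ```py
+        with client.beta.chat.completions.with_streaming_response.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[...],
+            response_format=MySchema,
+        ) as response:
+            print(response.headers.get("x-request-id"))
+        ```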
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return CompletionsWithStreamingResponse(self) + def parse( self, *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, @@ -78,14 +116,17 @@ def parse( from pydantic import BaseModel from openai import OpenAI + class Step(BaseModel): explanation: str output: str + class MathResponse(BaseModel): steps: List[Step] final_answer: str + client = OpenAI() completion = client.beta.chat.completions.parse( model="gpt-4o-2024-08-06", @@ -109,38 +150,61 @@ class MathResponse(BaseModel): **(extra_headers or {}), } - raw_completion = self._client.chat.completions.create( - messages=messages, - model=model, - response_format=_type_to_response_format(response_format), - frequency_penalty=frequency_penalty, - function_call=function_call, - functions=functions, - logit_bias=logit_bias, - logprobs=logprobs, - max_tokens=max_tokens, - n=n, - parallel_tool_calls=parallel_tool_calls, - presence_penalty=presence_penalty, - seed=seed, - service_tier=service_tier, - stop=stop, - stream_options=stream_options, - temperature=temperature, - tool_choice=tool_choice, - tools=tools, - top_logprobs=top_logprobs, - top_p=top_p, - user=user, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - return _parse_chat_completion( - response_format=response_format, - chat_completion=raw_completion, - input_tools=tools, + def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]: + return _parse_chat_completion( + response_format=response_format, + chat_completion=raw_completion, + input_tools=tools, + ) + + return self._post( + "/chat/completions", + body=maybe_transform( + { + "messages": messages, + "model": model, + "audio": audio, + "frequency_penalty": frequency_penalty, + "function_call": function_call, + "functions": functions, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, + "max_tokens": max_tokens, + "metadata": metadata, + 
"modalities": modalities, + "n": n, + "parallel_tool_calls": parallel_tool_calls, + "prediction": prediction, + "presence_penalty": presence_penalty, + "reasoning_effort": reasoning_effort, + "response_format": _type_to_response_format(response_format), + "seed": seed, + "service_tier": service_tier, + "stop": stop, + "store": store, + "stream": False, + "stream_options": stream_options, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_logprobs": top_logprobs, + "top_p": top_p, + "user": user, + }, + completion_create_params.CompletionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + post_parser=parser, + ), + # we turn the `ChatCompletion` instance into a `ParsedChatCompletion` + # in the `parser` function above + cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion), + stream=False, ) def stream( @@ -148,19 +212,26 @@ def stream( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, @@ -184,12 +255,12 @@ def stream( ```py with client.beta.chat.completions.stream( - model='gpt-4o-2024-08-06', + model="gpt-4o-2024-08-06", messages=[...], ) as stream: for event in stream: - if event.type == 'content.delta': - print(event.content, flush=True, end='') + if event.type == "content.delta": + print(event.delta, flush=True, end="") ``` When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)` is an iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events). 
@@ -206,6 +277,7 @@ def stream(
             self._client.chat.completions.create,
             messages=messages,
             model=model,
+            audio=audio,
             stream=True,
             response_format=_type_to_response_format(response_format),
             frequency_penalty=frequency_penalty,
@@ -213,12 +285,18 @@
             functions=functions,
             logit_bias=logit_bias,
             logprobs=logprobs,
+            max_completion_tokens=max_completion_tokens,
             max_tokens=max_tokens,
+            metadata=metadata,
+            modalities=modalities,
             n=n,
             parallel_tool_calls=parallel_tool_calls,
+            prediction=prediction,
             presence_penalty=presence_penalty,
+            reasoning_effort=reasoning_effort,
             seed=seed,
             service_tier=service_tier,
+            store=store,
             stop=stop,
             stream_options=stream_options,
             temperature=temperature,
@@ -240,24 +318,50 @@
 
 
 class AsyncCompletions(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return the
+        raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncCompletionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncCompletionsWithStreamingResponse(self)
+
     async def parse(
         self,
         *,
         messages: Iterable[ChatCompletionMessageParam],
         model: Union[str, ChatModel],
+        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
         response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
         frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
         functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
         logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
         logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+        modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
@@ -287,14 +391,17 @@ async def parse(
         from pydantic import BaseModel
         from openai import AsyncOpenAI
 
+
         class Step(BaseModel):
             explanation: str
             output: str
 
+
         class MathResponse(BaseModel):
             steps: List[Step]
             final_answer: str
 
+
         client = AsyncOpenAI()
         completion = await client.beta.chat.completions.parse(
             model="gpt-4o-2024-08-06",
@@ -318,38 +425,61 @@ class MathResponse(BaseModel):
             **(extra_headers or {}),
         }
 
-        raw_completion = await self._client.chat.completions.create(
-
messages=messages, - model=model, - response_format=_type_to_response_format(response_format), - frequency_penalty=frequency_penalty, - function_call=function_call, - functions=functions, - logit_bias=logit_bias, - logprobs=logprobs, - max_tokens=max_tokens, - n=n, - parallel_tool_calls=parallel_tool_calls, - presence_penalty=presence_penalty, - seed=seed, - service_tier=service_tier, - stop=stop, - stream_options=stream_options, - temperature=temperature, - tool_choice=tool_choice, - tools=tools, - top_logprobs=top_logprobs, - top_p=top_p, - user=user, - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - ) - return _parse_chat_completion( - response_format=response_format, - chat_completion=raw_completion, - input_tools=tools, + def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]: + return _parse_chat_completion( + response_format=response_format, + chat_completion=raw_completion, + input_tools=tools, + ) + + return await self._post( + "/chat/completions", + body=await async_maybe_transform( + { + "messages": messages, + "model": model, + "audio": audio, + "frequency_penalty": frequency_penalty, + "function_call": function_call, + "functions": functions, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, + "max_tokens": max_tokens, + "metadata": metadata, + "modalities": modalities, + "n": n, + "parallel_tool_calls": parallel_tool_calls, + "prediction": prediction, + "presence_penalty": presence_penalty, + "reasoning_effort": reasoning_effort, + "response_format": _type_to_response_format(response_format), + "seed": seed, + "service_tier": service_tier, + "store": store, + "stop": stop, + "stream": False, + "stream_options": stream_options, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_logprobs": top_logprobs, + "top_p": top_p, + "user": user, + }, + completion_create_params.CompletionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + post_parser=parser, + ), + # we turn the `ChatCompletion` instance into a `ParsedChatCompletion` + # in the `parser` function above + cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion), + stream=False, ) def stream( @@ -357,19 +487,26 @@ def stream( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = 
NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, @@ -393,12 +530,12 @@ def stream( ```py async with client.beta.chat.completions.stream( - model='gpt-4o-2024-08-06', + model="gpt-4o-2024-08-06", messages=[...], ) as stream: async for event in stream: - if event.type == 'content.delta': - print(event.content, flush=True, end='') + if event.type == "content.delta": + print(event.delta, flush=True, end="") ``` When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)` is an async iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events). @@ -416,6 +553,7 @@ def stream( api_request = self._client.chat.completions.create( messages=messages, model=model, + audio=audio, stream=True, response_format=_type_to_response_format(response_format), frequency_penalty=frequency_penalty, @@ -423,13 +561,19 @@ def stream( functions=functions, logit_bias=logit_bias, logprobs=logprobs, + max_completion_tokens=max_completion_tokens, max_tokens=max_tokens, + metadata=metadata, + modalities=modalities, n=n, parallel_tool_calls=parallel_tool_calls, + prediction=prediction, presence_penalty=presence_penalty, + reasoning_effort=reasoning_effort, seed=seed, service_tier=service_tier, stop=stop, + store=store, stream_options=stream_options, temperature=temperature, tool_choice=tool_choice, @@ -447,3 +591,39 @@ def stream( response_format=response_format, input_tools=tools, ) + + +class CompletionsWithRawResponse: + def __init__(self, completions: Completions) -> None: + self._completions = completions + + self.parse = _legacy_response.to_raw_response_wrapper( + completions.parse, + ) + + +class AsyncCompletionsWithRawResponse: + def __init__(self, completions: AsyncCompletions) -> None: + self._completions = completions + + self.parse = _legacy_response.async_to_raw_response_wrapper( + completions.parse, + ) + + +class CompletionsWithStreamingResponse: + def __init__(self, completions: Completions) -> None: + self._completions = completions + + self.parse = to_streamed_response_wrapper( + completions.parse, + ) + + +class AsyncCompletionsWithStreamingResponse: + def __init__(self, completions: AsyncCompletions) -> None: + self._completions = completions + + self.parse = async_to_streamed_response_wrapper( + completions.parse, + ) diff --git a/portkey_ai/_vendor/openai/resources/beta/realtime/__init__.py b/portkey_ai/_vendor/openai/resources/beta/realtime/__init__.py new file mode 100644 index 00000000..474434e6 --- /dev/null +++ b/portkey_ai/_vendor/openai/resources/beta/realtime/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+
+from .realtime import (
+    Realtime,
+    AsyncRealtime,
+    RealtimeWithRawResponse,
+    AsyncRealtimeWithRawResponse,
+    RealtimeWithStreamingResponse,
+    AsyncRealtimeWithStreamingResponse,
+)
+from .sessions import (
+    Sessions,
+    AsyncSessions,
+    SessionsWithRawResponse,
+    AsyncSessionsWithRawResponse,
+    SessionsWithStreamingResponse,
+    AsyncSessionsWithStreamingResponse,
+)
+
+__all__ = [
+    "Sessions",
+    "AsyncSessions",
+    "SessionsWithRawResponse",
+    "AsyncSessionsWithRawResponse",
+    "SessionsWithStreamingResponse",
+    "AsyncSessionsWithStreamingResponse",
+    "Realtime",
+    "AsyncRealtime",
+    "RealtimeWithRawResponse",
+    "AsyncRealtimeWithRawResponse",
+    "RealtimeWithStreamingResponse",
+    "AsyncRealtimeWithStreamingResponse",
+]
diff --git a/portkey_ai/_vendor/openai/resources/beta/realtime/realtime.py b/portkey_ai/_vendor/openai/resources/beta/realtime/realtime.py
new file mode 100644
index 00000000..c79fd462
--- /dev/null
+++ b/portkey_ai/_vendor/openai/resources/beta/realtime/realtime.py
@@ -0,0 +1,954 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import json
+import logging
+from types import TracebackType
+from typing import TYPE_CHECKING, Any, Iterator, cast
+from typing_extensions import AsyncIterator
+
+import httpx
+from pydantic import BaseModel
+
+from .sessions import (
+    Sessions,
+    AsyncSessions,
+    SessionsWithRawResponse,
+    AsyncSessionsWithRawResponse,
+    SessionsWithStreamingResponse,
+    AsyncSessionsWithStreamingResponse,
+)
+from ...._types import NOT_GIVEN, Query, Headers, NotGiven
+from ...._utils import (
+    maybe_transform,
+    strip_not_given,
+    async_maybe_transform,
+)
+from ...._compat import cached_property
+from ...._models import construct_type_unchecked
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._exceptions import OpenAIError
+from ...._base_client import _merge_mappings
+from ....types.beta.realtime import session_update_event_param, response_create_event_param
+from ....types.websocket_connection_options import WebsocketConnectionOptions
+from ....types.beta.realtime.realtime_client_event import RealtimeClientEvent
+from ....types.beta.realtime.realtime_server_event import RealtimeServerEvent
+from ....types.beta.realtime.conversation_item_param import ConversationItemParam
+from ....types.beta.realtime.realtime_client_event_param import RealtimeClientEventParam
+
+if TYPE_CHECKING:
+    from websockets.sync.client import ClientConnection as WebsocketConnection
+    from websockets.asyncio.client import ClientConnection as AsyncWebsocketConnection
+
+    from ...._client import OpenAI, AsyncOpenAI
+
+__all__ = ["Realtime", "AsyncRealtime"]
+
+log: logging.Logger = logging.getLogger(__name__)
+
+
+class Realtime(SyncAPIResource):
+    @cached_property
+    def sessions(self) -> Sessions:
+        return Sessions(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> RealtimeWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return the
+        raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return RealtimeWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> RealtimeWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return RealtimeWithStreamingResponse(self)
+
+    def connect(
+        self,
+        *,
+        model: str,
+        extra_query: Query = {},
+        extra_headers: Headers = {},
+        websocket_connection_options: WebsocketConnectionOptions = {},
+    ) -> RealtimeConnectionManager:
+        """
+        The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling.
+
+        Some notable benefits of the API include:
+
+        - Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output.
+        - Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction.
+        - Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback.
+
+        The Realtime API is a stateful, event-based API that communicates over a WebSocket.
+        """
+        return RealtimeConnectionManager(
+            client=self._client,
+            extra_query=extra_query,
+            extra_headers=extra_headers,
+            websocket_connection_options=websocket_connection_options,
+            model=model,
+        )
+
+
+class AsyncRealtime(AsyncAPIResource):
+    @cached_property
+    def sessions(self) -> AsyncSessions:
+        return AsyncSessions(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncRealtimeWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return the
+        raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return AsyncRealtimeWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncRealtimeWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return AsyncRealtimeWithStreamingResponse(self)
+
+    def connect(
+        self,
+        *,
+        model: str,
+        extra_query: Query = {},
+        extra_headers: Headers = {},
+        websocket_connection_options: WebsocketConnectionOptions = {},
+    ) -> AsyncRealtimeConnectionManager:
+        """
+        The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling.
+
+        Some notable benefits of the API include:
+
+        - Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output.
+        - Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction.
+        - Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback.
+
+        The Realtime API is a stateful, event-based API that communicates over a WebSocket.
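+
+        A minimal usage sketch (the model name is illustrative, and the
+        `openai[realtime]` extra must be installed):
+
+        ```py
+        async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
+            await connection.session.update(session={"modalities": ["text"]})
+            await connection.conversation.item.create(
+                item={
+                    "type": "message",
+                    "role": "user",
+                    "content": [{"type": "input_text", "text": "Say hello"}],
+                }
+            )
+            await connection.response.create()
+            async for event in connection:
+                if event.type == "response.done":
+                    break
+        ```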
+ """ + return AsyncRealtimeConnectionManager( + client=self._client, + extra_query=extra_query, + extra_headers=extra_headers, + websocket_connection_options=websocket_connection_options, + model=model, + ) + + +class RealtimeWithRawResponse: + def __init__(self, realtime: Realtime) -> None: + self._realtime = realtime + + @cached_property + def sessions(self) -> SessionsWithRawResponse: + return SessionsWithRawResponse(self._realtime.sessions) + + +class AsyncRealtimeWithRawResponse: + def __init__(self, realtime: AsyncRealtime) -> None: + self._realtime = realtime + + @cached_property + def sessions(self) -> AsyncSessionsWithRawResponse: + return AsyncSessionsWithRawResponse(self._realtime.sessions) + + +class RealtimeWithStreamingResponse: + def __init__(self, realtime: Realtime) -> None: + self._realtime = realtime + + @cached_property + def sessions(self) -> SessionsWithStreamingResponse: + return SessionsWithStreamingResponse(self._realtime.sessions) + + +class AsyncRealtimeWithStreamingResponse: + def __init__(self, realtime: AsyncRealtime) -> None: + self._realtime = realtime + + @cached_property + def sessions(self) -> AsyncSessionsWithStreamingResponse: + return AsyncSessionsWithStreamingResponse(self._realtime.sessions) + + +class AsyncRealtimeConnection: + """Represents a live websocket connection to the Realtime API""" + + session: AsyncRealtimeSessionResource + response: AsyncRealtimeResponseResource + conversation: AsyncRealtimeConversationResource + input_audio_buffer: AsyncRealtimeInputAudioBufferResource + + _connection: AsyncWebsocketConnection + + def __init__(self, connection: AsyncWebsocketConnection) -> None: + self._connection = connection + + self.session = AsyncRealtimeSessionResource(self) + self.response = AsyncRealtimeResponseResource(self) + self.conversation = AsyncRealtimeConversationResource(self) + self.input_audio_buffer = AsyncRealtimeInputAudioBufferResource(self) + + async def __aiter__(self) -> AsyncIterator[RealtimeServerEvent]: + """ + An infinite-iterator that will continue to yield events until + the connection is closed. + """ + from websockets.exceptions import ConnectionClosedOK + + try: + while True: + yield await self.recv() + except ConnectionClosedOK: + return + + async def recv(self) -> RealtimeServerEvent: + """ + Receive the next message from the connection and parses it into a `RealtimeServerEvent` object. + + Canceling this method is safe. There's no risk of losing data. + """ + return self.parse_event(await self.recv_bytes()) + + async def recv_bytes(self) -> bytes: + """Receive the next message from the connection as raw bytes. + + Canceling this method is safe. There's no risk of losing data. + + If you want to parse the message into a `RealtimeServerEvent` object like `.recv()` does, + then you can call `.parse_event(data)`. 
+ """ + message = await self._connection.recv(decode=False) + log.debug(f"Received websocket message: %s", message) + if not isinstance(message, bytes): + # passing `decode=False` should always result in us getting `bytes` back + raise TypeError(f"Expected `.recv(decode=False)` to return `bytes` but got {type(message)}") + + return message + + async def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None: + data = ( + event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True) + if isinstance(event, BaseModel) + else json.dumps(await async_maybe_transform(event, RealtimeClientEventParam)) + ) + await self._connection.send(data) + + async def close(self, *, code: int = 1000, reason: str = "") -> None: + await self._connection.close(code=code, reason=reason) + + def parse_event(self, data: str | bytes) -> RealtimeServerEvent: + """ + Converts a raw `str` or `bytes` message into a `RealtimeServerEvent` object. + + This is helpful if you're using `.recv_bytes()`. + """ + return cast( + RealtimeServerEvent, construct_type_unchecked(value=json.loads(data), type_=cast(Any, RealtimeServerEvent)) + ) + + +class AsyncRealtimeConnectionManager: + """ + Context manager over a `AsyncRealtimeConnection` that is returned by `beta.realtime.connect()` + + This context manager ensures that the connection will be closed when it exits. + + --- + + Note that if your application doesn't work well with the context manager approach then you + can call the `.enter()` method directly to initiate a connection. + + **Warning**: You must remember to close the connection with `.close()`. + + ```py + connection = await client.beta.realtime.connect(...).enter() + # ... + await connection.close() + ``` + """ + + def __init__( + self, + *, + client: AsyncOpenAI, + model: str, + extra_query: Query, + extra_headers: Headers, + websocket_connection_options: WebsocketConnectionOptions, + ) -> None: + self.__client = client + self.__model = model + self.__connection: AsyncRealtimeConnection | None = None + self.__extra_query = extra_query + self.__extra_headers = extra_headers + self.__websocket_connection_options = websocket_connection_options + + async def __aenter__(self) -> AsyncRealtimeConnection: + """ + 👋 If your application doesn't work well with the context manager approach then you + can call this method directly to initiate a connection. + + **Warning**: You must remember to close the connection with `.close()`. + + ```py + connection = await client.beta.realtime.connect(...).enter() + # ... 
+        await connection.close()
+        ```
+        """
+        try:
+            from websockets.asyncio.client import connect
+        except ImportError as exc:
+            raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc
+
+        url = self._prepare_url().copy_with(
+            params={
+                **self.__client.base_url.params,
+                "model": self.__model,
+                **self.__extra_query,
+            },
+        )
+        log.debug("Connecting to %s", url)
+        if self.__websocket_connection_options:
+            log.debug("Connection options: %s", self.__websocket_connection_options)
+
+        self.__connection = AsyncRealtimeConnection(
+            await connect(
+                str(url),
+                user_agent_header=self.__client.user_agent,
+                additional_headers=_merge_mappings(
+                    {
+                        **self.__client.auth_headers,
+                        "OpenAI-Beta": "realtime=v1",
+                    },
+                    self.__extra_headers,
+                ),
+                **self.__websocket_connection_options,
+            )
+        )
+
+        return self.__connection
+
+    enter = __aenter__
+
+    def _prepare_url(self) -> httpx.URL:
+        if self.__client.websocket_base_url is not None:
+            base_url = httpx.URL(self.__client.websocket_base_url)
+        else:
+            base_url = self.__client._base_url.copy_with(scheme="wss")
+
+        merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime"
+        return base_url.copy_with(raw_path=merge_raw_path)
+
+    async def __aexit__(
+        self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None
+    ) -> None:
+        if self.__connection is not None:
+            await self.__connection.close()
+
+
+class RealtimeConnection:
+    """Represents a live websocket connection to the Realtime API"""
+
+    session: RealtimeSessionResource
+    response: RealtimeResponseResource
+    conversation: RealtimeConversationResource
+    input_audio_buffer: RealtimeInputAudioBufferResource
+
+    _connection: WebsocketConnection
+
+    def __init__(self, connection: WebsocketConnection) -> None:
+        self._connection = connection
+
+        self.session = RealtimeSessionResource(self)
+        self.response = RealtimeResponseResource(self)
+        self.conversation = RealtimeConversationResource(self)
+        self.input_audio_buffer = RealtimeInputAudioBufferResource(self)
+
+    def __iter__(self) -> Iterator[RealtimeServerEvent]:
+        """
+        An infinite iterator that will continue to yield events until
+        the connection is closed.
+        """
+        from websockets.exceptions import ConnectionClosedOK
+
+        try:
+            while True:
+                yield self.recv()
+        except ConnectionClosedOK:
+            return
+
+    def recv(self) -> RealtimeServerEvent:
+        """
+        Receive the next message from the connection and parse it into a `RealtimeServerEvent` object.
+
+        Canceling this method is safe. There's no risk of losing data.
+        """
+        return self.parse_event(self.recv_bytes())
+
+    def recv_bytes(self) -> bytes:
+        """Receive the next message from the connection as raw bytes.
+
+        Canceling this method is safe. There's no risk of losing data.
+
+        If you want to parse the message into a `RealtimeServerEvent` object like `.recv()` does,
+        then you can call `.parse_event(data)`.
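+
+        A short sketch of the manual path (equivalent to what `.recv()` does):
+
+        ```py
+        data = connection.recv_bytes()
+        event = connection.parse_event(data)
+        ```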
+ """ + message = self._connection.recv(decode=False) + log.debug(f"Received websocket message: %s", message) + if not isinstance(message, bytes): + # passing `decode=False` should always result in us getting `bytes` back + raise TypeError(f"Expected `.recv(decode=False)` to return `bytes` but got {type(message)}") + + return message + + def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None: + data = ( + event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True) + if isinstance(event, BaseModel) + else json.dumps(maybe_transform(event, RealtimeClientEventParam)) + ) + self._connection.send(data) + + def close(self, *, code: int = 1000, reason: str = "") -> None: + self._connection.close(code=code, reason=reason) + + def parse_event(self, data: str | bytes) -> RealtimeServerEvent: + """ + Converts a raw `str` or `bytes` message into a `RealtimeServerEvent` object. + + This is helpful if you're using `.recv_bytes()`. + """ + return cast( + RealtimeServerEvent, construct_type_unchecked(value=json.loads(data), type_=cast(Any, RealtimeServerEvent)) + ) + + +class RealtimeConnectionManager: + """ + Context manager over a `RealtimeConnection` that is returned by `beta.realtime.connect()` + + This context manager ensures that the connection will be closed when it exits. + + --- + + Note that if your application doesn't work well with the context manager approach then you + can call the `.enter()` method directly to initiate a connection. + + **Warning**: You must remember to close the connection with `.close()`. + + ```py + connection = client.beta.realtime.connect(...).enter() + # ... + connection.close() + ``` + """ + + def __init__( + self, + *, + client: OpenAI, + model: str, + extra_query: Query, + extra_headers: Headers, + websocket_connection_options: WebsocketConnectionOptions, + ) -> None: + self.__client = client + self.__model = model + self.__connection: RealtimeConnection | None = None + self.__extra_query = extra_query + self.__extra_headers = extra_headers + self.__websocket_connection_options = websocket_connection_options + + def __enter__(self) -> RealtimeConnection: + """ + 👋 If your application doesn't work well with the context manager approach then you + can call this method directly to initiate a connection. + + **Warning**: You must remember to close the connection with `.close()`. + + ```py + connection = client.beta.realtime.connect(...).enter() + # ... 
+ connection.close() + ``` + """ + try: + from websockets.sync.client import connect + except ImportError as exc: + raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc + + url = self._prepare_url().copy_with( + params={ + **self.__client.base_url.params, + "model": self.__model, + **self.__extra_query, + }, + ) + log.debug("Connecting to %s", url) + if self.__websocket_connection_options: + log.debug("Connection options: %s", self.__websocket_connection_options) + + self.__connection = RealtimeConnection( + connect( + str(url), + user_agent_header=self.__client.user_agent, + additional_headers=_merge_mappings( + { + **self.__client.auth_headers, + "OpenAI-Beta": "realtime=v1", + }, + self.__extra_headers, + ), + **self.__websocket_connection_options, + ) + ) + + return self.__connection + + enter = __enter__ + + def _prepare_url(self) -> httpx.URL: + if self.__client.websocket_base_url is not None: + base_url = httpx.URL(self.__client.websocket_base_url) + else: + base_url = self.__client._base_url.copy_with(scheme="wss") + + merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime" + return base_url.copy_with(raw_path=merge_raw_path) + + def __exit__( + self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None + ) -> None: + if self.__connection is not None: + self.__connection.close() + + +class BaseRealtimeConnectionResource: + def __init__(self, connection: RealtimeConnection) -> None: + self._connection = connection + + +class RealtimeSessionResource(BaseRealtimeConnectionResource): + def update(self, *, session: session_update_event_param.Session, event_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event to update the session’s default configuration. + + The client may + send this event at any time to update the session configuration, and any + field may be updated at any time, except for "voice". The server will respond + with a `session.updated` event that shows the full effective configuration. + Only fields that are present are updated, thus the correct way to clear a + field like "instructions" is to pass an empty string. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "session.update", "session": session, "event_id": event_id}), + ) + ) + + +class RealtimeResponseResource(BaseRealtimeConnectionResource): + def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event to cancel an in-progress response. + + The server will respond + with a `response.cancelled` event or an error if there is no response to + cancel. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id}), + ) + ) + + def create( + self, + *, + event_id: str | NotGiven = NOT_GIVEN, + response: response_create_event_param.Response | NotGiven = NOT_GIVEN, + ) -> None: + """ + This event instructs the server to create a Response, which means triggering + model inference. When in Server VAD mode, the server will create Responses + automatically. + + A Response will include at least one Item, and may have two, in which case + the second will be a function call. These Items will be appended to the + conversation history. + + The server will respond with a `response.created` event, events for Items + and content created, and finally a `response.done` event to indicate the + Response is complete. 
+ + The `response.create` event includes inference configuration like + `instructions`, and `temperature`. These fields will override the Session's + configuration for this Response only. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "response.create", "event_id": event_id, "response": response}), + ) + ) + + +class RealtimeConversationResource(BaseRealtimeConnectionResource): + @cached_property + def item(self) -> RealtimeConversationItemResource: + return RealtimeConversationItemResource(self._connection) + + +class RealtimeConversationItemResource(BaseRealtimeConnectionResource): + def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event when you want to remove any item from the conversation + history. + + The server will respond with a `conversation.item.deleted` event, + unless the item does not exist in the conversation history, in which case the + server will respond with an error. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "conversation.item.delete", "item_id": item_id, "event_id": event_id}), + ) + ) + + def create( + self, + *, + item: ConversationItemParam, + event_id: str | NotGiven = NOT_GIVEN, + previous_item_id: str | NotGiven = NOT_GIVEN, + ) -> None: + """ + Add a new Item to the Conversation's context, including messages, function + calls, and function call responses. This event can be used both to populate a + "history" of the conversation and to add new items mid-stream, but has the + current limitation that it cannot populate assistant audio messages. + + If successful, the server will respond with a `conversation.item.created` + event, otherwise an `error` event will be sent. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given( + { + "type": "conversation.item.create", + "item": item, + "event_id": event_id, + "previous_item_id": previous_item_id, + } + ), + ) + ) + + def truncate( + self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | NotGiven = NOT_GIVEN + ) -> None: + """Send this event to truncate a previous assistant message’s audio. + + The server + will produce audio faster than realtime, so this event is useful when the user + interrupts to truncate audio that has already been sent to the client but not + yet played. This will synchronize the server's understanding of the audio with + the client's playback. + + Truncating audio will delete the server-side text transcript to ensure there + is not text in the context that hasn't been heard by the user. + + If successful, the server will respond with a `conversation.item.truncated` + event. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given( + { + "type": "conversation.item.truncate", + "audio_end_ms": audio_end_ms, + "content_index": content_index, + "item_id": item_id, + "event_id": event_id, + } + ), + ) + ) + + +class RealtimeInputAudioBufferResource(BaseRealtimeConnectionResource): + def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event to clear the audio bytes in the buffer. + + The server will + respond with an `input_audio_buffer.cleared` event. 
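+
+        A short sketch of the manual buffer flow (`b64_audio_chunk` is an
+        illustrative base64-encoded audio string):
+
+        ```py
+        connection.input_audio_buffer.append(audio=b64_audio_chunk)
+        connection.input_audio_buffer.clear()  # discard instead of committing
+        ```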
+ """ + self._connection.send( + cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id})) + ) + + def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None: + """ + Send this event to commit the user input audio buffer, which will create a + new user message item in the conversation. This event will produce an error + if the input audio buffer is empty. When in Server VAD mode, the client does + not need to send this event, the server will commit the audio buffer + automatically. + + Committing the input audio buffer will trigger input audio transcription + (if enabled in session configuration), but it will not create a response + from the model. The server will respond with an `input_audio_buffer.committed` + event. + """ + self._connection.send( + cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id})) + ) + + def append(self, *, audio: str, event_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event to append audio bytes to the input audio buffer. + + The audio + buffer is temporary storage you can write to and later commit. In Server VAD + mode, the audio buffer is used to detect speech and the server will decide + when to commit. When Server VAD is disabled, you must commit the audio buffer + manually. + + The client may choose how much audio to place in each event up to a maximum + of 15 MiB, for example streaming smaller chunks from the client may allow the + VAD to be more responsive. Unlike made other client events, the server will + not send a confirmation response to this event. + """ + self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id}), + ) + ) + + +class BaseAsyncRealtimeConnectionResource: + def __init__(self, connection: AsyncRealtimeConnection) -> None: + self._connection = connection + + +class AsyncRealtimeSessionResource(BaseAsyncRealtimeConnectionResource): + async def update( + self, *, session: session_update_event_param.Session, event_id: str | NotGiven = NOT_GIVEN + ) -> None: + """Send this event to update the session’s default configuration. + + The client may + send this event at any time to update the session configuration, and any + field may be updated at any time, except for "voice". The server will respond + with a `session.updated` event that shows the full effective configuration. + Only fields that are present are updated, thus the correct way to clear a + field like "instructions" is to pass an empty string. + """ + await self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "session.update", "session": session, "event_id": event_id}), + ) + ) + + +class AsyncRealtimeResponseResource(BaseAsyncRealtimeConnectionResource): + async def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event to cancel an in-progress response. + + The server will respond + with a `response.cancelled` event or an error if there is no response to + cancel. 
+ """ + await self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id}), + ) + ) + + async def create( + self, + *, + event_id: str | NotGiven = NOT_GIVEN, + response: response_create_event_param.Response | NotGiven = NOT_GIVEN, + ) -> None: + """ + This event instructs the server to create a Response, which means triggering + model inference. When in Server VAD mode, the server will create Responses + automatically. + + A Response will include at least one Item, and may have two, in which case + the second will be a function call. These Items will be appended to the + conversation history. + + The server will respond with a `response.created` event, events for Items + and content created, and finally a `response.done` event to indicate the + Response is complete. + + The `response.create` event includes inference configuration like + `instructions`, and `temperature`. These fields will override the Session's + configuration for this Response only. + """ + await self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "response.create", "event_id": event_id, "response": response}), + ) + ) + + +class AsyncRealtimeConversationResource(BaseAsyncRealtimeConnectionResource): + @cached_property + def item(self) -> AsyncRealtimeConversationItemResource: + return AsyncRealtimeConversationItemResource(self._connection) + + +class AsyncRealtimeConversationItemResource(BaseAsyncRealtimeConnectionResource): + async def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None: + """Send this event when you want to remove any item from the conversation + history. + + The server will respond with a `conversation.item.deleted` event, + unless the item does not exist in the conversation history, in which case the + server will respond with an error. + """ + await self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given({"type": "conversation.item.delete", "item_id": item_id, "event_id": event_id}), + ) + ) + + async def create( + self, + *, + item: ConversationItemParam, + event_id: str | NotGiven = NOT_GIVEN, + previous_item_id: str | NotGiven = NOT_GIVEN, + ) -> None: + """ + Add a new Item to the Conversation's context, including messages, function + calls, and function call responses. This event can be used both to populate a + "history" of the conversation and to add new items mid-stream, but has the + current limitation that it cannot populate assistant audio messages. + + If successful, the server will respond with a `conversation.item.created` + event, otherwise an `error` event will be sent. + """ + await self._connection.send( + cast( + RealtimeClientEventParam, + strip_not_given( + { + "type": "conversation.item.create", + "item": item, + "event_id": event_id, + "previous_item_id": previous_item_id, + } + ), + ) + ) + + async def truncate( + self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | NotGiven = NOT_GIVEN + ) -> None: + """Send this event to truncate a previous assistant message’s audio. + + The server + will produce audio faster than realtime, so this event is useful when the user + interrupts to truncate audio that has already been sent to the client but not + yet played. This will synchronize the server's understanding of the audio with + the client's playback. 
+
+        Truncating audio will delete the server-side text transcript to ensure there
+        is not text in the context that hasn't been heard by the user.
+
+        If successful, the server will respond with a `conversation.item.truncated`
+        event.
+        """
+        await self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given(
+                    {
+                        "type": "conversation.item.truncate",
+                        "audio_end_ms": audio_end_ms,
+                        "content_index": content_index,
+                        "item_id": item_id,
+                        "event_id": event_id,
+                    }
+                ),
+            )
+        )
+
+
+class AsyncRealtimeInputAudioBufferResource(BaseAsyncRealtimeConnectionResource):
+    async def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """Send this event to clear the audio bytes in the buffer.
+
+        The server will
+        respond with an `input_audio_buffer.cleared` event.
+        """
+        await self._connection.send(
+            cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id}))
+        )
+
+    async def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """
+        Send this event to commit the user input audio buffer, which will create a
+        new user message item in the conversation. This event will produce an error
+        if the input audio buffer is empty. When in Server VAD mode, the client does
+        not need to send this event, the server will commit the audio buffer
+        automatically.
+
+        Committing the input audio buffer will trigger input audio transcription
+        (if enabled in session configuration), but it will not create a response
+        from the model. The server will respond with an `input_audio_buffer.committed`
+        event.
+        """
+        await self._connection.send(
+            cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id}))
+        )
+
+    async def append(self, *, audio: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """Send this event to append audio bytes to the input audio buffer.
+
+        The audio
+        buffer is temporary storage you can write to and later commit. In Server VAD
+        mode, the audio buffer is used to detect speech and the server will decide
+        when to commit. When Server VAD is disabled, you must commit the audio buffer
+        manually.
+
+        The client may choose how much audio to place in each event up to a maximum
+        of 15 MiB, for example streaming smaller chunks from the client may allow the
+        VAD to be more responsive. Unlike most other client events, the server will
+        not send a confirmation response to this event.
+        """
+        await self._connection.send(
+            cast(
+                RealtimeClientEventParam,
+                strip_not_given({"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id}),
+            )
+        )
diff --git a/portkey_ai/_vendor/openai/resources/beta/realtime/sessions.py b/portkey_ai/_vendor/openai/resources/beta/realtime/sessions.py
new file mode 100644
index 00000000..1d1ee701
--- /dev/null
+++ b/portkey_ai/_vendor/openai/resources/beta/realtime/sessions.py
@@ -0,0 +1,337 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union, Iterable
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import (
+    maybe_transform,
+    async_maybe_transform,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...._base_client import make_request_options
+from ....types.beta.realtime import session_create_params
+from ....types.beta.realtime.session_create_response import SessionCreateResponse
+
+__all__ = ["Sessions", "AsyncSessions"]
+
+
+class Sessions(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> SessionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return the
+        raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return SessionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> SessionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return SessionsWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        model: Literal[
+            "gpt-4o-realtime-preview",
+            "gpt-4o-realtime-preview-2024-10-01",
+            "gpt-4o-realtime-preview-2024-12-17",
+            "gpt-4o-mini-realtime-preview",
+            "gpt-4o-mini-realtime-preview-2024-12-17",
+        ],
+        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
+        input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
+        instructions: str | NotGiven = NOT_GIVEN,
+        max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
+        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
+        output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
+        temperature: float | NotGiven = NOT_GIVEN,
+        tool_choice: str | NotGiven = NOT_GIVEN,
+        tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
+        turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
+        voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> SessionCreateResponse:
+        """
+        Create an ephemeral API token for use in client-side applications with the
+        Realtime API. Can be configured with the same session parameters as the
+        `session.update` client event.
+
+        It responds with a session object, plus a `client_secret` key which contains a
+        usable ephemeral API token that can be used to authenticate browser clients for
+        the Realtime API.
+
+        Args:
+          model: The Realtime model used for this session.
+
+          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+
+          input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
+              `null` to turn off once on. Input audio transcription is not native to the
+              model, since the model consumes audio directly. Transcription runs
+              asynchronously through Whisper and should be treated as rough guidance rather
+              than the representation understood by the model.
+
+          instructions: The default system instructions (i.e. system message) prepended to model calls.
+              This field allows the client to guide the model on desired responses. The model
+              can be instructed on response content and format, (e.g. "be extremely succinct",
+              "act friendly", "here are examples of good responses") and on audio behavior
+              (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
+              instructions are not guaranteed to be followed by the model, but they provide
+              guidance to the model on the desired behavior.
+
+              Note that the server sets default instructions which will be used if this field
+              is not set and are visible in the `session.created` event at the start of the
+              session.
+
+          max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
+              tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+              `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+
+          modalities: The set of modalities the model can respond with. To disable audio, set this to
+              ["text"].
+
+          output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+
+          temperature: Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.
+
+          tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
+              a function.
+
+          tools: Tools (functions) available to the model.
+
+          turn_detection: Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+              means that the model will detect the start and end of speech based on audio
+              volume and respond at the end of user speech.
+
+          voice: The voice the model uses to respond. Voice cannot be changed during the session
+              once the model has responded with audio at least once. Current voice options are
+              `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer` and `verse`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
+        return self._post(
+            "/realtime/sessions",
+            body=maybe_transform(
+                {
+                    "model": model,
+                    "input_audio_format": input_audio_format,
+                    "input_audio_transcription": input_audio_transcription,
+                    "instructions": instructions,
+                    "max_response_output_tokens": max_response_output_tokens,
+                    "modalities": modalities,
+                    "output_audio_format": output_audio_format,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "turn_detection": turn_detection,
+                    "voice": voice,
+                },
+                session_create_params.SessionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=SessionCreateResponse,
+        )
+
+
+class AsyncSessions(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncSessionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return the
+        raw response object instead of the parsed content.
+ + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncSessionsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncSessionsWithStreamingResponse(self) + + async def create( + self, + *, + model: Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ], + input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, + input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN, + instructions: str | NotGiven = NOT_GIVEN, + max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, + modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + tool_choice: str | NotGiven = NOT_GIVEN, + tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN, + turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN, + voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SessionCreateResponse: + """ + Create an ephemeral API token for use in client-side applications with the + Realtime API. Can be configured with the same session parameters as the + `session.update` client event. + + It responds with a session object, plus a `client_secret` key which contains a + usable ephemeral API token that can be used to authenticate browser clients for + the Realtime API. + + Args: + model: The Realtime model used for this session. + + input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + + input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. Transcription runs + asynchronously through Whisper and should be treated as rough guidance rather + than the representation understood by the model. + + instructions: The default system instructions (i.e. system message) prepended to model calls. + This field allows the client to guide the model on desired responses. The model + can be instructed on response content and format, (e.g. "be extremely succinct", + "act friendly", "here are examples of good responses") and on audio behavior + (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + instructions are not guaranteed to be followed by the model, but they provide + guidance to the model on the desired behavior. 
+ + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + + max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + + modalities: The set of modalities the model can respond with. To disable audio, set this to + ["text"]. + + output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + + temperature: Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + + tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify + a function. + + tools: Tools (functions) available to the model. + + turn_detection: Configuration for turn detection. Can be set to `null` to turn off. Server VAD + means that the model will detect the start and end of speech based on audio + volume and respond at the end of user speech. + + voice: The voice the model uses to respond. Voice cannot be changed during the session + once the model has responded with audio at least once. Current voice options are + `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} + return await self._post( + "/realtime/sessions", + body=await async_maybe_transform( + { + "model": model, + "input_audio_format": input_audio_format, + "input_audio_transcription": input_audio_transcription, + "instructions": instructions, + "max_response_output_tokens": max_response_output_tokens, + "modalities": modalities, + "output_audio_format": output_audio_format, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "turn_detection": turn_detection, + "voice": voice, + }, + session_create_params.SessionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=SessionCreateResponse, + ) + + +class SessionsWithRawResponse: + def __init__(self, sessions: Sessions) -> None: + self._sessions = sessions + + self.create = _legacy_response.to_raw_response_wrapper( + sessions.create, + ) + + +class AsyncSessionsWithRawResponse: + def __init__(self, sessions: AsyncSessions) -> None: + self._sessions = sessions + + self.create = _legacy_response.async_to_raw_response_wrapper( + sessions.create, + ) + + +class SessionsWithStreamingResponse: + def __init__(self, sessions: Sessions) -> None: + self._sessions = sessions + + self.create = to_streamed_response_wrapper( + sessions.create, + ) + + +class AsyncSessionsWithStreamingResponse: + def __init__(self, sessions: AsyncSessions) -> None: + self._sessions = sessions + + self.create = async_to_streamed_response_wrapper( + sessions.create, + ) diff --git a/portkey_ai/_vendor/openai/resources/beta/threads/messages.py b/portkey_ai/_vendor/openai/resources/beta/threads/messages.py index f0832515..e8485073 100644 --- a/portkey_ai/_vendor/openai/resources/beta/threads/messages.py +++ 
b/portkey_ai/_vendor/openai/resources/beta/threads/messages.py @@ -32,10 +32,21 @@ class Messages(SyncAPIResource): @cached_property def with_raw_response(self) -> MessagesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return MessagesWithRawResponse(self) @cached_property def with_streaming_response(self) -> MessagesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return MessagesWithStreamingResponse(self) def create( @@ -71,7 +82,7 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. extra_headers: Send extra headers @@ -158,7 +169,7 @@ def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. extra_headers: Send extra headers @@ -210,8 +221,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. @@ -295,10 +306,21 @@ def delete( class AsyncMessages(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncMessagesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncMessagesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncMessagesWithStreamingResponse(self) async def create( @@ -334,7 +356,7 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. extra_headers: Send extra headers @@ -421,7 +443,7 @@ async def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. extra_headers: Send extra headers @@ -473,8 +495,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. diff --git a/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py b/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py index cbfb9546..0418d570 100644 --- a/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py +++ b/portkey_ai/_vendor/openai/resources/beta/threads/runs/runs.py @@ -3,9 +3,9 @@ from __future__ import annotations import typing_extensions -from typing import Union, Iterable, Optional, overload +from typing import List, Union, Iterable, Optional from functools import partial -from typing_extensions import Literal +from typing_extensions import Literal, overload import httpx @@ -49,6 +49,7 @@ from .....types.beta.threads.run import Run from .....types.beta.assistant_tool_param import AssistantToolParam from .....types.beta.assistant_stream_event import AssistantStreamEvent +from .....types.beta.threads.runs.run_step_include import RunStepInclude from .....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam from .....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -62,10 +63,21 @@ def steps(self) -> Steps: @cached_property def with_raw_response(self) -> RunsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return RunsWithRawResponse(self) @cached_property def with_streaming_response(self) -> RunsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return RunsWithStreamingResponse(self) @overload @@ -74,6 +86,7 @@ def create( thread_id: str, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -104,6 +117,14 @@ def create( [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. 
+ additional_instructions: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. @@ -128,7 +149,7 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -137,20 +158,20 @@ def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -206,6 +227,7 @@ def create( *, assistant_id: str, stream: Literal[True], + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -239,6 +261,14 @@ def create( events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + additional_instructions: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. @@ -263,7 +293,7 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -272,20 +302,20 @@ def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -337,6 +367,7 @@ def create( *, assistant_id: str, stream: bool, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -370,6 +401,14 @@ def create( events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + additional_instructions: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. @@ -394,7 +433,7 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -403,20 +442,20 @@ def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. 
Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -467,6 +506,7 @@ def create( thread_id: str, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -516,7 +556,11 @@ def create( run_create_params.RunCreateParams, ), options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"include": include}, run_create_params.RunCreateParams), ), cast_to=Run, stream=stream or False, @@ -579,7 +623,7 @@ def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. extra_headers: Send extra headers @@ -630,8 +674,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. 
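
The new `include` parameter travels as a query-string field (note the `query=` now passed to `make_request_options` above) rather than in the JSON body. A minimal usage sketch, with hypothetical thread and assistant IDs and assuming `OPENAI_API_KEY` is set in the environment:

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Ask the API to inline file search result content on each run step.
# This is the only `include` value the API currently documents.
run = client.beta.threads.runs.create(
    thread_id="thread_abc123",   # hypothetical thread ID
    assistant_id="asst_abc123",  # hypothetical assistant ID
    include=["step_details.tool_calls[*].file_search.results[*].content"],
)
print(run.id, run.status)
```
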
@@ -712,6 +756,7 @@ def create_and_poll( self, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -743,6 +788,7 @@ def create_and_poll( run = self.create( thread_id=thread_id, assistant_id=assistant_id, + include=include, additional_instructions=additional_instructions, additional_messages=additional_messages, instructions=instructions, @@ -958,6 +1004,7 @@ def stream( self, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -988,6 +1035,7 @@ def stream( self, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -1018,6 +1066,7 @@ def stream( self, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -1076,7 +1125,11 @@ def stream( run_create_params.RunCreateParams, ), options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"include": include}, run_create_params.RunCreateParams), ), cast_to=Run, stream=True, @@ -1375,10 +1428,21 @@ def steps(self) -> AsyncSteps: @cached_property def with_raw_response(self) -> AsyncRunsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncRunsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncRunsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncRunsWithStreamingResponse(self) @overload @@ -1387,6 +1451,7 @@ async def create( thread_id: str, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -1417,6 +1482,14 @@ async def create( [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. 
+ + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + additional_instructions: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. @@ -1441,7 +1514,7 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -1450,20 +1523,20 @@ async def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -1519,6 +1592,7 @@ async def create( *, assistant_id: str, stream: Literal[True], + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -1552,6 +1626,14 @@ async def create( events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + additional_instructions: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. @@ -1576,7 +1658,7 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -1585,20 +1667,20 @@ async def create( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -1650,6 +1732,7 @@ async def create( *, assistant_id: str, stream: bool, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -1683,6 +1766,14 @@ async def create( events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + additional_instructions: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. @@ -1707,7 +1798,7 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -1716,20 +1807,20 @@ async def create( assistant will be used. 
parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -1780,6 +1871,7 @@ async def create( thread_id: str, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -1829,7 +1921,11 @@ async def create( run_create_params.RunCreateParams, ), options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"include": include}, run_create_params.RunCreateParams), ), cast_to=Run, stream=stream or False, @@ -1892,7 +1988,7 @@ async def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. extra_headers: Send extra headers @@ -1943,8 +2039,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. 
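
The async client mirrors the same `include` plumbing via `async_maybe_transform`. A minimal async sketch with hypothetical IDs; it also exercises the JSON mode described in the `response_format` docs above, which constrains output to valid JSON only when the prompt itself explicitly asks for JSON:

```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set in the environment


async def main() -> None:
    run = await client.beta.threads.runs.create(
        thread_id="thread_abc123",   # hypothetical IDs
        assistant_id="asst_abc123",
        include=["step_details.tool_calls[*].file_search.results[*].content"],
        # JSON mode: output is constrained to valid JSON, but per the docstring
        # the assistant's instructions must still explicitly request JSON.
        response_format={"type": "json_object"},
    )
    print(run.id, run.status)


asyncio.run(main())
```
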
@@ -2025,6 +2121,7 @@ async def create_and_poll( self, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -2056,6 +2153,7 @@ async def create_and_poll( run = await self.create( thread_id=thread_id, assistant_id=assistant_id, + include=include, additional_instructions=additional_instructions, additional_messages=additional_messages, instructions=instructions, @@ -2303,6 +2401,7 @@ def stream( self, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -2333,6 +2432,7 @@ def stream( self, *, assistant_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -2393,7 +2493,11 @@ def stream( run_create_params.RunCreateParams, ), options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"include": include}, run_create_params.RunCreateParams), ), cast_to=Run, stream=True, diff --git a/portkey_ai/_vendor/openai/resources/beta/threads/runs/steps.py b/portkey_ai/_vendor/openai/resources/beta/threads/runs/steps.py index 51200893..9bd91e39 100644 --- a/portkey_ai/_vendor/openai/resources/beta/threads/runs/steps.py +++ b/portkey_ai/_vendor/openai/resources/beta/threads/runs/steps.py @@ -2,23 +2,25 @@ from __future__ import annotations +from typing import List from typing_extensions import Literal import httpx from ..... import _legacy_response from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ....._utils import maybe_transform +from ....._utils import ( + maybe_transform, + async_maybe_transform, +) from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .....pagination import SyncCursorPage, AsyncCursorPage -from ....._base_client import ( - AsyncPaginator, - make_request_options, -) -from .....types.beta.threads.runs import step_list_params +from ....._base_client import AsyncPaginator, make_request_options +from .....types.beta.threads.runs import step_list_params, step_retrieve_params from .....types.beta.threads.runs.run_step import RunStep +from .....types.beta.threads.runs.run_step_include import RunStepInclude __all__ = ["Steps", "AsyncSteps"] @@ -26,10 +28,21 @@ class Steps(SyncAPIResource): @cached_property def with_raw_response(self) -> StepsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return StepsWithRawResponse(self) @cached_property def with_streaming_response(self) -> StepsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return StepsWithStreamingResponse(self) def retrieve( @@ -38,6 +51,7 @@ def retrieve( *, thread_id: str, run_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -49,6 +63,14 @@ def retrieve( Retrieves a run step. Args: + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -67,7 +89,11 @@ def retrieve( return self._get( f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams), ), cast_to=RunStep, ) @@ -79,6 +105,7 @@ def list( thread_id: str, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -99,8 +126,16 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. 
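
`Steps.retrieve` and `Steps.list` gain the same query-forwarded `include`, and the newly documented `.with_raw_response` prefix composes with either call. A sketch, again with hypothetical IDs:

```python
from openai import OpenAI

client = OpenAI()

# Retrieve a single run step with file search result content inlined.
step = client.beta.threads.runs.steps.retrieve(
    "step_abc123",               # hypothetical step ID
    thread_id="thread_abc123",
    run_id="run_abc123",
    include=["step_details.tool_calls[*].file_search.results[*].content"],
)

# The same call through the raw-response wrapper documented above.
raw = client.beta.threads.runs.steps.with_raw_response.retrieve(
    "step_abc123",
    thread_id="thread_abc123",
    run_id="run_abc123",
)
print(raw.headers.get("x-request-id"))
step = raw.parse()  # parse into the RunStep model on demand
```
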
@@ -133,6 +168,7 @@ def list( { "after": after, "before": before, + "include": include, "limit": limit, "order": order, }, @@ -146,10 +182,21 @@ def list( class AsyncSteps(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncStepsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncStepsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncStepsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncStepsWithStreamingResponse(self) async def retrieve( @@ -158,6 +205,7 @@ async def retrieve( *, thread_id: str, run_id: str, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -169,6 +217,14 @@ async def retrieve( Retrieves a run step. Args: + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -187,7 +243,11 @@ async def retrieve( return await self._get( f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"include": include}, step_retrieve_params.StepRetrieveParams), ), cast_to=RunStep, ) @@ -199,6 +259,7 @@ def list( thread_id: str, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, + include: List[RunStepInclude] | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -219,8 +280,16 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. + + include: A list of additional fields to include in the response. Currently the only + supported value is `step_details.tool_calls[*].file_search.results[*].content` + to fetch the file search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. 
limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. @@ -253,6 +322,7 @@ def list( { "after": after, "before": before, + "include": include, "limit": limit, "order": order, }, diff --git a/portkey_ai/_vendor/openai/resources/beta/threads/threads.py b/portkey_ai/_vendor/openai/resources/beta/threads/threads.py index 4c95c484..e45090ab 100644 --- a/portkey_ai/_vendor/openai/resources/beta/threads/threads.py +++ b/portkey_ai/_vendor/openai/resources/beta/threads/threads.py @@ -2,21 +2,13 @@ from __future__ import annotations -from typing import Union, Iterable, Optional, overload +from typing import Union, Iterable, Optional from functools import partial -from typing_extensions import Literal +from typing_extensions import Literal, overload import httpx from .... import _legacy_response -from .runs import ( - Runs, - AsyncRuns, - RunsWithRawResponse, - AsyncRunsWithRawResponse, - RunsWithStreamingResponse, - AsyncRunsWithStreamingResponse, -) from .messages import ( Messages, AsyncMessages, @@ -31,7 +23,14 @@ maybe_transform, async_maybe_transform, ) -from .runs.runs import Runs, AsyncRuns +from .runs.runs import ( + Runs, + AsyncRuns, + RunsWithRawResponse, + AsyncRunsWithRawResponse, + RunsWithStreamingResponse, + AsyncRunsWithStreamingResponse, +) from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper @@ -72,10 +71,21 @@ def messages(self) -> Messages: @cached_property def with_raw_response(self) -> ThreadsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return ThreadsWithRawResponse(self) @cached_property def with_streaming_response(self) -> ThreadsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return ThreadsWithStreamingResponse(self) def create( @@ -100,7 +110,7 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. tool_resources: A set of resources that are made available to the assistant's tools in this @@ -186,7 +196,7 @@ def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. tool_resources: A set of resources that are made available to the assistant's tools in this @@ -306,7 +316,7 @@ def create_and_run( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -315,20 +325,20 @@ def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -440,7 +450,7 @@ def create_and_run( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -449,20 +459,20 @@ def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. 
**Important:** when using JSON mode, you **must** also instruct the model to @@ -570,7 +580,7 @@ def create_and_run( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -579,20 +589,20 @@ def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -895,10 +905,21 @@ def messages(self) -> AsyncMessages: @cached_property def with_raw_response(self) -> AsyncThreadsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncThreadsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncThreadsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncThreadsWithStreamingResponse(self) async def create( @@ -923,7 +944,7 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. tool_resources: A set of resources that are made available to the assistant's tools in this @@ -1009,7 +1030,7 @@ async def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. tool_resources: A set of resources that are made available to the assistant's tools in this @@ -1129,7 +1150,7 @@ async def create_and_run( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -1138,20 +1159,20 @@ async def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -1263,7 +1284,7 @@ async def create_and_run( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -1272,20 +1293,20 @@ async def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
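The hunks above also attach docstrings to the `with_raw_response` and `with_streaming_response` accessors. A minimal sketch of both, following the README pattern those docstrings link to (the header names are illustrative):

```python
from openai import OpenAI

client = OpenAI()

# Prefixing a call with .with_raw_response returns the raw HTTP response;
# .parse() then yields the usual typed object.
raw = client.beta.threads.with_raw_response.create()
print(raw.headers.get("x-request-id"))
thread = raw.parse()

# .with_streaming_response defers reading the body until it is consumed.
with client.beta.threads.with_streaming_response.create() as response:
    print(response.headers.get("content-type"))
    for line in response.iter_lines():
        print(line)
```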
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -1393,7 +1414,7 @@ async def create_and_run( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -1402,20 +1423,20 @@ async def create_and_run( assistant will be used. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. 
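And a sketch of the stricter `json_schema` variant on the async client; the schema, name, and IDs here are hypothetical examples, not part of the diff:

```python
import asyncio
from openai import AsyncOpenAI

client = AsyncOpenAI()

async def main() -> None:
    # Structured Outputs: the response is constrained to this (hypothetical)
    # schema. strict=True with additionalProperties=False enforces exact
    # schema adherence, per the Structured Outputs guide linked above.
    run = await client.beta.threads.create_and_run(
        assistant_id="asst_abc123",
        thread={"messages": [{"role": "user", "content": "Extract the city."}]},
        response_format={
            "type": "json_schema",
            "json_schema": {
                "name": "city_extraction",
                "strict": True,
                "schema": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                    "additionalProperties": False,
                },
            },
        },
    )
    print(run.status)

asyncio.run(main())
```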
**Important:** when using JSON mode, you **must** also instruct the model to diff --git a/portkey_ai/_vendor/openai/resources/beta/vector_stores/file_batches.py b/portkey_ai/_vendor/openai/resources/beta/vector_stores/file_batches.py index d6862c24..9f9e643b 100644 --- a/portkey_ai/_vendor/openai/resources/beta/vector_stores/file_batches.py +++ b/portkey_ai/_vendor/openai/resources/beta/vector_stores/file_batches.py @@ -22,11 +22,10 @@ from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ....pagination import SyncCursorPage, AsyncCursorPage -from ...._base_client import ( - AsyncPaginator, - make_request_options, -) +from ....types.beta import FileChunkingStrategyParam +from ...._base_client import AsyncPaginator, make_request_options from ....types.beta.vector_stores import file_batch_create_params, file_batch_list_files_params +from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam from ....types.beta.vector_stores.vector_store_file import VectorStoreFile from ....types.beta.vector_stores.vector_store_file_batch import VectorStoreFileBatch @@ -36,10 +35,21 @@ class FileBatches(SyncAPIResource): @cached_property def with_raw_response(self) -> FileBatchesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return FileBatchesWithRawResponse(self) @cached_property def with_streaming_response(self) -> FileBatchesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return FileBatchesWithStreamingResponse(self) def create( @@ -47,7 +57,7 @@ def create( vector_store_id: str, *, file_ids: List[str], - chunking_strategy: file_batch_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -64,7 +74,7 @@ def create( files. chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` - strategy. + strategy. Only applicable if `file_ids` is non-empty. extra_headers: Send extra headers @@ -174,7 +184,7 @@ def create_and_poll( *, file_ids: List[str], poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: file_batch_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFileBatch: """Create a vector store batch and poll until all files have been processed.""" batch = self.create( @@ -217,8 +227,8 @@ def list_files( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. 
filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. @@ -308,7 +318,7 @@ def upload_and_poll( max_concurrency: int = 5, file_ids: List[str] = [], poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: file_batch_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFileBatch: """Uploads the given files concurrently and then creates a vector store file batch. @@ -354,10 +364,21 @@ def upload_and_poll( class AsyncFileBatches(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncFileBatchesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncFileBatchesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncFileBatchesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncFileBatchesWithStreamingResponse(self) async def create( @@ -365,7 +386,7 @@ async def create( vector_store_id: str, *, file_ids: List[str], - chunking_strategy: file_batch_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -382,7 +403,7 @@ async def create( files. chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` - strategy. + strategy. Only applicable if `file_ids` is non-empty. extra_headers: Send extra headers @@ -492,7 +513,7 @@ async def create_and_poll( *, file_ids: List[str], poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: file_batch_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFileBatch: """Create a vector store batch and poll until all files have been processed.""" batch = await self.create( @@ -535,8 +556,8 @@ def list_files( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. @@ -626,7 +647,7 @@ async def upload_and_poll( max_concurrency: int = 5, file_ids: List[str] = [], poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: file_batch_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFileBatch: """Uploads the given files concurrently and then creates a vector store file batch. 
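A minimal sketch of the batch endpoint touched above; the vector store and file IDs are hypothetical. Note that `chunking_strategy` now accepts the shared `FileChunkingStrategyParam` shape rather than a per-endpoint type:

```python
from openai import OpenAI

client = OpenAI()

# Create a file batch with an explicit static chunking strategy and poll
# until every file has been processed. Omitting chunking_strategy falls
# back to the `auto` strategy, per the docstring above.
batch = client.beta.vector_stores.file_batches.create_and_poll(
    vector_store_id="vs_abc123",
    file_ids=["file_abc", "file_def"],
    chunking_strategy={
        "type": "static",
        "static": {"max_chunk_size_tokens": 800, "chunk_overlap_tokens": 400},
    },
)
print(batch.status, batch.file_counts.completed)
```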
diff --git a/portkey_ai/_vendor/openai/resources/beta/vector_stores/files.py b/portkey_ai/_vendor/openai/resources/beta/vector_stores/files.py index 35ca331c..7c155ac9 100644 --- a/portkey_ai/_vendor/openai/resources/beta/vector_stores/files.py +++ b/portkey_ai/_vendor/openai/resources/beta/vector_stores/files.py @@ -18,11 +18,10 @@ from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ....pagination import SyncCursorPage, AsyncCursorPage -from ...._base_client import ( - AsyncPaginator, - make_request_options, -) +from ....types.beta import FileChunkingStrategyParam +from ...._base_client import AsyncPaginator, make_request_options from ....types.beta.vector_stores import file_list_params, file_create_params +from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam from ....types.beta.vector_stores.vector_store_file import VectorStoreFile from ....types.beta.vector_stores.vector_store_file_deleted import VectorStoreFileDeleted @@ -32,10 +31,21 @@ class Files(SyncAPIResource): @cached_property def with_raw_response(self) -> FilesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return FilesWithRawResponse(self) @cached_property def with_streaming_response(self) -> FilesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return FilesWithStreamingResponse(self) def create( @@ -43,7 +53,7 @@ def create( vector_store_id: str, *, file_id: str, - chunking_strategy: file_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -62,7 +72,7 @@ def create( files. chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` - strategy. + strategy. Only applicable if `file_ids` is non-empty. extra_headers: Send extra headers @@ -154,8 +164,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. 
@@ -245,7 +255,7 @@ def create_and_poll( *, vector_store_id: str, poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: file_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFile: """Attach a file to the given vector store and wait for it to be processed.""" self.create(vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy) @@ -302,7 +312,7 @@ def upload( *, vector_store_id: str, file: FileTypes, - chunking_strategy: file_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFile: """Upload a file to the `files` API and then attach it to the given vector store. @@ -318,7 +328,7 @@ def upload_and_poll( vector_store_id: str, file: FileTypes, poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: file_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFile: """Add a file to a vector store and poll until processing is complete.""" file_obj = self._client.files.create(file=file, purpose="assistants") @@ -333,10 +343,21 @@ def upload_and_poll( class AsyncFiles(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncFilesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncFilesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncFilesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncFilesWithStreamingResponse(self) async def create( @@ -344,7 +365,7 @@ async def create( vector_store_id: str, *, file_id: str, - chunking_strategy: file_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -363,7 +384,7 @@ async def create( files. chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto` - strategy. + strategy. Only applicable if `file_ids` is non-empty. extra_headers: Send extra headers @@ -455,8 +476,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. filter: Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. 
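The `before` cursor semantics corrected in these hunks (the cursor points at the *first* object of the page you already have, not the last) look like this in practice; the vector store ID is hypothetical:

```python
from openai import OpenAI

client = OpenAI()

page = client.beta.vector_stores.files.list("vs_abc123", limit=20)
if page.data:
    # Fetch the previous page: pass the ID of the first object received.
    previous_page = client.beta.vector_stores.files.list(
        "vs_abc123", before=page.data[0].id, limit=20
    )
```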
@@ -546,7 +567,7 @@ async def create_and_poll( *, vector_store_id: str, poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: file_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFile: """Attach a file to the given vector store and wait for it to be processed.""" await self.create(vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy) @@ -603,7 +624,7 @@ async def upload( *, vector_store_id: str, file: FileTypes, - chunking_strategy: file_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFile: """Upload a file to the `files` API and then attach it to the given vector store. @@ -621,7 +642,7 @@ async def upload_and_poll( vector_store_id: str, file: FileTypes, poll_interval_ms: int | NotGiven = NOT_GIVEN, - chunking_strategy: file_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFile: """Add a file to a vector store and poll until processing is complete.""" file_obj = await self._client.files.create(file=file, purpose="assistants") diff --git a/portkey_ai/_vendor/openai/resources/beta/vector_stores/vector_stores.py b/portkey_ai/_vendor/openai/resources/beta/vector_stores/vector_stores.py index cbd56a06..61a2eadc 100644 --- a/portkey_ai/_vendor/openai/resources/beta/vector_stores/vector_stores.py +++ b/portkey_ai/_vendor/openai/resources/beta/vector_stores/vector_stores.py @@ -33,13 +33,16 @@ AsyncFileBatchesWithStreamingResponse, ) from ....pagination import SyncCursorPage, AsyncCursorPage -from ....types.beta import vector_store_list_params, vector_store_create_params, vector_store_update_params -from ...._base_client import ( - AsyncPaginator, - make_request_options, +from ....types.beta import ( + FileChunkingStrategyParam, + vector_store_list_params, + vector_store_create_params, + vector_store_update_params, ) +from ...._base_client import AsyncPaginator, make_request_options from ....types.beta.vector_store import VectorStore from ....types.beta.vector_store_deleted import VectorStoreDeleted +from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam __all__ = ["VectorStores", "AsyncVectorStores"] @@ -55,16 +58,27 @@ def file_batches(self) -> FileBatches: @cached_property def with_raw_response(self) -> VectorStoresWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return VectorStoresWithRawResponse(self) @cached_property def with_streaming_response(self) -> VectorStoresWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return VectorStoresWithStreamingResponse(self) def create( self, *, - chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, @@ -91,7 +105,7 @@ def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. name: The name of the vector store. @@ -179,7 +193,7 @@ def update( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. name: The name of the vector store. @@ -237,8 +251,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. @@ -322,16 +336,27 @@ def file_batches(self) -> AsyncFileBatches: @cached_property def with_raw_response(self) -> AsyncVectorStoresWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncVectorStoresWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncVectorStoresWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncVectorStoresWithStreamingResponse(self) async def create( self, *, - chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN, + chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, @@ -358,7 +383,7 @@ async def create( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. name: The name of the vector store. 
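A sketch of vector store creation with the widened `chunking_strategy` type and an expiry policy; all names and values are illustrative:

```python
from openai import OpenAI

client = OpenAI()

vector_store = client.beta.vector_stores.create(
    name="support-docs",
    file_ids=["file_abc"],                    # hypothetical file ID
    chunking_strategy={"type": "auto"},       # the default when omitted
    expires_after={"anchor": "last_active_at", "days": 7},
    metadata={"team": "support"},             # keys <= 64 chars, values <= 512
)
print(vector_store.id, vector_store.status)
```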
@@ -446,7 +471,7 @@ async def update( metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maxium of 512 + can be a maximum of 64 characters long and values can be a maximum of 512 characters long. name: The name of the vector store. @@ -504,8 +529,8 @@ def list( before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include before=obj_foo in order to - fetch the previous page of the list. + starting with obj_foo, your subsequent call can include before=obj_foo in order + to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. diff --git a/portkey_ai/_vendor/openai/resources/chat/chat.py b/portkey_ai/_vendor/openai/resources/chat/chat.py index d14d0555..dc23a15a 100644 --- a/portkey_ai/_vendor/openai/resources/chat/chat.py +++ b/portkey_ai/_vendor/openai/resources/chat/chat.py @@ -23,10 +23,21 @@ def completions(self) -> Completions: @cached_property def with_raw_response(self) -> ChatWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return ChatWithRawResponse(self) @cached_property def with_streaming_response(self) -> ChatWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return ChatWithStreamingResponse(self) @@ -37,10 +48,21 @@ def completions(self) -> AsyncCompletions: @cached_property def with_raw_response(self) -> AsyncChatWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncChatWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncChatWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncChatWithStreamingResponse(self) diff --git a/portkey_ai/_vendor/openai/resources/chat/completions.py b/portkey_ai/_vendor/openai/resources/chat/completions.py index 3dcd3774..728c7443 100644 --- a/portkey_ai/_vendor/openai/resources/chat/completions.py +++ b/portkey_ai/_vendor/openai/resources/chat/completions.py @@ -2,10 +2,12 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional, overload -from typing_extensions import Literal +import inspect +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, overload import httpx +import pydantic from ... 
import _legacy_response from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven @@ -18,14 +20,22 @@ from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ..._streaming import Stream, AsyncStream -from ...types.chat import completion_create_params +from ...types.chat import ( + ChatCompletionAudioParam, + ChatCompletionReasoningEffort, + completion_create_params, +) from ..._base_client import make_request_options from ...types.chat_model import ChatModel from ...types.chat.chat_completion import ChatCompletion from ...types.chat.chat_completion_chunk import ChatCompletionChunk +from ...types.chat.chat_completion_modality import ChatCompletionModality from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam +from ...types.chat.chat_completion_audio_param import ChatCompletionAudioParam from ...types.chat.chat_completion_message_param import ChatCompletionMessageParam +from ...types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort from ...types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam +from ...types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam from ...types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam __all__ = ["Completions", "AsyncCompletions"] @@ -34,10 +44,21 @@ class Completions(SyncAPIResource): @cached_property def with_raw_response(self) -> CompletionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return CompletionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> CompletionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return CompletionsWithStreamingResponse(self) @overload @@ -46,19 +67,26 @@ def create( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -74,31 +102,51 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion: - """ - Creates a model response for the given chat conversation. + """Creates a model response for the given chat conversation. + + Learn more in the + [text generation](https://platform.openai.com/docs/guides/text-generation), + [vision](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio) guides. + + Parameter support can differ depending on the model used to generate the + response, particularly for newer reasoning models. Parameters that are only + supported for reasoning models are noted below. For the current state of + unsupported parameters in reasoning models, + [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). Args: - messages: A list of messages comprising the conversation so far. - [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + messages: A list of messages comprising the conversation so far. Depending on the + [model](https://platform.openai.com/docs/models) you use, different message + types (modalities) are supported, like + [text](https://platform.openai.com/docs/guides/text-generation), + [images](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. + audio: Parameters for audio output. 
Required when audio output is requested with + `modalities: ["audio"]`. + [Learn more](https://platform.openai.com/docs/guides/audio). + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) - function_call: Deprecated in favor of `tool_choice`. - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via `{"name": "my_function"}` forces the model to call that + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + `none` is the default when no functions are present. `auto` is the default if functions are present. @@ -119,35 +167,62 @@ def create( returns the log probabilities of each output token returned in the `content` of `message`. + max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. + + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). - The total length of input tokens and generated tokens is limited by the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + metadata: Developer-defined tags and values used for filtering completions in the + [dashboard](https://platform.openai.com/chat-completions). + + modalities: Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). To request that + this model generate both text and audio responses, you can use: + + `["text", "audio"]` n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + prediction: Static predicted output content, such as the content of a text file that is + being regenerated. + presence_penalty: Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + reasoning_effort: **o1 models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. - response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + response_format: An object specifying the format that the model must output. - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -167,8 +242,11 @@ def create( service_tier: Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are - exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. @@ -178,6 +256,10 @@ def create( stop: Up to 4 sequences where the API will stop generating further tokens. + store: Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. + stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) @@ -189,9 +271,8 @@ def create( temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + focused and deterministic. We generally recommend altering this or `top_p` but + not both. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can @@ -219,7 +300,7 @@ def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -238,19 +319,26 @@ def create( messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], stream: Literal[True], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, @@ -265,15 +353,29 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Stream[ChatCompletionChunk]: - """ - Creates a model response for the given chat conversation. + """Creates a model response for the given chat conversation. + + Learn more in the + [text generation](https://platform.openai.com/docs/guides/text-generation), + [vision](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio) guides. + + Parameter support can differ depending on the model used to generate the + response, particularly for newer reasoning models. Parameters that are only + supported for reasoning models are noted below. For the current state of + unsupported parameters in reasoning models, + [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). Args: - messages: A list of messages comprising the conversation so far. - [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + messages: A list of messages comprising the conversation so far. Depending on the + [model](https://platform.openai.com/docs/models) you use, different message + types (modalities) are supported, like + [text](https://platform.openai.com/docs/guides/text-generation), + [images](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. 
See the - [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be @@ -283,20 +385,26 @@ def create( message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + audio: Parameters for audio output. Required when audio output is requested with + `modalities: ["audio"]`. + [Learn more](https://platform.openai.com/docs/guides/audio). + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) - function_call: Deprecated in favor of `tool_choice`. - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via `{"name": "my_function"}` forces the model to call that + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + `none` is the default when no functions are present. `auto` is the default if functions are present. @@ -317,35 +425,62 @@ def create( returns the log probabilities of each output token returned in the `content` of `message`. + max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. + + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). + + metadata: Developer-defined tags and values used for filtering completions in the + [dashboard](https://platform.openai.com/chat-completions). - The total length of input tokens and generated tokens is limited by the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + modalities: Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). To request that + this model generate both text and audio responses, you can use: + + `["text", "audio"]` n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. 
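A sketch of the audio-output path that the new `modalities` and `audio` parameters above describe; the voice and format values follow the audio guide and may change while the feature is in preview:

```python
import base64
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-audio-preview",
    modalities=["text", "audio"],
    audio={"voice": "alloy", "format": "wav"},
    messages=[{"role": "user", "content": "Say this is a test."}],
)

# The audio payload comes back base64-encoded on the message.
wav_bytes = base64.b64decode(completion.choices[0].message.audio.data)
with open("reply.wav", "wb") as f:
    f.write(wav_bytes)
```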
parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + prediction: Static predicted output content, such as the content of a text file that is + being regenerated. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + reasoning_effort: **o1 models only** - response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + response_format: An object specifying the format that the model must output. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -365,8 +500,11 @@ def create( service_tier: Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are - exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. @@ -376,13 +514,16 @@ def create( stop: Up to 4 sequences where the API will stop generating further tokens. + store: Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. + stream_options: Options for streaming response. Only set this when you set `stream: true`. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - We generally recommend altering this or `top_p` but not both. 
+ focused and deterministic. We generally recommend altering this or `top_p` but + not both. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can @@ -410,7 +551,7 @@ def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -429,19 +570,26 @@ def create( messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], stream: bool, + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, @@ -456,15 +604,29 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | Stream[ChatCompletionChunk]: - """ - Creates a model response for the given chat conversation. + """Creates a model response for the given chat conversation. + + Learn more in the + [text generation](https://platform.openai.com/docs/guides/text-generation), + [vision](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio) guides. + + Parameter support can differ depending on the model used to generate the + response, particularly for newer reasoning models. Parameters that are only + supported for reasoning models are noted below. For the current state of + unsupported parameters in reasoning models, + [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). Args: - messages: A list of messages comprising the conversation so far. - [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + messages: A list of messages comprising the conversation so far. 
Depending on the + [model](https://platform.openai.com/docs/models) you use, different message + types (modalities) are supported, like + [text](https://platform.openai.com/docs/guides/text-generation), + [images](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be @@ -474,20 +636,26 @@ def create( message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + audio: Parameters for audio output. Required when audio output is requested with + `modalities: ["audio"]`. + [Learn more](https://platform.openai.com/docs/guides/audio). + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) - function_call: Deprecated in favor of `tool_choice`. - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via `{"name": "my_function"}` forces the model to call that + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + `none` is the default when no functions are present. `auto` is the default if functions are present. @@ -508,35 +676,62 @@ def create( returns the log probabilities of each output token returned in the `content` of `message`. + max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. + + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). + + metadata: Developer-defined tags and values used for filtering completions in the + [dashboard](https://platform.openai.com/chat-completions). + + modalities: Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: - The total length of input tokens and generated tokens is limited by the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. 
+ `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). To request that + this model generate both text and audio responses, you can use: + + `["text", "audio"]` n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + prediction: Static predicted output content, such as the content of a text file that is + being regenerated. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + reasoning_effort: **o1 models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. - response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + response_format: An object specifying the format that the model must output. - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -556,8 +751,11 @@ def create( service_tier: Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are - exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. @@ -567,13 +765,16 @@ def create( stop: Up to 4 sequences where the API will stop generating further tokens. 
+ store: Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. + stream_options: Options for streaming response. Only set this when you set `stream: true`. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + focused and deterministic. We generally recommend altering this or `top_p` but + not both. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can @@ -601,7 +802,7 @@ def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -619,19 +820,26 @@ def create( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -647,25 +855,33 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | Stream[ChatCompletionChunk]: + validate_response_format(response_format) return self._post( "/chat/completions", body=maybe_transform( { "messages": messages, "model": model, + "audio": audio, "frequency_penalty": frequency_penalty, "function_call": function_call, "functions": functions, "logit_bias": logit_bias, "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, "max_tokens": max_tokens, + "metadata": metadata, + "modalities": modalities, "n": n, "parallel_tool_calls": parallel_tool_calls, + "prediction": prediction, "presence_penalty": presence_penalty, + "reasoning_effort": 
reasoning_effort, "response_format": response_format, "seed": seed, "service_tier": service_tier, "stop": stop, + "store": store, "stream": stream, "stream_options": stream_options, "temperature": temperature, @@ -689,10 +905,21 @@ def create( class AsyncCompletions(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncCompletionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncCompletionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncCompletionsWithStreamingResponse(self) @overload @@ -701,19 +928,26 @@ async def create( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -729,31 +963,51 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion: - """ - Creates a model response for the given chat conversation. + """Creates a model response for the given chat conversation. + + Learn more in the + [text generation](https://platform.openai.com/docs/guides/text-generation), + [vision](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio) guides. + + Parameter support can differ depending on the model used to generate the + response, particularly for newer reasoning models. Parameters that are only + supported for reasoning models are noted below. For the current state of + unsupported parameters in reasoning models, + [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). Args: - messages: A list of messages comprising the conversation so far. 
- [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + messages: A list of messages comprising the conversation so far. Depending on the + [model](https://platform.openai.com/docs/models) you use, different message + types (modalities) are supported, like + [text](https://platform.openai.com/docs/guides/text-generation), + [images](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. + audio: Parameters for audio output. Required when audio output is requested with + `modalities: ["audio"]`. + [Learn more](https://platform.openai.com/docs/guides/audio). + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) - function_call: Deprecated in favor of `tool_choice`. - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via `{"name": "my_function"}` forces the model to call that + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + `none` is the default when no functions are present. `auto` is the default if functions are present. @@ -774,35 +1028,62 @@ async def create( returns the log probabilities of each output token returned in the `content` of `message`. + max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. + + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). + + metadata: Developer-defined tags and values used for filtering completions in the + [dashboard](https://platform.openai.com/chat-completions). + + modalities: Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: - The total length of input tokens and generated tokens is limited by the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). 
To request that + this model generate both text and audio responses, you can use: + + `["text", "audio"]` n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + prediction: Static predicted output content, such as the content of a text file that is + being regenerated. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + reasoning_effort: **o1 models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. - response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + response_format: An object specifying the format that the model must output. - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -822,8 +1103,11 @@ async def create( service_tier: Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are - exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. @@ -833,6 +1117,10 @@ async def create( stop: Up to 4 sequences where the API will stop generating further tokens. + store: Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. 
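
The new `store` flag documented above is what feeds the distillation and evals products, and `metadata` (added in the same overloads) is how stored completions get filtered in the dashboard. A minimal sketch combining the two; the tag names are illustrative:

```python
completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Reply with 'pong'."}],
    store=True,  # persist this completion for distillation / evals
    metadata={"env": "staging", "run": "smoke-test"},  # illustrative dashboard filter tags
)
```
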
+ stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) @@ -844,9 +1132,8 @@ async def create( temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + focused and deterministic. We generally recommend altering this or `top_p` but + not both. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can @@ -874,7 +1161,7 @@ async def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -893,19 +1180,26 @@ async def create( messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], stream: Literal[True], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, @@ -920,15 +1214,29 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncStream[ChatCompletionChunk]: - """ - Creates a model response for the given chat conversation. + """Creates a model response for the given chat conversation. + + Learn more in the + [text generation](https://platform.openai.com/docs/guides/text-generation), + [vision](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio) guides. + + Parameter support can differ depending on the model used to generate the + response, particularly for newer reasoning models. Parameters that are only + supported for reasoning models are noted below. 
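
`tool_choice` keeps its existing semantics throughout this diff: `none`, `auto`, or a forced named tool. For reference, a sketch that forces a specific tool; the tool definition is illustrative:

```python
completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_weather",  # illustrative tool
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ],
    # Force the model to call get_weather rather than answer directly.
    tool_choice={"type": "function", "function": {"name": "get_weather"}},
)
print(completion.choices[0].message.tool_calls[0].function.arguments)
```
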
For the current state of + unsupported parameters in reasoning models, + [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). Args: - messages: A list of messages comprising the conversation so far. - [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + messages: A list of messages comprising the conversation so far. Depending on the + [model](https://platform.openai.com/docs/models) you use, different message + types (modalities) are supported, like + [text](https://platform.openai.com/docs/guides/text-generation), + [images](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be @@ -938,20 +1246,26 @@ async def create( message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + audio: Parameters for audio output. Required when audio output is requested with + `modalities: ["audio"]`. + [Learn more](https://platform.openai.com/docs/guides/audio). + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) - function_call: Deprecated in favor of `tool_choice`. - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via `{"name": "my_function"}` forces the model to call that + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + `none` is the default when no functions are present. `auto` is the default if functions are present. @@ -972,35 +1286,62 @@ async def create( returns the log probabilities of each output token returned in the `content` of `message`. + max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. - The total length of input tokens and generated tokens is limited by the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). 
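
Since `max_tokens` is now flagged as deprecated and incompatible with o1-series models, the migration is a one-line swap to the new `max_completion_tokens` bound, which also counts reasoning tokens:

```python
completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Summarize httpx in one paragraph."}],
    # Upper bound on generated tokens, visible output plus any reasoning tokens;
    # replaces the deprecated max_tokens.
    max_completion_tokens=256,
)
```
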
+ + metadata: Developer-defined tags and values used for filtering completions in the + [dashboard](https://platform.openai.com/chat-completions). + + modalities: Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). To request that + this model generate both text and audio responses, you can use: + + `["text", "audio"]` n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + prediction: Static predicted output content, such as the content of a text file that is + being regenerated. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + reasoning_effort: **o1 models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + + response_format: An object specifying the format that the model must output. - response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -1020,8 +1361,11 @@ async def create( service_tier: Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are - exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. 
- When not set, the default behavior is 'auto'. @@ -1031,13 +1375,16 @@ async def create( stop: Up to 4 sequences where the API will stop generating further tokens. + store: Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. + stream_options: Options for streaming response. Only set this when you set `stream: true`. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + focused and deterministic. We generally recommend altering this or `top_p` but + not both. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can @@ -1065,7 +1412,7 @@ async def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -1084,19 +1431,26 @@ async def create( messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], stream: bool, + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, @@ -1111,15 +1465,29 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]: - """ - Creates a model response for the given chat conversation. + """Creates a model response for the given chat conversation. + + Learn more in the + [text generation](https://platform.openai.com/docs/guides/text-generation), + [vision](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio) guides. 
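
`stream_options` only applies alongside `stream: true`. One concrete use, assuming the `include_usage` option from the upstream SDK (not spelled out in this hunk): the final chunk then reports token usage for the whole stream.

```python
stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Count from 1 to 5."}],
    stream=True,
    stream_options={"include_usage": True},  # assumed option; adds a final usage-only chunk
)
for chunk in stream:
    if chunk.choices:
        print(chunk.choices[0].delta.content or "", end="")
    if chunk.usage is not None:  # populated only on the last chunk
        print("\ntotal tokens:", chunk.usage.total_tokens)
```
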
+ + Parameter support can differ depending on the model used to generate the + response, particularly for newer reasoning models. Parameters that are only + supported for reasoning models are noted below. For the current state of + unsupported parameters in reasoning models, + [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). Args: - messages: A list of messages comprising the conversation so far. - [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + messages: A list of messages comprising the conversation so far. Depending on the + [model](https://platform.openai.com/docs/models) you use, different message + types (modalities) are supported, like + [text](https://platform.openai.com/docs/guides/text-generation), + [images](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio). model: ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be @@ -1129,20 +1497,26 @@ async def create( message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + audio: Parameters for audio output. Required when audio output is requested with + `modalities: ["audio"]`. + [Learn more](https://platform.openai.com/docs/guides/audio). + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) - function_call: Deprecated in favor of `tool_choice`. - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. Specifying a - particular function via `{"name": "my_function"}` forces the model to call that + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + `none` is the default when no functions are present. `auto` is the default if functions are present. @@ -1163,35 +1537,62 @@ async def create( returns the log probabilities of each output token returned in the `content` of `message`. + max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. + + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). 
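
Per the `reasoning_effort` docstring repeated in these overloads, the parameter trades answer latency against reasoning depth on o1 models only. A minimal sketch, assuming access to an o1-series model:

```python
completion = client.chat.completions.create(
    model="o1",  # assumed o1-series model name
    messages=[{"role": "user", "content": "Outline a plan to migrate a REST API to gRPC."}],
    reasoning_effort="low",  # "low" | "medium" | "high"
)
print(completion.choices[0].message.content)
```
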
+ + metadata: Developer-defined tags and values used for filtering completions in the + [dashboard](https://platform.openai.com/chat-completions). + + modalities: Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). To request that + this model generate both text and audio responses, you can use: - The total length of input tokens and generated tokens is limited by the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + `["text", "audio"]` n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. parallel_tool_calls: Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + prediction: Static predicted output content, such as the content of a text file that is + being regenerated. + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + reasoning_effort: **o1 models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. - response_format: An object specifying the format that the model must output. Compatible with - [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + response_format: An object specifying the format that the model must output. - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -1211,8 +1612,11 @@ async def create( service_tier: Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are - exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. 
+ - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. @@ -1222,13 +1626,16 @@ async def create( stop: Up to 4 sequences where the API will stop generating further tokens. + store: Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. + stream_options: Options for streaming response. Only set this when you set `stream: true`. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more - focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + focused and deterministic. We generally recommend altering this or `top_p` but + not both. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can @@ -1256,7 +1663,7 @@ async def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -1274,19 +1681,26 @@ async def create( *, messages: Iterable[ChatCompletionMessageParam], model: Union[str, ChatModel], + audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, + prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, + store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1302,25 +1716,33 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]: + validate_response_format(response_format) return await self._post( 
"/chat/completions", body=await async_maybe_transform( { "messages": messages, "model": model, + "audio": audio, "frequency_penalty": frequency_penalty, "function_call": function_call, "functions": functions, "logit_bias": logit_bias, "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, "max_tokens": max_tokens, + "metadata": metadata, + "modalities": modalities, "n": n, "parallel_tool_calls": parallel_tool_calls, + "prediction": prediction, "presence_penalty": presence_penalty, + "reasoning_effort": reasoning_effort, "response_format": response_format, "seed": seed, "service_tier": service_tier, "stop": stop, + "store": store, "stream": stream, "stream_options": stream_options, "temperature": temperature, @@ -1375,3 +1797,10 @@ def __init__(self, completions: AsyncCompletions) -> None: self.create = async_to_streamed_response_wrapper( completions.create, ) + + +def validate_response_format(response_format: object) -> None: + if inspect.isclass(response_format) and issubclass(response_format, pydantic.BaseModel): + raise TypeError( + "You tried to pass a `BaseModel` class to `chat.completions.create()`; You must use `beta.chat.completions.parse()` instead" + ) diff --git a/portkey_ai/_vendor/openai/resources/completions.py b/portkey_ai/_vendor/openai/resources/completions.py index 0812000f..1ac3575f 100644 --- a/portkey_ai/_vendor/openai/resources/completions.py +++ b/portkey_ai/_vendor/openai/resources/completions.py @@ -2,8 +2,8 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional, overload -from typing_extensions import Literal +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, overload import httpx @@ -31,10 +31,21 @@ class Completions(SyncAPIResource): @cached_property def with_raw_response(self) -> CompletionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return CompletionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> CompletionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return CompletionsWithStreamingResponse(self) @overload @@ -73,8 +84,8 @@ def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. @@ -99,7 +110,7 @@ def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) logit_bias: Modify the likelihood of specified tokens appearing in the completion. 
@@ -139,7 +150,7 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return @@ -178,7 +189,7 @@ def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -226,8 +237,8 @@ def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. @@ -259,7 +270,7 @@ def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) logit_bias: Modify the likelihood of specified tokens appearing in the completion. @@ -299,7 +310,7 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return @@ -331,7 +342,7 @@ def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -379,8 +390,8 @@ def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. @@ -412,7 +423,7 @@ def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) logit_bias: Modify the likelihood of specified tokens appearing in the completion. @@ -452,7 +463,7 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return @@ -484,7 +495,7 @@ def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -562,10 +573,21 @@ def create( class AsyncCompletions(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncCompletionsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncCompletionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncCompletionsWithStreamingResponse(self) @overload @@ -604,8 +626,8 @@ async def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. @@ -630,7 +652,7 @@ async def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) logit_bias: Modify the likelihood of specified tokens appearing in the completion. @@ -670,7 +692,7 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return @@ -709,7 +731,7 @@ async def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -757,8 +779,8 @@ async def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. @@ -790,7 +812,7 @@ async def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) logit_bias: Modify the likelihood of specified tokens appearing in the completion. @@ -830,7 +852,7 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return @@ -862,7 +884,7 @@ async def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -910,8 +932,8 @@ async def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. @@ -943,7 +965,7 @@ async def create( existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 
- [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) logit_bias: Modify the likelihood of specified tokens appearing in the completion. @@ -983,7 +1005,7 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return @@ -1015,7 +1037,7 @@ async def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers diff --git a/portkey_ai/_vendor/openai/resources/embeddings.py b/portkey_ai/_vendor/openai/resources/embeddings.py index 773b6f09..4ab2278e 100644 --- a/portkey_ai/_vendor/openai/resources/embeddings.py +++ b/portkey_ai/_vendor/openai/resources/embeddings.py @@ -16,9 +16,8 @@ from .._extras import numpy as np, has_numpy from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper -from .._base_client import ( - make_request_options, -) +from .._base_client import make_request_options +from ..types.embedding_model import EmbeddingModel from ..types.create_embedding_response import CreateEmbeddingResponse __all__ = ["Embeddings", "AsyncEmbeddings"] @@ -27,17 +26,28 @@ class Embeddings(SyncAPIResource): @cached_property def with_raw_response(self) -> EmbeddingsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return EmbeddingsWithRawResponse(self) @cached_property def with_streaming_response(self) -> EmbeddingsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return EmbeddingsWithStreamingResponse(self) def create( self, *, input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]], - model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]], + model: Union[str, EmbeddingModel], dimensions: int | NotGiven = NOT_GIVEN, encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -63,8 +73,8 @@ def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. 
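
With `model` now typed as `Union[str, EmbeddingModel]`, embeddings calls are unchanged at the call site; known model names simply gain literal-type checking. A short sketch that also exercises the `dimensions` parameter documented just below:

```python
response = client.embeddings.create(
    model="text-embedding-3-small",  # matches the EmbeddingModel literal type
    input="The quick brown fox",
    dimensions=256,  # only supported on text-embedding-3 and later models
)
vector = response.data[0].embedding
print(len(vector))  # 256
```
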
dimensions: The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. @@ -74,7 +84,7 @@ def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -128,17 +138,28 @@ def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse: class AsyncEmbeddings(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncEmbeddingsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncEmbeddingsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncEmbeddingsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncEmbeddingsWithStreamingResponse(self) async def create( self, *, input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]], - model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]], + model: Union[str, EmbeddingModel], dimensions: int | NotGiven = NOT_GIVEN, encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -164,8 +185,8 @@ async def create( model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. dimensions: The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. @@ -175,7 +196,7 @@ async def create( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers diff --git a/portkey_ai/_vendor/openai/resources/files.py b/portkey_ai/_vendor/openai/resources/files.py index 75c971a8..6eaea1b5 100644 --- a/portkey_ai/_vendor/openai/resources/files.py +++ b/portkey_ai/_vendor/openai/resources/files.py @@ -10,7 +10,7 @@ import httpx from .. 
import _legacy_response -from ..types import file_list_params, file_create_params +from ..types import FilePurpose, file_list_params, file_create_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes from .._utils import ( extract_files, @@ -28,13 +28,11 @@ to_custom_streamed_response_wrapper, async_to_custom_streamed_response_wrapper, ) -from ..pagination import SyncPage, AsyncPage -from .._base_client import ( - AsyncPaginator, - make_request_options, -) +from ..pagination import SyncCursorPage, AsyncCursorPage +from .._base_client import AsyncPaginator, make_request_options from ..types.file_object import FileObject from ..types.file_deleted import FileDeleted +from ..types.file_purpose import FilePurpose __all__ = ["Files", "AsyncFiles"] @@ -42,17 +40,28 @@ class Files(SyncAPIResource): @cached_property def with_raw_response(self) -> FilesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return FilesWithRawResponse(self) @cached_property def with_streaming_response(self) -> FilesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return FilesWithStreamingResponse(self) def create( self, *, file: FileTypes, - purpose: Literal["assistants", "batch", "fine-tune", "vision"], + purpose: FilePurpose, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -77,7 +86,7 @@ def create( [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) models. - The Batch API only supports `.jsonl` files up to 100 MB in size. The input also + The Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](https://platform.openai.com/docs/api-reference/batch/request-input). @@ -161,6 +170,9 @@ def retrieve( def list( self, *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, purpose: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -168,11 +180,23 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SyncPage[FileObject]: - """ - Returns a list of files that belong to the user's organization. + ) -> SyncCursorPage[FileObject]: + """Returns a list of files. Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 10,000, and the default is 10,000. 
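With the move from `SyncPage` to `SyncCursorPage`, iterating the result auto-paginates using the `after` cursor described above. A minimal sketch (the `purpose` filter and `limit` are illustrative):

```python
from openai import OpenAI

client = OpenAI()

# Iteration fetches subsequent pages transparently, passing the last
# object ID as the `after` cursor until the listing is exhausted.
for file in client.files.list(limit=100, order="desc", purpose="batch"):
    print(file.id, file.filename)
```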
+ + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + purpose: Only return files with the given purpose. extra_headers: Send extra headers @@ -185,13 +209,21 @@ def list( """ return self._get_api_list( "/files", - page=SyncPage[FileObject], + page=SyncCursorPage[FileObject], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=maybe_transform({"purpose": purpose}, file_list_params.FileListParams), + query=maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + "purpose": purpose, + }, + file_list_params.FileListParams, + ), ), model=FileObject, ) @@ -324,17 +356,28 @@ def wait_for_processing( class AsyncFiles(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncFilesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncFilesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncFilesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncFilesWithStreamingResponse(self) async def create( self, *, file: FileTypes, - purpose: Literal["assistants", "batch", "fine-tune", "vision"], + purpose: FilePurpose, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -359,7 +402,7 @@ async def create( [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) models. - The Batch API only supports `.jsonl` files up to 100 MB in size. The input also + The Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](https://platform.openai.com/docs/api-reference/batch/request-input). @@ -443,6 +486,9 @@ async def retrieve( def list( self, *, + after: str | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, purpose: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -450,11 +496,23 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncPaginator[FileObject, AsyncPage[FileObject]]: - """ - Returns a list of files that belong to the user's organization. + ) -> AsyncPaginator[FileObject, AsyncCursorPage[FileObject]]: + """Returns a list of files. Args: + after: A cursor for use in pagination. + + `after` is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, + ending with obj_foo, your subsequent call can include after=obj_foo in order to + fetch the next page of the list. + + limit: A limit on the number of objects to be returned. 
Limit can range between 1 and + 10,000, and the default is 10,000. + + order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + purpose: Only return files with the given purpose. extra_headers: Send extra headers @@ -467,13 +525,21 @@ def list( """ return self._get_api_list( "/files", - page=AsyncPage[FileObject], + page=AsyncCursorPage[FileObject], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - query=maybe_transform({"purpose": purpose}, file_list_params.FileListParams), + query=maybe_transform( + { + "after": after, + "limit": limit, + "order": order, + "purpose": purpose, + }, + file_list_params.FileListParams, + ), ), model=FileObject, ) diff --git a/portkey_ai/_vendor/openai/resources/fine_tuning/fine_tuning.py b/portkey_ai/_vendor/openai/resources/fine_tuning/fine_tuning.py index 0404fed6..d2bce87c 100644 --- a/portkey_ai/_vendor/openai/resources/fine_tuning/fine_tuning.py +++ b/portkey_ai/_vendor/openai/resources/fine_tuning/fine_tuning.py @@ -2,7 +2,8 @@ from __future__ import annotations -from .jobs import ( +from ..._compat import cached_property +from .jobs.jobs import ( Jobs, AsyncJobs, JobsWithRawResponse, @@ -10,8 +11,6 @@ JobsWithStreamingResponse, AsyncJobsWithStreamingResponse, ) -from ..._compat import cached_property -from .jobs.jobs import Jobs, AsyncJobs from ..._resource import SyncAPIResource, AsyncAPIResource __all__ = ["FineTuning", "AsyncFineTuning"] @@ -24,10 +23,21 @@ def jobs(self) -> Jobs: @cached_property def with_raw_response(self) -> FineTuningWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return FineTuningWithRawResponse(self) @cached_property def with_streaming_response(self) -> FineTuningWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return FineTuningWithStreamingResponse(self) @@ -38,10 +48,21 @@ def jobs(self) -> AsyncJobs: @cached_property def with_raw_response(self) -> AsyncFineTuningWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncFineTuningWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncFineTuningWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncFineTuningWithStreamingResponse(self) diff --git a/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/checkpoints.py b/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/checkpoints.py index 67f5739a..8b5e905e 100644 --- a/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/checkpoints.py +++ b/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/checkpoints.py @@ -24,10 +24,21 @@ class Checkpoints(SyncAPIResource): @cached_property def with_raw_response(self) -> CheckpointsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return CheckpointsWithRawResponse(self) @cached_property def with_streaming_response(self) -> CheckpointsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return CheckpointsWithStreamingResponse(self) def list( @@ -84,10 +95,21 @@ def list( class AsyncCheckpoints(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncCheckpointsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncCheckpointsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncCheckpointsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncCheckpointsWithStreamingResponse(self) def list( diff --git a/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/jobs.py b/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/jobs.py index 5cef7bcd..78eefc25 100644 --- a/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/jobs.py +++ b/portkey_ai/_vendor/openai/resources/fine_tuning/jobs/jobs.py @@ -43,10 +43,21 @@ def checkpoints(self) -> Checkpoints: @cached_property def with_raw_response(self) -> JobsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return JobsWithRawResponse(self) @cached_property def with_streaming_response(self) -> JobsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return JobsWithStreamingResponse(self) def create( @@ -56,6 +67,7 @@ def create( training_file: str, hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, + method: job_create_params.Method | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, validation_file: Optional[str] | NotGiven = NOT_GIVEN, @@ -77,7 +89,7 @@ def create( Args: model: The name of the model to fine-tune. You can select one of the - [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). + [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). training_file: The ID of an uploaded file that contains training data. @@ -88,22 +100,27 @@ def create( your file with the purpose `fine-tune`. The contents of the file should differ depending on if the model uses the - [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + format, or if the fine-tuning method uses the + [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) format. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. - hyperparameters: The hyperparameters used for the fine-tuning job. + hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated + in favor of `method`, and should be passed in under the `method` parameter. integrations: A list of integrations to enable for your fine-tuning job. + method: The method used for fine-tuning. + seed: The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. If a seed is not specified, one will be generated for you. - suffix: A string of up to 18 characters that will be added to your fine-tuned model + suffix: A string of up to 64 characters that will be added to your fine-tuned model name. For example, a `suffix` of "custom-model-name" would produce a model name like @@ -138,6 +155,7 @@ def create( "training_file": training_file, "hyperparameters": hyperparameters, "integrations": integrations, + "method": method, "seed": seed, "suffix": suffix, "validation_file": validation_file, @@ -323,10 +341,21 @@ def checkpoints(self) -> AsyncCheckpoints: @cached_property def with_raw_response(self) -> AsyncJobsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncJobsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncJobsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
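A minimal sketch of the new `method` parameter added in this hunk, which supersedes the deprecated top-level `hyperparameters` — the model name, file ID, and hyperparameter values are illustrative:

```python
from openai import OpenAI

client = OpenAI()

# Hyperparameters now travel inside `method` rather than the deprecated
# top-level `hyperparameters` argument; the training file ID is a placeholder.
job = client.fine_tuning.jobs.create(
    model="gpt-4o-mini-2024-07-18",
    training_file="file-abc123",
    method={
        "type": "supervised",
        "supervised": {"hyperparameters": {"n_epochs": 3}},
    },
    suffix="custom-model-name",  # now accepts up to 64 characters
)

# Checkpoints for the job can then be listed via the sibling resource.
checkpoints = client.fine_tuning.jobs.checkpoints.list(job.id)
```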
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncJobsWithStreamingResponse(self) async def create( @@ -336,6 +365,7 @@ async def create( training_file: str, hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN, + method: job_create_params.Method | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, validation_file: Optional[str] | NotGiven = NOT_GIVEN, @@ -357,7 +387,7 @@ async def create( Args: model: The name of the model to fine-tune. You can select one of the - [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). + [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). training_file: The ID of an uploaded file that contains training data. @@ -368,22 +398,27 @@ async def create( your file with the purpose `fine-tune`. The contents of the file should differ depending on if the model uses the - [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + format, or if the fine-tuning method uses the + [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) format. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. - hyperparameters: The hyperparameters used for the fine-tuning job. + hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated + in favor of `method`, and should be passed in under the `method` parameter. integrations: A list of integrations to enable for your fine-tuning job. + method: The method used for fine-tuning. + seed: The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. If a seed is not specified, one will be generated for you. - suffix: A string of up to 18 characters that will be added to your fine-tuned model + suffix: A string of up to 64 characters that will be added to your fine-tuned model name. For example, a `suffix` of "custom-model-name" would produce a model name like @@ -418,6 +453,7 @@ async def create( "training_file": training_file, "hyperparameters": hyperparameters, "integrations": integrations, + "method": method, "seed": seed, "suffix": suffix, "validation_file": validation_file, diff --git a/portkey_ai/_vendor/openai/resources/images.py b/portkey_ai/_vendor/openai/resources/images.py index 0913b572..2fbc077d 100644 --- a/portkey_ai/_vendor/openai/resources/images.py +++ b/portkey_ai/_vendor/openai/resources/images.py @@ -29,10 +29,21 @@ class Images(SyncAPIResource): @cached_property def with_raw_response(self) -> ImagesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return ImagesWithRawResponse(self) @cached_property def with_streaming_response(self) -> ImagesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return ImagesWithStreamingResponse(self) def create_variation( @@ -73,7 +84,7 @@ def create_variation( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -154,7 +165,7 @@ def edit( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -240,7 +251,7 @@ def generate( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -275,10 +286,21 @@ def generate( class AsyncImages(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncImagesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncImagesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncImagesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncImagesWithStreamingResponse(self) async def create_variation( @@ -319,7 +341,7 @@ async def create_variation( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -400,7 +422,7 @@ async def edit( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). extra_headers: Send extra headers @@ -486,7 +508,7 @@ async def generate( user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
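A minimal sketch of the end-user identifier these image endpoints document — the prompt and user ID are illustrative:

```python
from openai import OpenAI

client = OpenAI()

# `user` is an opaque end-user ID that helps OpenAI monitor and detect
# abuse; a stable hash of an internal account ID is a reasonable value.
image = client.images.generate(
    model="dall-e-3",
    prompt="A watercolor fox in a forest clearing",
    size="1024x1024",
    user="user-1234",  # illustrative identifier
)
print(image.data[0].url)
```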
extra_headers: Send extra headers diff --git a/portkey_ai/_vendor/openai/resources/models.py b/portkey_ai/_vendor/openai/resources/models.py index e76c496f..d6062de2 100644 --- a/portkey_ai/_vendor/openai/resources/models.py +++ b/portkey_ai/_vendor/openai/resources/models.py @@ -23,10 +23,21 @@ class Models(SyncAPIResource): @cached_property def with_raw_response(self) -> ModelsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return ModelsWithRawResponse(self) @cached_property def with_streaming_response(self) -> ModelsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return ModelsWithStreamingResponse(self) def retrieve( @@ -125,10 +136,21 @@ def delete( class AsyncModels(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncModelsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncModelsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncModelsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncModelsWithStreamingResponse(self) async def retrieve( diff --git a/portkey_ai/_vendor/openai/resources/moderations.py b/portkey_ai/_vendor/openai/resources/moderations.py index b9ad9972..ce80bb7d 100644 --- a/portkey_ai/_vendor/openai/resources/moderations.py +++ b/portkey_ai/_vendor/openai/resources/moderations.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union +from typing import List, Union, Iterable import httpx @@ -19,6 +19,7 @@ from .._base_client import make_request_options from ..types.moderation_model import ModerationModel from ..types.moderation_create_response import ModerationCreateResponse +from ..types.moderation_multi_modal_input_param import ModerationMultiModalInputParam __all__ = ["Moderations", "AsyncModerations"] @@ -26,16 +27,27 @@ class Moderations(SyncAPIResource): @cached_property def with_raw_response(self) -> ModerationsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return ModerationsWithRawResponse(self) @cached_property def with_streaming_response(self) -> ModerationsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
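The `.with_raw_response` / `.with_streaming_response` docstrings repeated throughout this patch describe prefix properties; a minimal sketch of both, using `models.retrieve` as the example call:

```python
from openai import OpenAI

client = OpenAI()

# `.with_raw_response` reads the body eagerly but exposes the HTTP
# response (headers, status); `.parse()` then yields the typed object.
raw = client.models.with_raw_response.retrieve("gpt-4o")
print(raw.headers.get("x-request-id"))
model = raw.parse()

# `.with_streaming_response` defers reading the body until a method
# such as `.read()`, `.iter_lines()` or `.parse()` is called.
with client.models.with_streaming_response.retrieve("gpt-4o") as response:
    print(response.headers.get("content-type"))
    model = response.parse()
```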
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return ModerationsWithStreamingResponse(self) def create( self, *, - input: Union[str, List[str]], + input: Union[str, List[str], Iterable[ModerationMultiModalInputParam]], model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -44,20 +56,19 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModerationCreateResponse: - """ - Classifies if text is potentially harmful. + """Classifies if text and/or image inputs are potentially harmful. - Args: - input: The input text to classify + Learn more in + the [moderation guide](https://platform.openai.com/docs/guides/moderation). - model: Two content moderations models are available: `text-moderation-stable` and - `text-moderation-latest`. + Args: + input: Input (or inputs) to classify. Can be a single string, an array of strings, or + an array of multi-modal input objects similar to other models. - The default is `text-moderation-latest` which will be automatically upgraded - over time. This ensures you are always using our most accurate model. If you use - `text-moderation-stable`, we will provide advanced notice before updating the - model. Accuracy of `text-moderation-stable` may be slightly lower than for - `text-moderation-latest`. + model: The content moderation model you would like to use. Learn more in + [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + learn about available models + [here](https://platform.openai.com/docs/models#moderation). extra_headers: Send extra headers @@ -86,16 +97,27 @@ def create( class AsyncModerations(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncModerationsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncModerationsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncModerationsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncModerationsWithStreamingResponse(self) async def create( self, *, - input: Union[str, List[str]], + input: Union[str, List[str], Iterable[ModerationMultiModalInputParam]], model: Union[str, ModerationModel] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -104,20 +126,19 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModerationCreateResponse: - """ - Classifies if text is potentially harmful. + """Classifies if text and/or image inputs are potentially harmful. - Args: - input: The input text to classify + Learn more in + the [moderation guide](https://platform.openai.com/docs/guides/moderation). 
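A minimal sketch of the widened `input` union in this hunk — the model name follows the linked model docs, and the image URL is a placeholder:

```python
from openai import OpenAI

client = OpenAI()

# Text and image inputs can be mixed; image inputs require a moderation
# model that supports them (e.g. omni-moderation-latest).
result = client.moderations.create(
    model="omni-moderation-latest",
    input=[
        {"type": "text", "text": "some user-supplied text"},
        {
            "type": "image_url",
            "image_url": {"url": "https://example.com/image.png"},  # placeholder
        },
    ],
)
print(result.results[0].flagged)
```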
- model: Two content moderations models are available: `text-moderation-stable` and - `text-moderation-latest`. + Args: + input: Input (or inputs) to classify. Can be a single string, an array of strings, or + an array of multi-modal input objects similar to other models. - The default is `text-moderation-latest` which will be automatically upgraded - over time. This ensures you are always using our most accurate model. If you use - `text-moderation-stable`, we will provide advanced notice before updating the - model. Accuracy of `text-moderation-stable` may be slightly lower than for - `text-moderation-latest`. + model: The content moderation model you would like to use. Learn more in + [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + learn about available models + [here](https://platform.openai.com/docs/models#moderation). extra_headers: Send extra headers diff --git a/portkey_ai/_vendor/openai/resources/uploads/parts.py b/portkey_ai/_vendor/openai/resources/uploads/parts.py index 3ec2592b..d46e5ea1 100644 --- a/portkey_ai/_vendor/openai/resources/uploads/parts.py +++ b/portkey_ai/_vendor/openai/resources/uploads/parts.py @@ -27,10 +27,21 @@ class Parts(SyncAPIResource): @cached_property def with_raw_response(self) -> PartsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return PartsWithRawResponse(self) @cached_property def with_streaming_response(self) -> PartsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return PartsWithStreamingResponse(self) def create( @@ -91,10 +102,21 @@ def create( class AsyncParts(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncPartsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncPartsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncPartsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncPartsWithStreamingResponse(self) async def create( diff --git a/portkey_ai/_vendor/openai/resources/uploads/uploads.py b/portkey_ai/_vendor/openai/resources/uploads/uploads.py index 4100423d..cfb500b6 100644 --- a/portkey_ai/_vendor/openai/resources/uploads/uploads.py +++ b/portkey_ai/_vendor/openai/resources/uploads/uploads.py @@ -2,9 +2,14 @@ from __future__ import annotations -from typing import List -from typing_extensions import Literal - +import io +import os +import logging +import builtins +from typing import List, overload +from pathlib import Path + +import anyio import httpx from ... 
import _legacy_response @@ -16,7 +21,7 @@ PartsWithStreamingResponse, AsyncPartsWithStreamingResponse, ) -from ...types import upload_create_params, upload_complete_params +from ...types import FilePurpose, upload_create_params, upload_complete_params from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ..._utils import ( maybe_transform, @@ -27,10 +32,17 @@ from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ..._base_client import make_request_options from ...types.upload import Upload +from ...types.file_purpose import FilePurpose __all__ = ["Uploads", "AsyncUploads"] +# 64MB +DEFAULT_PART_SIZE = 64 * 1024 * 1024 + +log: logging.Logger = logging.getLogger(__name__) + + class Uploads(SyncAPIResource): @cached_property def parts(self) -> Parts: @@ -38,19 +50,129 @@ def parts(self) -> Parts: @cached_property def with_raw_response(self) -> UploadsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return UploadsWithRawResponse(self) @cached_property def with_streaming_response(self) -> UploadsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return UploadsWithStreamingResponse(self) + @overload + def upload_file_chunked( + self, + *, + file: os.PathLike[str], + mime_type: str, + purpose: FilePurpose, + bytes: int | None = None, + part_size: int | None = None, + md5: str | NotGiven = NOT_GIVEN, + ) -> Upload: + """Splits a file into multiple 64MB parts and uploads them sequentially.""" + + @overload + def upload_file_chunked( + self, + *, + file: bytes, + filename: str, + bytes: int, + mime_type: str, + purpose: FilePurpose, + part_size: int | None = None, + md5: str | NotGiven = NOT_GIVEN, + ) -> Upload: + """Splits an in-memory file into multiple 64MB parts and uploads them sequentially.""" + + def upload_file_chunked( + self, + *, + file: os.PathLike[str] | bytes, + mime_type: str, + purpose: FilePurpose, + filename: str | None = None, + bytes: int | None = None, + part_size: int | None = None, + md5: str | NotGiven = NOT_GIVEN, + ) -> Upload: + """Splits the given file into multiple parts and uploads them sequentially. 
+ + ```py + from pathlib import Path + + client.uploads.upload_file( + file=Path("my-paper.pdf"), + mime_type="pdf", + purpose="assistants", + ) + ``` + """ + if isinstance(file, builtins.bytes): + if filename is None: + raise TypeError("The `filename` argument must be given for in-memory files") + + if bytes is None: + raise TypeError("The `bytes` argument must be given for in-memory files") + else: + if not isinstance(file, Path): + file = Path(file) + + if not filename: + filename = file.name + + if bytes is None: + bytes = file.stat().st_size + + upload = self.create( + bytes=bytes, + filename=filename, + mime_type=mime_type, + purpose=purpose, + ) + + part_ids: list[str] = [] + + if part_size is None: + part_size = DEFAULT_PART_SIZE + + if isinstance(file, builtins.bytes): + buf: io.FileIO | io.BytesIO = io.BytesIO(file) + else: + buf = io.FileIO(file) + + try: + while True: + data = buf.read(part_size) + if not data: + # EOF + break + + part = self.parts.create(upload_id=upload.id, data=data) + log.info("Uploaded part %s for upload %s", part.id, upload.id) + part_ids.append(part.id) + except Exception: + buf.close() + raise + + return self.complete(upload_id=upload.id, part_ids=part_ids, md5=md5) + def create( self, *, bytes: int, filename: str, mime_type: str, - purpose: Literal["assistants", "batch", "fine-tune", "vision"], + purpose: FilePurpose, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -74,7 +196,7 @@ def create( For certain `purpose`s, the correct `mime_type` must be specified. Please refer to documentation for the supported MIME types for your use case: - - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files) + - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files) For guidance on the proper filename extensions for each purpose, please follow the documentation on @@ -221,19 +343,140 @@ def parts(self) -> AsyncParts: @cached_property def with_raw_response(self) -> AsyncUploadsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return the + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ return AsyncUploadsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncUploadsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ return AsyncUploadsWithStreamingResponse(self) + @overload + async def upload_file_chunked( + self, + *, + file: os.PathLike[str], + mime_type: str, + purpose: FilePurpose, + bytes: int | None = None, + part_size: int | None = None, + md5: str | NotGiven = NOT_GIVEN, + ) -> Upload: + """Splits a file into multiple 64MB parts and uploads them sequentially.""" + + @overload + async def upload_file_chunked( + self, + *, + file: bytes, + filename: str, + bytes: int, + mime_type: str, + purpose: FilePurpose, + part_size: int | None = None, + md5: str | NotGiven = NOT_GIVEN, + ) -> Upload: + """Splits an in-memory file into multiple 64MB parts and uploads them sequentially.""" + + async def upload_file_chunked( + self, + *, + file: os.PathLike[str] | bytes, + mime_type: str, + purpose: FilePurpose, + filename: str | None = None, + bytes: int | None = None, + part_size: int | None = None, + md5: str | NotGiven = NOT_GIVEN, + ) -> Upload: + """Splits the given file into multiple parts and uploads them sequentially. + + ```py + from pathlib import Path + + client.uploads.upload_file( + file=Path("my-paper.pdf"), + mime_type="pdf", + purpose="assistants", + ) + ``` + """ + if isinstance(file, builtins.bytes): + if filename is None: + raise TypeError("The `filename` argument must be given for in-memory files") + + if bytes is None: + raise TypeError("The `bytes` argument must be given for in-memory files") + else: + if not isinstance(file, anyio.Path): + file = anyio.Path(file) + + if not filename: + filename = file.name + + if bytes is None: + stat = await file.stat() + bytes = stat.st_size + + upload = await self.create( + bytes=bytes, + filename=filename, + mime_type=mime_type, + purpose=purpose, + ) + + part_ids: list[str] = [] + + if part_size is None: + part_size = DEFAULT_PART_SIZE + + if isinstance(file, anyio.Path): + fd = await file.open("rb") + async with fd: + while True: + data = await fd.read(part_size) + if not data: + # EOF + break + + part = await self.parts.create(upload_id=upload.id, data=data) + log.info("Uploaded part %s for upload %s", part.id, upload.id) + part_ids.append(part.id) + else: + buf = io.BytesIO(file) + + try: + while True: + data = buf.read(part_size) + if not data: + # EOF + break + + part = await self.parts.create(upload_id=upload.id, data=data) + log.info("Uploaded part %s for upload %s", part.id, upload.id) + part_ids.append(part.id) + except Exception: + buf.close() + raise + + return await self.complete(upload_id=upload.id, part_ids=part_ids, md5=md5) + async def create( self, *, bytes: int, filename: str, mime_type: str, - purpose: Literal["assistants", "batch", "fine-tune", "vision"], + purpose: FilePurpose, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -257,7 +500,7 @@ async def create( For certain `purpose`s, the correct `mime_type` must be specified. 
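A minimal sketch of the `upload_file_chunked` helper added above, which wraps the `create` → `parts.create` → `complete` flow — the path, MIME type, and purpose are illustrative:

```python
from pathlib import Path

from openai import OpenAI

client = OpenAI()

# The helper reads the file in DEFAULT_PART_SIZE (64 MB) chunks, uploads
# each chunk as a part, then completes the upload with the part IDs.
upload = client.uploads.upload_file_chunked(
    file=Path("training-data.jsonl"),  # placeholder path
    mime_type="text/jsonl",
    purpose="fine-tune",
)
print(upload.id, upload.status)
```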
Please refer to documentation for the supported MIME types for your use case: - - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files) + - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files) For guidance on the proper filename extensions for each purpose, please follow the documentation on diff --git a/portkey_ai/_vendor/openai/types/__init__.py b/portkey_ai/_vendor/openai/types/__init__.py index f621fb67..72950f24 100644 --- a/portkey_ai/_vendor/openai/types/__init__.py +++ b/portkey_ai/_vendor/openai/types/__init__.py @@ -24,7 +24,9 @@ from .image_model import ImageModel as ImageModel from .file_content import FileContent as FileContent from .file_deleted import FileDeleted as FileDeleted +from .file_purpose import FilePurpose as FilePurpose from .model_deleted import ModelDeleted as ModelDeleted +from .embedding_model import EmbeddingModel as EmbeddingModel from .images_response import ImagesResponse as ImagesResponse from .completion_usage import CompletionUsage as CompletionUsage from .file_list_params import FileListParams as FileListParams @@ -36,6 +38,7 @@ from .batch_create_params import BatchCreateParams as BatchCreateParams from .batch_request_counts import BatchRequestCounts as BatchRequestCounts from .upload_create_params import UploadCreateParams as UploadCreateParams +from .audio_response_format import AudioResponseFormat as AudioResponseFormat from .image_generate_params import ImageGenerateParams as ImageGenerateParams from .upload_complete_params import UploadCompleteParams as UploadCompleteParams from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams @@ -43,4 +46,8 @@ from .moderation_create_params import ModerationCreateParams as ModerationCreateParams from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse +from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam +from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams +from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam +from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam diff --git a/portkey_ai/_vendor/openai/types/audio/__init__.py b/portkey_ai/_vendor/openai/types/audio/__init__.py index 1de5c0ff..822e0f3a 100644 --- a/portkey_ai/_vendor/openai/types/audio/__init__.py +++ b/portkey_ai/_vendor/openai/types/audio/__init__.py @@ -5,6 +5,12 @@ from .translation import Translation as Translation from .speech_model import SpeechModel as SpeechModel from .transcription import Transcription as Transcription +from .transcription_word import TranscriptionWord as TranscriptionWord +from .translation_verbose import TranslationVerbose as TranslationVerbose from .speech_create_params import SpeechCreateParams as SpeechCreateParams +from .transcription_segment import TranscriptionSegment as TranscriptionSegment +from .transcription_verbose import TranscriptionVerbose as TranscriptionVerbose from .translation_create_params import TranslationCreateParams as TranslationCreateParams from .transcription_create_params import TranscriptionCreateParams as TranscriptionCreateParams +from .translation_create_response 
import TranslationCreateResponse as TranslationCreateResponse +from .transcription_create_response import TranscriptionCreateResponse as TranscriptionCreateResponse diff --git a/portkey_ai/_vendor/openai/types/audio/speech_create_params.py b/portkey_ai/_vendor/openai/types/audio/speech_create_params.py index dff66e49..a60d0007 100644 --- a/portkey_ai/_vendor/openai/types/audio/speech_create_params.py +++ b/portkey_ai/_vendor/openai/types/audio/speech_create_params.py @@ -16,7 +16,7 @@ class SpeechCreateParams(TypedDict, total=False): model: Required[Union[str, SpeechModel]] """ - One of the available [TTS models](https://platform.openai.com/docs/models/tts): + One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1` or `tts-1-hd` """ @@ -25,7 +25,7 @@ class SpeechCreateParams(TypedDict, total=False): Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the - [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options). + [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). """ response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] diff --git a/portkey_ai/_vendor/openai/types/audio/transcription_create_params.py b/portkey_ai/_vendor/openai/types/audio/transcription_create_params.py index a825fefe..88805aff 100644 --- a/portkey_ai/_vendor/openai/types/audio/transcription_create_params.py +++ b/portkey_ai/_vendor/openai/types/audio/transcription_create_params.py @@ -7,6 +7,7 @@ from ..._types import FileTypes from ..audio_model import AudioModel +from ..audio_response_format import AudioResponseFormat __all__ = ["TranscriptionCreateParams"] @@ -37,14 +38,14 @@ class TranscriptionCreateParams(TypedDict, total=False): """An optional text to guide the model's style or continue a previous audio segment. - The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) should match the audio language. """ - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] + response_format: AudioResponseFormat """ - The format of the transcript output, in one of these options: `json`, `text`, - `srt`, `verbose_json`, or `vtt`. + The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. """ temperature: float diff --git a/portkey_ai/_vendor/openai/types/audio/transcription_create_response.py b/portkey_ai/_vendor/openai/types/audio/transcription_create_response.py new file mode 100644 index 00000000..2f7bed81 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/audio/transcription_create_response.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import TypeAlias + +from .transcription import Transcription +from .transcription_verbose import TranscriptionVerbose + +__all__ = ["TranscriptionCreateResponse"] + +TranscriptionCreateResponse: TypeAlias = Union[Transcription, TranscriptionVerbose] diff --git a/portkey_ai/_vendor/openai/types/audio/transcription_segment.py b/portkey_ai/_vendor/openai/types/audio/transcription_segment.py new file mode 100644 index 00000000..522c401e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/audio/transcription_segment.py @@ -0,0 +1,49 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List + +from ..._models import BaseModel + +__all__ = ["TranscriptionSegment"] + + +class TranscriptionSegment(BaseModel): + id: int + """Unique identifier of the segment.""" + + avg_logprob: float + """Average logprob of the segment. + + If the value is lower than -1, consider the logprobs failed. + """ + + compression_ratio: float + """Compression ratio of the segment. + + If the value is greater than 2.4, consider the compression failed. + """ + + end: float + """End time of the segment in seconds.""" + + no_speech_prob: float + """Probability of no speech in the segment. + + If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this + segment silent. + """ + + seek: int + """Seek offset of the segment.""" + + start: float + """Start time of the segment in seconds.""" + + temperature: float + """Temperature parameter used for generating the segment.""" + + text: str + """Text content of the segment.""" + + tokens: List[int] + """Array of token IDs for the text content.""" diff --git a/portkey_ai/_vendor/openai/types/audio/transcription_verbose.py b/portkey_ai/_vendor/openai/types/audio/transcription_verbose.py new file mode 100644 index 00000000..3b18fa48 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/audio/transcription_verbose.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from .transcription_word import TranscriptionWord +from .transcription_segment import TranscriptionSegment + +__all__ = ["TranscriptionVerbose"] + + +class TranscriptionVerbose(BaseModel): + duration: str + """The duration of the input audio.""" + + language: str + """The language of the input audio.""" + + text: str + """The transcribed text.""" + + segments: Optional[List[TranscriptionSegment]] = None + """Segments of the transcribed text and their corresponding details.""" + + words: Optional[List[TranscriptionWord]] = None + """Extracted words and their corresponding timestamps.""" diff --git a/portkey_ai/_vendor/openai/types/audio/transcription_word.py b/portkey_ai/_vendor/openai/types/audio/transcription_word.py new file mode 100644 index 00000000..969da325 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/audio/transcription_word.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + +from ..._models import BaseModel + +__all__ = ["TranscriptionWord"] + + +class TranscriptionWord(BaseModel): + end: float + """End time of the word in seconds.""" + + start: float + """Start time of the word in seconds.""" + + word: str + """The text content of the word.""" diff --git a/portkey_ai/_vendor/openai/types/audio/translation_create_params.py b/portkey_ai/_vendor/openai/types/audio/translation_create_params.py index 054996a1..62f85b87 100644 --- a/portkey_ai/_vendor/openai/types/audio/translation_create_params.py +++ b/portkey_ai/_vendor/openai/types/audio/translation_create_params.py @@ -7,6 +7,7 @@ from ..._types import FileTypes from ..audio_model import AudioModel +from ..audio_response_format import AudioResponseFormat __all__ = ["TranslationCreateParams"] @@ -29,14 +30,14 @@ class TranslationCreateParams(TypedDict, total=False): """An optional text to guide the model's style or continue a previous audio segment. 
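The `TranscriptionVerbose`, `TranscriptionSegment`, and `TranscriptionWord` models added here correspond to `response_format="verbose_json"`; a minimal sketch (the audio file path is a placeholder):

```python
from openai import OpenAI

client = OpenAI()

# verbose_json returns a TranscriptionVerbose; word/segment timestamps
# are populated when the matching granularities are requested.
with open("speech.mp3", "rb") as audio:  # placeholder file
    transcript = client.audio.transcriptions.create(
        model="whisper-1",
        file=audio,
        response_format="verbose_json",
        timestamp_granularities=["word", "segment"],
    )

print(transcript.duration, transcript.language)
for word in transcript.words or []:
    print(f"{word.word}: {word.start:.2f}-{word.end:.2f}s")
```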
- The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) should be in English. """ - response_format: str + response_format: AudioResponseFormat """ - The format of the transcript output, in one of these options: `json`, `text`, - `srt`, `verbose_json`, or `vtt`. + The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. """ temperature: float diff --git a/portkey_ai/_vendor/openai/types/audio/translation_create_response.py b/portkey_ai/_vendor/openai/types/audio/translation_create_response.py new file mode 100644 index 00000000..9953813c --- /dev/null +++ b/portkey_ai/_vendor/openai/types/audio/translation_create_response.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import TypeAlias + +from .translation import Translation +from .translation_verbose import TranslationVerbose + +__all__ = ["TranslationCreateResponse"] + +TranslationCreateResponse: TypeAlias = Union[Translation, TranslationVerbose] diff --git a/portkey_ai/_vendor/openai/types/audio/translation_verbose.py b/portkey_ai/_vendor/openai/types/audio/translation_verbose.py new file mode 100644 index 00000000..5901ae75 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/audio/translation_verbose.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from .transcription_segment import TranscriptionSegment + +__all__ = ["TranslationVerbose"] + + +class TranslationVerbose(BaseModel): + duration: str + """The duration of the input audio.""" + + language: str + """The language of the output translation (always `english`).""" + + text: str + """The translated text.""" + + segments: Optional[List[TranscriptionSegment]] = None + """Segments of the translated text and their corresponding details.""" diff --git a/portkey_ai/_vendor/openai/types/audio_response_format.py b/portkey_ai/_vendor/openai/types/audio_response_format.py new file mode 100644 index 00000000..f8c8d459 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/audio_response_format.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["AudioResponseFormat"] + +AudioResponseFormat: TypeAlias = Literal["json", "text", "srt", "verbose_json", "vtt"] diff --git a/portkey_ai/_vendor/openai/types/batch.py b/portkey_ai/_vendor/openai/types/batch.py index 90f6d795..ac3d7ea1 100644 --- a/portkey_ai/_vendor/openai/types/batch.py +++ b/portkey_ai/_vendor/openai/types/batch.py @@ -75,7 +75,7 @@ class Batch(BaseModel): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. 
""" output_file_id: Optional[str] = None diff --git a/portkey_ai/_vendor/openai/types/batch_create_params.py b/portkey_ai/_vendor/openai/types/batch_create_params.py index 55517d28..b30c4d46 100644 --- a/portkey_ai/_vendor/openai/types/batch_create_params.py +++ b/portkey_ai/_vendor/openai/types/batch_create_params.py @@ -32,7 +32,7 @@ class BatchCreateParams(TypedDict, total=False): Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 - requests, and can be up to 100 MB in size. + requests, and can be up to 200 MB in size. """ metadata: Optional[Dict[str, str]] diff --git a/portkey_ai/_vendor/openai/types/beta/__init__.py b/portkey_ai/_vendor/openai/types/beta/__init__.py index 9c5ddfdb..7f76fed0 100644 --- a/portkey_ai/_vendor/openai/types/beta/__init__.py +++ b/portkey_ai/_vendor/openai/types/beta/__init__.py @@ -19,6 +19,7 @@ from .assistant_tool_choice import AssistantToolChoice as AssistantToolChoice from .code_interpreter_tool import CodeInterpreterTool as CodeInterpreterTool from .assistant_stream_event import AssistantStreamEvent as AssistantStreamEvent +from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam from .assistant_create_params import AssistantCreateParams as AssistantCreateParams from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams @@ -28,11 +29,17 @@ from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption +from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams +from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy from .assistant_tool_choice_function import AssistantToolChoiceFunction as AssistantToolChoiceFunction from .assistant_response_format_option import AssistantResponseFormatOption as AssistantResponseFormatOption +from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam as AssistantToolChoiceOptionParam +from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject +from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam as AssistantToolChoiceFunctionParam +from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject from .assistant_response_format_option_param import ( AssistantResponseFormatOptionParam as AssistantResponseFormatOptionParam, ) diff --git a/portkey_ai/_vendor/openai/types/beta/assistant.py b/portkey_ai/_vendor/openai/types/beta/assistant.py index c6a0a4cf..3c8b8e40 100644 --- a/portkey_ai/_vendor/openai/types/beta/assistant.py +++ b/portkey_ai/_vendor/openai/types/beta/assistant.py @@ -56,7 +56,7 @@ class Assistant(BaseModel): This can be useful for storing additional information 
about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ model: str @@ -65,8 +65,8 @@ class Assistant(BaseModel): You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. """ name: Optional[str] = None @@ -85,16 +85,16 @@ class Assistant(BaseModel): response_format: Optional[AssistantResponseFormatOption] = None """Specifies the format that the model must output. - Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to diff --git a/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py b/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py index 84cd4425..568b223c 100644 --- a/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/assistant_create_params.py @@ -3,10 +3,11 @@ from __future__ import annotations from typing import List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict +from typing_extensions import Required, TypedDict from ..chat_model import ChatModel from .assistant_tool_param import AssistantToolParam +from .file_chunking_strategy_param import FileChunkingStrategyParam from .assistant_response_format_option_param import AssistantResponseFormatOptionParam __all__ = [ @@ -15,10 +16,6 @@ "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "ToolResourcesFileSearchVectorStore", - "ToolResourcesFileSearchVectorStoreChunkingStrategy", - "ToolResourcesFileSearchVectorStoreChunkingStrategyAuto", - "ToolResourcesFileSearchVectorStoreChunkingStrategyStatic", - "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic", ] @@ -29,8 +26,8 @@ class AssistantCreateParams(TypedDict, total=False): You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. """ description: Optional[str] @@ -47,7 +44,7 @@ class AssistantCreateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ name: Optional[str] @@ -56,16 +53,16 @@ class AssistantCreateParams(TypedDict, total=False): response_format: Optional[AssistantResponseFormatOptionParam] """Specifies the format that the model must output. - Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -118,43 +115,12 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): """ -class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False): - static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ - ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic -] - - class ToolResourcesFileSearchVectorStore(TypedDict, total=False): - chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy + chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). - If not set, will use the `auto` strategy. + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. """ file_ids: List[str] @@ -169,7 +135,7 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False): This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can - be a maxium of 512 characters long. + be a maximum of 512 characters long. 
""" diff --git a/portkey_ai/_vendor/openai/types/beta/assistant_list_params.py b/portkey_ai/_vendor/openai/types/beta/assistant_list_params.py index f54f6312..834ffbca 100644 --- a/portkey_ai/_vendor/openai/types/beta/assistant_list_params.py +++ b/portkey_ai/_vendor/openai/types/beta/assistant_list_params.py @@ -21,7 +21,7 @@ class AssistantListParams(TypedDict, total=False): """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your + you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ diff --git a/portkey_ai/_vendor/openai/types/beta/assistant_response_format_option_param.py b/portkey_ai/_vendor/openai/types/beta/assistant_response_format_option_param.py index 680a060c..5e724a4d 100644 --- a/portkey_ai/_vendor/openai/types/beta/assistant_response_format_option_param.py +++ b/portkey_ai/_vendor/openai/types/beta/assistant_response_format_option_param.py @@ -5,13 +5,12 @@ from typing import Union from typing_extensions import Literal, TypeAlias -from ...types import shared_params +from ..shared_params.response_format_text import ResponseFormatText +from ..shared_params.response_format_json_object import ResponseFormatJSONObject +from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema __all__ = ["AssistantResponseFormatOptionParam"] AssistantResponseFormatOptionParam: TypeAlias = Union[ - Literal["auto"], - shared_params.ResponseFormatText, - shared_params.ResponseFormatJSONObject, - shared_params.ResponseFormatJSONSchema, + Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema ] diff --git a/portkey_ai/_vendor/openai/types/beta/assistant_stream_event.py b/portkey_ai/_vendor/openai/types/beta/assistant_stream_event.py index f1d8898f..41d3a0c5 100644 --- a/portkey_ai/_vendor/openai/types/beta/assistant_stream_event.py +++ b/portkey_ai/_vendor/openai/types/beta/assistant_stream_event.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Union +from typing import Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from .thread import Thread @@ -51,6 +51,9 @@ class ThreadCreated(BaseModel): event: Literal["thread.created"] + enabled: Optional[bool] = None + """Whether to enable input audio transcription.""" + class ThreadRunCreated(BaseModel): data: Run diff --git a/portkey_ai/_vendor/openai/types/beta/assistant_update_params.py b/portkey_ai/_vendor/openai/types/beta/assistant_update_params.py index ade56581..9a66e41a 100644 --- a/portkey_ai/_vendor/openai/types/beta/assistant_update_params.py +++ b/portkey_ai/_vendor/openai/types/beta/assistant_update_params.py @@ -26,7 +26,7 @@ class AssistantUpdateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ model: str @@ -35,8 +35,8 @@ class AssistantUpdateParams(TypedDict, total=False): You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. 
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. """ name: Optional[str] @@ -45,16 +45,16 @@ class AssistantUpdateParams(TypedDict, total=False): response_format: Optional[AssistantResponseFormatOptionParam] """Specifies the format that the model must output. - Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to diff --git a/portkey_ai/_vendor/openai/types/beta/auto_file_chunking_strategy_param.py b/portkey_ai/_vendor/openai/types/beta/auto_file_chunking_strategy_param.py new file mode 100644 index 00000000..6f17836b --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/auto_file_chunking_strategy_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["AutoFileChunkingStrategyParam"] + + +class AutoFileChunkingStrategyParam(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/file_chunking_strategy.py b/portkey_ai/_vendor/openai/types/beta/file_chunking_strategy.py new file mode 100644 index 00000000..406d69dd --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/file_chunking_strategy.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ..._utils import PropertyInfo +from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject +from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject + +__all__ = ["FileChunkingStrategy"] + +FileChunkingStrategy: TypeAlias = Annotated[ + Union[StaticFileChunkingStrategyObject, OtherFileChunkingStrategyObject], PropertyInfo(discriminator="type") +] diff --git a/portkey_ai/_vendor/openai/types/beta/file_chunking_strategy_param.py b/portkey_ai/_vendor/openai/types/beta/file_chunking_strategy_param.py new file mode 100644 index 00000000..46383358 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/file_chunking_strategy_param.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
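`FileChunkingStrategyParam` (defined just below) is the shared union that replaces the inline `ToolResourcesFileSearchVectorStoreChunkingStrategy*` TypedDicts removed above. A hedged sketch of the two dict shapes the union accepts, reusing the defaults and bounds quoted in the removed docstrings (not new behavior introduced by this diff):

```python
from openai.types.beta import FileChunkingStrategyParam  # re-exported in this package

auto_strategy: FileChunkingStrategyParam = {"type": "auto"}

static_strategy: FileChunkingStrategyParam = {
    "type": "static",
    "static": {
        "max_chunk_size_tokens": 800,  # default 800; min 100, max 4096
        "chunk_overlap_tokens": 400,   # default 400; at most half of max_chunk_size_tokens
    },
}
```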
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam +from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam + +__all__ = ["FileChunkingStrategyParam"] + +FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyParam] diff --git a/portkey_ai/_vendor/openai/types/beta/file_search_tool.py b/portkey_ai/_vendor/openai/types/beta/file_search_tool.py index 26ab1cb8..89fc16c0 100644 --- a/portkey_ai/_vendor/openai/types/beta/file_search_tool.py +++ b/portkey_ai/_vendor/openai/types/beta/file_search_tool.py @@ -5,7 +5,21 @@ from ..._models import BaseModel -__all__ = ["FileSearchTool", "FileSearch"] +__all__ = ["FileSearchTool", "FileSearch", "FileSearchRankingOptions"] + + +class FileSearchRankingOptions(BaseModel): + score_threshold: float + """The score threshold for the file search. + + All values must be a floating point number between 0 and 1. + """ + + ranker: Optional[Literal["auto", "default_2024_08_21"]] = None + """The ranker to use for the file search. + + If not specified will use the `auto` ranker. + """ class FileSearch(BaseModel): @@ -17,7 +31,18 @@ class FileSearch(BaseModel): Note that the file search tool may output fewer than `max_num_results` results. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + """ + + ranking_options: Optional[FileSearchRankingOptions] = None + """The ranking options for the file search. + + If not specified, the file search tool will use the `auto` ranker and a + score_threshold of 0. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. """ diff --git a/portkey_ai/_vendor/openai/types/beta/file_search_tool_param.py b/portkey_ai/_vendor/openai/types/beta/file_search_tool_param.py index 666719f8..c73d0af7 100644 --- a/portkey_ai/_vendor/openai/types/beta/file_search_tool_param.py +++ b/portkey_ai/_vendor/openai/types/beta/file_search_tool_param.py @@ -4,7 +4,21 @@ from typing_extensions import Literal, Required, TypedDict -__all__ = ["FileSearchToolParam", "FileSearch"] +__all__ = ["FileSearchToolParam", "FileSearch", "FileSearchRankingOptions"] + + +class FileSearchRankingOptions(TypedDict, total=False): + score_threshold: Required[float] + """The score threshold for the file search. + + All values must be a floating point number between 0 and 1. + """ + + ranker: Literal["auto", "default_2024_08_21"] + """The ranker to use for the file search. + + If not specified will use the `auto` ranker. + """ class FileSearch(TypedDict, total=False): @@ -16,7 +30,18 @@ class FileSearch(TypedDict, total=False): Note that the file search tool may output fewer than `max_num_results` results. See the - [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + """ + + ranking_options: FileSearchRankingOptions + """The ranking options for the file search. 
+ + If not specified, the file search tool will use the `auto` ranker and a + score_threshold of 0. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. """ diff --git a/portkey_ai/_vendor/openai/types/beta/function_tool_param.py b/portkey_ai/_vendor/openai/types/beta/function_tool_param.py index b44c0d47..d906e02b 100644 --- a/portkey_ai/_vendor/openai/types/beta/function_tool_param.py +++ b/portkey_ai/_vendor/openai/types/beta/function_tool_param.py @@ -4,13 +4,13 @@ from typing_extensions import Literal, Required, TypedDict -from ...types import shared_params +from ..shared_params.function_definition import FunctionDefinition __all__ = ["FunctionToolParam"] class FunctionToolParam(TypedDict, total=False): - function: Required[shared_params.FunctionDefinition] + function: Required[FunctionDefinition] type: Required[Literal["function"]] """The type of tool being defined: `function`""" diff --git a/portkey_ai/_vendor/openai/types/beta/other_file_chunking_strategy_object.py b/portkey_ai/_vendor/openai/types/beta/other_file_chunking_strategy_object.py new file mode 100644 index 00000000..89da560b --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/other_file_chunking_strategy_object.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["OtherFileChunkingStrategyObject"] + + +class OtherFileChunkingStrategyObject(BaseModel): + type: Literal["other"] + """Always `other`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/__init__.py b/portkey_ai/_vendor/openai/types/beta/realtime/__init__.py new file mode 100644 index 00000000..372d4ec1 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/__init__.py @@ -0,0 +1,80 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
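This `__init__.py` flattens the realtime event modules into a single namespace, so downstream code can import event types from the package root instead of individual modules. A small illustrative import, assuming the vendored tree is importable under its usual `openai` name:

```python
# Illustrative only: pull a few realtime event types from the package root
# rather than from their individual modules.
from openai.types.beta.realtime import (
    SessionUpdateEvent,
    ConversationItemCreateEvent,
    ResponseCreateEvent,
)
```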
+ +from __future__ import annotations + +from .session import Session as Session +from .error_event import ErrorEvent as ErrorEvent +from .conversation_item import ConversationItem as ConversationItem +from .realtime_response import RealtimeResponse as RealtimeResponse +from .response_done_event import ResponseDoneEvent as ResponseDoneEvent +from .session_update_event import SessionUpdateEvent as SessionUpdateEvent +from .realtime_client_event import RealtimeClientEvent as RealtimeClientEvent +from .realtime_server_event import RealtimeServerEvent as RealtimeServerEvent +from .response_cancel_event import ResponseCancelEvent as ResponseCancelEvent +from .response_create_event import ResponseCreateEvent as ResponseCreateEvent +from .session_create_params import SessionCreateParams as SessionCreateParams +from .session_created_event import SessionCreatedEvent as SessionCreatedEvent +from .session_updated_event import SessionUpdatedEvent as SessionUpdatedEvent +from .response_created_event import ResponseCreatedEvent as ResponseCreatedEvent +from .conversation_item_param import ConversationItemParam as ConversationItemParam +from .realtime_connect_params import RealtimeConnectParams as RealtimeConnectParams +from .realtime_response_usage import RealtimeResponseUsage as RealtimeResponseUsage +from .session_create_response import SessionCreateResponse as SessionCreateResponse +from .realtime_response_status import RealtimeResponseStatus as RealtimeResponseStatus +from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent +from .conversation_item_content import ConversationItemContent as ConversationItemContent +from .rate_limits_updated_event import RateLimitsUpdatedEvent as RateLimitsUpdatedEvent +from .response_audio_done_event import ResponseAudioDoneEvent as ResponseAudioDoneEvent +from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDeltaEvent +from .conversation_created_event import ConversationCreatedEvent as ConversationCreatedEvent +from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent +from .session_update_event_param import SessionUpdateEventParam as SessionUpdateEventParam +from .realtime_client_event_param import RealtimeClientEventParam as RealtimeClientEventParam +from .response_cancel_event_param import ResponseCancelEventParam as ResponseCancelEventParam +from .response_create_event_param import ResponseCreateEventParam as ResponseCreateEventParam +from .conversation_item_create_event import ConversationItemCreateEvent as ConversationItemCreateEvent +from .conversation_item_delete_event import ConversationItemDeleteEvent as ConversationItemDeleteEvent +from .input_audio_buffer_clear_event import InputAudioBufferClearEvent as InputAudioBufferClearEvent +from .conversation_item_content_param import ConversationItemContentParam as ConversationItemContentParam +from .conversation_item_created_event import ConversationItemCreatedEvent as ConversationItemCreatedEvent +from .conversation_item_deleted_event import ConversationItemDeletedEvent as ConversationItemDeletedEvent +from .input_audio_buffer_append_event import InputAudioBufferAppendEvent as InputAudioBufferAppendEvent +from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent as InputAudioBufferCommitEvent +from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent +from .conversation_item_truncate_event import ConversationItemTruncateEvent as ConversationItemTruncateEvent +from 
.input_audio_buffer_cleared_event import InputAudioBufferClearedEvent as InputAudioBufferClearedEvent +from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent +from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent +from .conversation_item_truncated_event import ConversationItemTruncatedEvent as ConversationItemTruncatedEvent +from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent +from .input_audio_buffer_committed_event import InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent +from .conversation_item_create_event_param import ConversationItemCreateEventParam as ConversationItemCreateEventParam +from .conversation_item_delete_event_param import ConversationItemDeleteEventParam as ConversationItemDeleteEventParam +from .input_audio_buffer_clear_event_param import InputAudioBufferClearEventParam as InputAudioBufferClearEventParam +from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent +from .input_audio_buffer_append_event_param import InputAudioBufferAppendEventParam as InputAudioBufferAppendEventParam +from .input_audio_buffer_commit_event_param import InputAudioBufferCommitEventParam as InputAudioBufferCommitEventParam +from .response_audio_transcript_delta_event import ( + ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, +) +from .conversation_item_truncate_event_param import ( + ConversationItemTruncateEventParam as ConversationItemTruncateEventParam, +) +from .input_audio_buffer_speech_started_event import ( + InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent, +) +from .input_audio_buffer_speech_stopped_event import ( + InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent, +) +from .response_function_call_arguments_done_event import ( + ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, +) +from .response_function_call_arguments_delta_event import ( + ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, +) +from .conversation_item_input_audio_transcription_failed_event import ( + ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent, +) +from .conversation_item_input_audio_transcription_completed_event import ( + ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent, +) diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_created_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_created_event.py new file mode 100644 index 00000000..4ba05408 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_created_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
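`ConversationCreatedEvent` (defined just below) models the server event emitted when a conversation is created. A hypothetical payload, validated with the pydantic v2 constructor that the SDK's `BaseModel` exposes in a pydantic v2 environment; the IDs are invented for the example:

```python
from openai.types.beta.realtime import ConversationCreatedEvent

event = ConversationCreatedEvent.model_validate(  # pydantic v2 constructor
    {
        "event_id": "event_123",  # invented example IDs
        "type": "conversation.created",
        "conversation": {"id": "conv_456", "object": "realtime.conversation"},
    }
)
assert event.conversation.id == "conv_456"
```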
+ +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationCreatedEvent", "Conversation"] + + +class Conversation(BaseModel): + id: Optional[str] = None + """The unique ID of the conversation.""" + + object: Optional[Literal["realtime.conversation"]] = None + """The object type, must be `realtime.conversation`.""" + + +class ConversationCreatedEvent(BaseModel): + conversation: Conversation + """The conversation resource.""" + + event_id: str + """The unique ID of the server event.""" + + type: Literal["conversation.created"] + """The event type, must be `conversation.created`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item.py new file mode 100644 index 00000000..4edf6c4d --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item.py @@ -0,0 +1,61 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item_content import ConversationItemContent + +__all__ = ["ConversationItem"] + + +class ConversationItem(BaseModel): + id: Optional[str] = None + """ + The unique ID of the item, this can be generated by the client to help manage + server-side context, but is not required because the server will generate one if + not provided. + """ + + arguments: Optional[str] = None + """The arguments of the function call (for `function_call` items).""" + + call_id: Optional[str] = None + """ + The ID of the function call (for `function_call` and `function_call_output` + items). If passed on a `function_call_output` item, the server will check that a + `function_call` item with the same ID exists in the conversation history. + """ + + content: Optional[List[ConversationItemContent]] = None + """The content of the message, applicable for `message` items. + + - Message items of role `system` support only `input_text` content + - Message items of role `user` support `input_text` and `input_audio` content + - Message items of role `assistant` support `text` content. + """ + + name: Optional[str] = None + """The name of the function being called (for `function_call` items).""" + + object: Optional[Literal["realtime.item"]] = None + """Identifier for the API object being returned - always `realtime.item`.""" + + output: Optional[str] = None + """The output of the function call (for `function_call_output` items).""" + + role: Optional[Literal["user", "assistant", "system"]] = None + """ + The role of the message sender (`user`, `assistant`, `system`), only applicable + for `message` items. + """ + + status: Optional[Literal["completed", "incomplete"]] = None + """The status of the item (`completed`, `incomplete`). + + These have no effect on the conversation, but are accepted for consistency with + the `conversation.item.created` event. 
+ """ + + type: Optional[Literal["message", "function_call", "function_call_output"]] = None + """The type of the item (`message`, `function_call`, `function_call_output`).""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_content.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_content.py new file mode 100644 index 00000000..b854aa0e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_content.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationItemContent"] + + +class ConversationItemContent(BaseModel): + id: Optional[str] = None + """ + ID of a previous conversation item (like a model response), used for + `item_reference` content types. + """ + + audio: Optional[str] = None + """Base64-encoded audio bytes, used for `input_audio` content type.""" + + text: Optional[str] = None + """The text content, used for `input_text` and `text` content types.""" + + transcript: Optional[str] = None + """The transcript of the audio, used for `input_audio` content type.""" + + type: Optional[Literal["input_text", "input_audio", "item_reference", "text"]] = None + """The content type (`input_text`, `input_audio`, `item_reference`, `text`).""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_content_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_content_param.py new file mode 100644 index 00000000..b354d789 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_content_param.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["ConversationItemContentParam"] + + +class ConversationItemContentParam(TypedDict, total=False): + id: str + """ + ID of a previous conversation item (like a model response), used for + `item_reference` content types. + """ + + audio: str + """Base64-encoded audio bytes, used for `input_audio` content type.""" + + text: str + """The text content, used for `input_text` and `text` content types.""" + + transcript: str + """The transcript of the audio, used for `input_audio` content type.""" + + type: Literal["input_text", "input_audio", "item_reference", "text"] + """The content type (`input_text`, `input_audio`, `item_reference`, `text`).""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_create_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_create_event.py new file mode 100644 index 00000000..50d30967 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_create_event.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ConversationItemCreateEvent"] + + +class ConversationItemCreateEvent(BaseModel): + item: ConversationItem + """The item to add to the conversation.""" + + type: Literal["conversation.item.create"] + """The event type, must be `conversation.item.create`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" + + previous_item_id: Optional[str] = None + """The ID of the preceding item after which the new item will be inserted. + + If not set, the new item will be appended to the end of the conversation. If + set, it allows an item to be inserted mid-conversation. If the ID cannot be + found, an error will be returned and the item will not be added. + """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_create_event_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_create_event_param.py new file mode 100644 index 00000000..b8c8bbc2 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_create_event_param.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from .conversation_item_param import ConversationItemParam + +__all__ = ["ConversationItemCreateEventParam"] + + +class ConversationItemCreateEventParam(TypedDict, total=False): + item: Required[ConversationItemParam] + """The item to add to the conversation.""" + + type: Required[Literal["conversation.item.create"]] + """The event type, must be `conversation.item.create`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" + + previous_item_id: str + """The ID of the preceding item after which the new item will be inserted. + + If not set, the new item will be appended to the end of the conversation. If + set, it allows an item to be inserted mid-conversation. If the ID cannot be + found, an error will be returned and the item will not be added. + """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_created_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_created_event.py new file mode 100644 index 00000000..2f203882 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_created_event.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ConversationItemCreatedEvent"] + + +class ConversationItemCreatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item: ConversationItem + """The item to add to the conversation.""" + + previous_item_id: str + """ + The ID of the preceding item in the Conversation context, allows the client to + understand the order of the conversation. 
+ """ + + type: Literal["conversation.item.created"] + """The event type, must be `conversation.item.created`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_delete_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_delete_event.py new file mode 100644 index 00000000..02ca8250 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_delete_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationItemDeleteEvent"] + + +class ConversationItemDeleteEvent(BaseModel): + item_id: str + """The ID of the item to delete.""" + + type: Literal["conversation.item.delete"] + """The event type, must be `conversation.item.delete`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_delete_event_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_delete_event_param.py new file mode 100644 index 00000000..c3f88d66 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_delete_event_param.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ConversationItemDeleteEventParam"] + + +class ConversationItemDeleteEventParam(TypedDict, total=False): + item_id: Required[str] + """The ID of the item to delete.""" + + type: Required[Literal["conversation.item.delete"]] + """The event type, must be `conversation.item.delete`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_deleted_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_deleted_event.py new file mode 100644 index 00000000..a35a9781 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_deleted_event.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationItemDeletedEvent"] + + +class ConversationItemDeletedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item that was deleted.""" + + type: Literal["conversation.item.deleted"] + """The event type, must be `conversation.item.deleted`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py new file mode 100644 index 00000000..ded79cc0 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationItemInputAudioTranscriptionCompletedEvent"] + + +class ConversationItemInputAudioTranscriptionCompletedEvent(BaseModel): + content_index: int + """The index of the content part containing the audio.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the user message item containing the audio.""" + + transcript: str + """The transcribed text.""" + + type: Literal["conversation.item.input_audio_transcription.completed"] + """ + The event type, must be `conversation.item.input_audio_transcription.completed`. + """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py new file mode 100644 index 00000000..cecac93e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationItemInputAudioTranscriptionFailedEvent", "Error"] + + +class Error(BaseModel): + code: Optional[str] = None + """Error code, if any.""" + + message: Optional[str] = None + """A human-readable error message.""" + + param: Optional[str] = None + """Parameter related to the error, if any.""" + + type: Optional[str] = None + """The type of error.""" + + +class ConversationItemInputAudioTranscriptionFailedEvent(BaseModel): + content_index: int + """The index of the content part containing the audio.""" + + error: Error + """Details of the transcription error.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the user message item.""" + + type: Literal["conversation.item.input_audio_transcription.failed"] + """The event type, must be `conversation.item.input_audio_transcription.failed`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_param.py new file mode 100644 index 00000000..ac0f8431 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_param.py @@ -0,0 +1,62 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Literal, TypedDict + +from .conversation_item_content_param import ConversationItemContentParam + +__all__ = ["ConversationItemParam"] + + +class ConversationItemParam(TypedDict, total=False): + id: str + """ + The unique ID of the item, this can be generated by the client to help manage + server-side context, but is not required because the server will generate one if + not provided. + """ + + arguments: str + """The arguments of the function call (for `function_call` items).""" + + call_id: str + """ + The ID of the function call (for `function_call` and `function_call_output` + items). If passed on a `function_call_output` item, the server will check that a + `function_call` item with the same ID exists in the conversation history. + """ + + content: Iterable[ConversationItemContentParam] + """The content of the message, applicable for `message` items. 
+ + - Message items of role `system` support only `input_text` content + - Message items of role `user` support `input_text` and `input_audio` content + - Message items of role `assistant` support `text` content. + """ + + name: str + """The name of the function being called (for `function_call` items).""" + + object: Literal["realtime.item"] + """Identifier for the API object being returned - always `realtime.item`.""" + + output: str + """The output of the function call (for `function_call_output` items).""" + + role: Literal["user", "assistant", "system"] + """ + The role of the message sender (`user`, `assistant`, `system`), only applicable + for `message` items. + """ + + status: Literal["completed", "incomplete"] + """The status of the item (`completed`, `incomplete`). + + These have no effect on the conversation, but are accepted for consistency with + the `conversation.item.created` event. + """ + + type: Literal["message", "function_call", "function_call_output"] + """The type of the item (`message`, `function_call`, `function_call_output`).""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_truncate_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_truncate_event.py new file mode 100644 index 00000000..cb336bba --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_truncate_event.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationItemTruncateEvent"] + + +class ConversationItemTruncateEvent(BaseModel): + audio_end_ms: int + """Inclusive duration up to which audio is truncated, in milliseconds. + + If the audio_end_ms is greater than the actual audio duration, the server will + respond with an error. + """ + + content_index: int + """The index of the content part to truncate. Set this to 0.""" + + item_id: str + """The ID of the assistant message item to truncate. + + Only assistant message items can be truncated. + """ + + type: Literal["conversation.item.truncate"] + """The event type, must be `conversation.item.truncate`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_truncate_event_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_truncate_event_param.py new file mode 100644 index 00000000..d3ad1e1e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_truncate_event_param.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ConversationItemTruncateEventParam"] + + +class ConversationItemTruncateEventParam(TypedDict, total=False): + audio_end_ms: Required[int] + """Inclusive duration up to which audio is truncated, in milliseconds. + + If the audio_end_ms is greater than the actual audio duration, the server will + respond with an error. + """ + + content_index: Required[int] + """The index of the content part to truncate. Set this to 0.""" + + item_id: Required[str] + """The ID of the assistant message item to truncate. + + Only assistant message items can be truncated. 
+ """ + + type: Required[Literal["conversation.item.truncate"]] + """The event type, must be `conversation.item.truncate`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_truncated_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_truncated_event.py new file mode 100644 index 00000000..36368fa2 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/conversation_item_truncated_event.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ConversationItemTruncatedEvent"] + + +class ConversationItemTruncatedEvent(BaseModel): + audio_end_ms: int + """The duration up to which the audio was truncated, in milliseconds.""" + + content_index: int + """The index of the content part that was truncated.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the assistant message item that was truncated.""" + + type: Literal["conversation.item.truncated"] + """The event type, must be `conversation.item.truncated`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/error_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/error_event.py new file mode 100644 index 00000000..e020fc38 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/error_event.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ErrorEvent", "Error"] + + +class Error(BaseModel): + message: str + """A human-readable error message.""" + + type: str + """The type of error (e.g., "invalid_request_error", "server_error").""" + + code: Optional[str] = None + """Error code, if any.""" + + event_id: Optional[str] = None + """The event_id of the client event that caused the error, if applicable.""" + + param: Optional[str] = None + """Parameter related to the error, if any.""" + + +class ErrorEvent(BaseModel): + error: Error + """Details of the error.""" + + event_id: str + """The unique ID of the server event.""" + + type: Literal["error"] + """The event type, must be `error`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_append_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_append_event.py new file mode 100644 index 00000000..a253a648 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_append_event.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["InputAudioBufferAppendEvent"] + + +class InputAudioBufferAppendEvent(BaseModel): + audio: str + """Base64-encoded audio bytes. + + This must be in the format specified by the `input_audio_format` field in the + session configuration. 
+ """ + + type: Literal["input_audio_buffer.append"] + """The event type, must be `input_audio_buffer.append`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_append_event_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_append_event_param.py new file mode 100644 index 00000000..3ad0bc73 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_append_event_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["InputAudioBufferAppendEventParam"] + + +class InputAudioBufferAppendEventParam(TypedDict, total=False): + audio: Required[str] + """Base64-encoded audio bytes. + + This must be in the format specified by the `input_audio_format` field in the + session configuration. + """ + + type: Required[Literal["input_audio_buffer.append"]] + """The event type, must be `input_audio_buffer.append`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_clear_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_clear_event.py new file mode 100644 index 00000000..b0624d34 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_clear_event.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["InputAudioBufferClearEvent"] + + +class InputAudioBufferClearEvent(BaseModel): + type: Literal["input_audio_buffer.clear"] + """The event type, must be `input_audio_buffer.clear`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py new file mode 100644 index 00000000..2bd6bc5a --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_clear_event_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["InputAudioBufferClearEventParam"] + + +class InputAudioBufferClearEventParam(TypedDict, total=False): + type: Required[Literal["input_audio_buffer.clear"]] + """The event type, must be `input_audio_buffer.clear`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_cleared_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_cleared_event.py new file mode 100644 index 00000000..632e1b94 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_cleared_event.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["InputAudioBufferClearedEvent"] + + +class InputAudioBufferClearedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + type: Literal["input_audio_buffer.cleared"] + """The event type, must be `input_audio_buffer.cleared`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_commit_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_commit_event.py new file mode 100644 index 00000000..7b6f5e46 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_commit_event.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["InputAudioBufferCommitEvent"] + + +class InputAudioBufferCommitEvent(BaseModel): + type: Literal["input_audio_buffer.commit"] + """The event type, must be `input_audio_buffer.commit`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py new file mode 100644 index 00000000..c9c927ab --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_commit_event_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["InputAudioBufferCommitEventParam"] + + +class InputAudioBufferCommitEventParam(TypedDict, total=False): + type: Required[Literal["input_audio_buffer.commit"]] + """The event type, must be `input_audio_buffer.commit`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_committed_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_committed_event.py new file mode 100644 index 00000000..3071eff3 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_committed_event.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["InputAudioBufferCommittedEvent"] + + +class InputAudioBufferCommittedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the user message item that will be created.""" + + previous_item_id: str + """The ID of the preceding item after which the new item will be inserted.""" + + type: Literal["input_audio_buffer.committed"] + """The event type, must be `input_audio_buffer.committed`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py new file mode 100644 index 00000000..4f3ab082 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_speech_started_event.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
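The two speech-boundary events introduced next mark server-side voice-activity detection. A sketch of a barge-in handler; `stop_playback` is a hypothetical application callback, not part of this SDK:

```python
from openai.types.beta.realtime import RealtimeServerEvent

def on_vad_event(event: RealtimeServerEvent, stop_playback) -> None:
    if event.type == "input_audio_buffer.speech_started":
        stop_playback()  # barge-in: the user started talking over the assistant
    elif event.type == "input_audio_buffer.speech_stopped":
        # audio_end_ms includes the session's configured `min_silence_duration_ms`
        print(f"speech ended at {event.audio_end_ms} ms")
```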
+ +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["InputAudioBufferSpeechStartedEvent"] + + +class InputAudioBufferSpeechStartedEvent(BaseModel): + audio_start_ms: int + """ + Milliseconds from the start of all audio written to the buffer during the + session when speech was first detected. This will correspond to the beginning of + audio sent to the model, and thus includes the `prefix_padding_ms` configured in + the Session. + """ + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the user message item that will be created when speech stops.""" + + type: Literal["input_audio_buffer.speech_started"] + """The event type, must be `input_audio_buffer.speech_started`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py new file mode 100644 index 00000000..40568170 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["InputAudioBufferSpeechStoppedEvent"] + + +class InputAudioBufferSpeechStoppedEvent(BaseModel): + audio_end_ms: int + """Milliseconds since the session started when speech stopped. + + This will correspond to the end of audio sent to the model, and thus includes + the `min_silence_duration_ms` configured in the Session. + """ + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the user message item that will be created.""" + + type: Literal["input_audio_buffer.speech_stopped"] + """The event type, must be `input_audio_buffer.speech_stopped`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/rate_limits_updated_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/rate_limits_updated_event.py new file mode 100644 index 00000000..7e12283c --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/rate_limits_updated_event.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["RateLimitsUpdatedEvent", "RateLimit"] + + +class RateLimit(BaseModel): + limit: Optional[int] = None + """The maximum allowed value for the rate limit.""" + + name: Optional[Literal["requests", "tokens"]] = None + """The name of the rate limit (`requests`, `tokens`).""" + + remaining: Optional[int] = None + """The remaining value before the limit is reached.""" + + reset_seconds: Optional[float] = None + """Seconds until the rate limit resets.""" + + +class RateLimitsUpdatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + rate_limits: List[RateLimit] + """List of rate limit information.""" + + type: Literal["rate_limits.updated"] + """The event type, must be `rate_limits.updated`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/realtime_client_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_client_event.py new file mode 100644 index 00000000..0769184c --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_client_event.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. 
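
> The two VAD events above bracket each detected utterance: both offsets count milliseconds of audio written to the buffer, so their difference approximates utterance length (inclusive of the configured padding and silence windows). A sketch over already-parsed event dicts, with `rate_limits.updated` handled alongside:

```python
def on_buffer_event(event: dict, state: dict) -> None:
    """Track server-VAD speech spans and rate-limit headroom (a sketch)."""
    if event["type"] == "input_audio_buffer.speech_started":
        state["speech_start_ms"] = event["audio_start_ms"]  # includes prefix padding
    elif event["type"] == "input_audio_buffer.speech_stopped":
        start = state.pop("speech_start_ms", event["audio_end_ms"])
        print(f"item {event['item_id']}: ~{event['audio_end_ms'] - start} ms of speech")
    elif event["type"] == "rate_limits.updated":
        for limit in event["rate_limits"]:
            print(f"{limit['name']}: {limit['remaining']} of {limit['limit']} remaining")
```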
See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ...._utils import PropertyInfo +from .session_update_event import SessionUpdateEvent +from .response_cancel_event import ResponseCancelEvent +from .response_create_event import ResponseCreateEvent +from .conversation_item_create_event import ConversationItemCreateEvent +from .conversation_item_delete_event import ConversationItemDeleteEvent +from .input_audio_buffer_clear_event import InputAudioBufferClearEvent +from .input_audio_buffer_append_event import InputAudioBufferAppendEvent +from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent +from .conversation_item_truncate_event import ConversationItemTruncateEvent + +__all__ = ["RealtimeClientEvent"] + +RealtimeClientEvent: TypeAlias = Annotated[ + Union[ + SessionUpdateEvent, + InputAudioBufferAppendEvent, + InputAudioBufferCommitEvent, + InputAudioBufferClearEvent, + ConversationItemCreateEvent, + ConversationItemTruncateEvent, + ConversationItemDeleteEvent, + ResponseCreateEvent, + ResponseCancelEvent, + ], + PropertyInfo(discriminator="type"), +] diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/realtime_client_event_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_client_event_param.py new file mode 100644 index 00000000..4020892c --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_client_event_param.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .session_update_event_param import SessionUpdateEventParam +from .response_cancel_event_param import ResponseCancelEventParam +from .response_create_event_param import ResponseCreateEventParam +from .conversation_item_create_event_param import ConversationItemCreateEventParam +from .conversation_item_delete_event_param import ConversationItemDeleteEventParam +from .input_audio_buffer_clear_event_param import InputAudioBufferClearEventParam +from .input_audio_buffer_append_event_param import InputAudioBufferAppendEventParam +from .input_audio_buffer_commit_event_param import InputAudioBufferCommitEventParam +from .conversation_item_truncate_event_param import ConversationItemTruncateEventParam + +__all__ = ["RealtimeClientEventParam"] + +RealtimeClientEventParam: TypeAlias = Union[ + SessionUpdateEventParam, + InputAudioBufferAppendEventParam, + InputAudioBufferCommitEventParam, + InputAudioBufferClearEventParam, + ConversationItemCreateEventParam, + ConversationItemTruncateEventParam, + ConversationItemDeleteEventParam, + ResponseCreateEventParam, + ResponseCancelEventParam, +] diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/realtime_connect_params.py b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_connect_params.py new file mode 100644 index 00000000..76474f3d --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_connect_params.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
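
> Both unions above are discriminated on `type`, which is why a single helper can accept any of the nine client-event shapes. A sketch, assuming the vendored import path introduced by this diff:

```python
import json

# Import path follows the vendored layout added in this diff.
from portkey_ai._vendor.openai.types.beta.realtime.realtime_client_event_param import (
    RealtimeClientEventParam,
)

def send_client_event(ws, event: RealtimeClientEventParam) -> None:
    # Every union member is a TypedDict with a required `type` discriminator,
    # so plain JSON serialization works uniformly across all nine shapes.
    ws.send(json.dumps(event))

# e.g. send_client_event(ws, {"type": "input_audio_buffer.commit"})
```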
+ +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["RealtimeConnectParams"] + + +class RealtimeConnectParams(TypedDict, total=False): + model: Required[str] diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/realtime_response.py b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_response.py new file mode 100644 index 00000000..3e1b1406 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_response.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item import ConversationItem +from .realtime_response_usage import RealtimeResponseUsage +from .realtime_response_status import RealtimeResponseStatus + +__all__ = ["RealtimeResponse"] + + +class RealtimeResponse(BaseModel): + id: Optional[str] = None + """The unique ID of the response.""" + + metadata: Optional[object] = None + """Developer-provided string key-value pairs associated with this response.""" + + object: Optional[Literal["realtime.response"]] = None + """The object type, must be `realtime.response`.""" + + output: Optional[List[ConversationItem]] = None + """The list of output items generated by the response.""" + + status: Optional[Literal["completed", "cancelled", "failed", "incomplete"]] = None + """ + The final status of the response (`completed`, `cancelled`, `failed`, or + `incomplete`). + """ + + status_details: Optional[RealtimeResponseStatus] = None + """Additional details about the status.""" + + usage: Optional[RealtimeResponseUsage] = None + """Usage statistics for the Response, this will correspond to billing. + + A Realtime API session will maintain a conversation context and append new Items + to the Conversation, thus output from previous turns (text and audio tokens) + will become the input for later turns. + """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/realtime_response_status.py b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_response_status.py new file mode 100644 index 00000000..7189cd58 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_response_status.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["RealtimeResponseStatus", "Error"] + + +class Error(BaseModel): + code: Optional[str] = None + """Error code, if any.""" + + type: Optional[str] = None + """The type of error.""" + + +class RealtimeResponseStatus(BaseModel): + error: Optional[Error] = None + """ + A description of the error that caused the response to fail, populated when the + `status` is `failed`. + """ + + reason: Optional[Literal["turn_detected", "client_cancelled", "max_output_tokens", "content_filter"]] = None + """The reason the Response did not complete. + + For a `cancelled` Response, one of `turn_detected` (the server VAD detected a + new start of speech) or `client_cancelled` (the client sent a cancel event). For + an `incomplete` Response, one of `max_output_tokens` or `content_filter` (the + server-side safety filter activated and cut off the response). 
+ """ + + type: Optional[Literal["completed", "cancelled", "incomplete", "failed"]] = None + """ + The type of error that caused the response to fail, corresponding with the + `status` field (`completed`, `cancelled`, `incomplete`, `failed`). + """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/realtime_response_usage.py b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_response_usage.py new file mode 100644 index 00000000..7ca822e2 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_response_usage.py @@ -0,0 +1,52 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["RealtimeResponseUsage", "InputTokenDetails", "OutputTokenDetails"] + + +class InputTokenDetails(BaseModel): + audio_tokens: Optional[int] = None + """The number of audio tokens used in the Response.""" + + cached_tokens: Optional[int] = None + """The number of cached tokens used in the Response.""" + + text_tokens: Optional[int] = None + """The number of text tokens used in the Response.""" + + +class OutputTokenDetails(BaseModel): + audio_tokens: Optional[int] = None + """The number of audio tokens used in the Response.""" + + text_tokens: Optional[int] = None + """The number of text tokens used in the Response.""" + + +class RealtimeResponseUsage(BaseModel): + input_token_details: Optional[InputTokenDetails] = None + """Details about the input tokens used in the Response.""" + + input_tokens: Optional[int] = None + """ + The number of input tokens used in the Response, including text and audio + tokens. + """ + + output_token_details: Optional[OutputTokenDetails] = None + """Details about the output tokens used in the Response.""" + + output_tokens: Optional[int] = None + """ + The number of output tokens sent in the Response, including text and audio + tokens. + """ + + total_tokens: Optional[int] = None + """ + The total number of tokens in the Response including input and output text and + audio tokens. + """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/realtime_server_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_server_event.py new file mode 100644 index 00000000..5f8ed55b --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/realtime_server_event.py @@ -0,0 +1,72 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ...._utils import PropertyInfo +from .error_event import ErrorEvent +from .response_done_event import ResponseDoneEvent +from .session_created_event import SessionCreatedEvent +from .session_updated_event import SessionUpdatedEvent +from .response_created_event import ResponseCreatedEvent +from .response_text_done_event import ResponseTextDoneEvent +from .rate_limits_updated_event import RateLimitsUpdatedEvent +from .response_audio_done_event import ResponseAudioDoneEvent +from .response_text_delta_event import ResponseTextDeltaEvent +from .conversation_created_event import ConversationCreatedEvent +from .response_audio_delta_event import ResponseAudioDeltaEvent +from .conversation_item_created_event import ConversationItemCreatedEvent +from .conversation_item_deleted_event import ConversationItemDeletedEvent +from .response_output_item_done_event import ResponseOutputItemDoneEvent +from .input_audio_buffer_cleared_event import InputAudioBufferClearedEvent +from .response_content_part_done_event import ResponseContentPartDoneEvent +from .response_output_item_added_event import ResponseOutputItemAddedEvent +from .conversation_item_truncated_event import ConversationItemTruncatedEvent +from .response_content_part_added_event import ResponseContentPartAddedEvent +from .input_audio_buffer_committed_event import InputAudioBufferCommittedEvent +from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent +from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent +from .input_audio_buffer_speech_started_event import InputAudioBufferSpeechStartedEvent +from .input_audio_buffer_speech_stopped_event import InputAudioBufferSpeechStoppedEvent +from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent +from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent +from .conversation_item_input_audio_transcription_failed_event import ConversationItemInputAudioTranscriptionFailedEvent +from .conversation_item_input_audio_transcription_completed_event import ( + ConversationItemInputAudioTranscriptionCompletedEvent, +) + +__all__ = ["RealtimeServerEvent"] + +RealtimeServerEvent: TypeAlias = Annotated[ + Union[ + ErrorEvent, + SessionCreatedEvent, + SessionUpdatedEvent, + ConversationCreatedEvent, + InputAudioBufferCommittedEvent, + InputAudioBufferClearedEvent, + InputAudioBufferSpeechStartedEvent, + InputAudioBufferSpeechStoppedEvent, + ConversationItemCreatedEvent, + ConversationItemInputAudioTranscriptionCompletedEvent, + ConversationItemInputAudioTranscriptionFailedEvent, + ConversationItemTruncatedEvent, + ConversationItemDeletedEvent, + ResponseCreatedEvent, + ResponseDoneEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseTextDeltaEvent, + ResponseTextDoneEvent, + ResponseAudioTranscriptDeltaEvent, + ResponseAudioTranscriptDoneEvent, + ResponseAudioDeltaEvent, + ResponseAudioDoneEvent, + ResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionCallArgumentsDoneEvent, + RateLimitsUpdatedEvent, + ], + PropertyInfo(discriminator="type"), +] diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_audio_delta_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_audio_delta_event.py new file mode 100644 index 00000000..8e0128d9 --- /dev/null +++ 
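
> The 28-member server-event union above is what a receive loop switches on; every member exposes `type` and `event_id`. A minimal dispatch sketch over raw text frames — the `handlers` table and the iterable `ws` transport are assumptions:

```python
import json

def run_receive_loop(ws, handlers: dict) -> None:
    """Dispatch incoming server events to per-type handlers (a sketch)."""
    for raw in ws:  # assumes the transport yields JSON text frames
        event = json.loads(raw)
        handler = handlers.get(event["type"])
        if handler is not None:
            handler(event)
        elif event["type"] == "error":
            # Surface unhandled protocol errors instead of silently dropping them.
            raise RuntimeError(f"server error (event {event['event_id']}): {event}")
```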
b/portkey_ai/_vendor/openai/types/beta/realtime/response_audio_delta_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseAudioDeltaEvent"] + + +class ResponseAudioDeltaEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + delta: str + """Base64-encoded audio data delta.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.audio.delta"] + """The event type, must be `response.audio.delta`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_audio_done_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_audio_done_event.py new file mode 100644 index 00000000..68e78bc7 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_audio_done_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseAudioDoneEvent"] + + +class ResponseAudioDoneEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.audio.done"] + """The event type, must be `response.audio.done`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_audio_transcript_delta_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_audio_transcript_delta_event.py new file mode 100644 index 00000000..3609948d --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_audio_transcript_delta_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseAudioTranscriptDeltaEvent"] + + +class ResponseAudioTranscriptDeltaEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + delta: str + """The transcript delta.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.audio_transcript.delta"] + """The event type, must be `response.audio_transcript.delta`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_audio_transcript_done_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_audio_transcript_done_event.py new file mode 100644 index 00000000..4e4436a9 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_audio_transcript_done_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
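
> Model audio arrives as base64 `response.audio.delta` chunks per item, terminated by `response.audio.done`; decoding and concatenating them reconstructs raw audio in the session's `output_audio_format`. A sketch (`play` is a hypothetical playback helper):

```python
import base64
from collections import defaultdict

audio_buffers = defaultdict(bytearray)  # item_id -> accumulated raw audio

def on_audio_event(event: dict) -> None:
    if event["type"] == "response.audio.delta":
        audio_buffers[event["item_id"]] += base64.b64decode(event["delta"])
    elif event["type"] == "response.audio.done":
        pcm = bytes(audio_buffers.pop(event["item_id"], b""))
        play(pcm)  # hypothetical: bytes are in the session's `output_audio_format`
```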
+ +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseAudioTranscriptDoneEvent"] + + +class ResponseAudioTranscriptDoneEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + transcript: str + """The final transcript of the audio.""" + + type: Literal["response.audio_transcript.done"] + """The event type, must be `response.audio_transcript.done`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_cancel_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_cancel_event.py new file mode 100644 index 00000000..c5ff991e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_cancel_event.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseCancelEvent"] + + +class ResponseCancelEvent(BaseModel): + type: Literal["response.cancel"] + """The event type, must be `response.cancel`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" + + response_id: Optional[str] = None + """ + A specific response ID to cancel - if not provided, will cancel an in-progress + response in the default conversation. + """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_cancel_event_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_cancel_event_param.py new file mode 100644 index 00000000..f3374073 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_cancel_event_param.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseCancelEventParam"] + + +class ResponseCancelEventParam(TypedDict, total=False): + type: Required[Literal["response.cancel"]] + """The event type, must be `response.cancel`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" + + response_id: str + """ + A specific response ID to cancel - if not provided, will cancel an in-progress + response in the default conversation. + """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_content_part_added_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_content_part_added_event.py new file mode 100644 index 00000000..45c8f20f --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_content_part_added_event.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
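
> `response.cancel` above is the barge-in primitive: when the user talks over the assistant, cancel the in-flight response, optionally targeting a specific `response_id`. A sketch:

```python
import json
from typing import Optional

def cancel_response(ws, response_id: Optional[str] = None) -> None:
    event = {"type": "response.cancel"}
    if response_id is not None:
        # Without a response_id the server cancels the in-progress
        # response in the default conversation.
        event["response_id"] = response_id
    ws.send(json.dumps(event))
```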
+ +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseContentPartAddedEvent", "Part"] + + +class Part(BaseModel): + audio: Optional[str] = None + """Base64-encoded audio data (if type is "audio").""" + + text: Optional[str] = None + """The text content (if type is "text").""" + + transcript: Optional[str] = None + """The transcript of the audio (if type is "audio").""" + + type: Optional[Literal["text", "audio"]] = None + """The content type ("text", "audio").""" + + +class ResponseContentPartAddedEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item to which the content part was added.""" + + output_index: int + """The index of the output item in the response.""" + + part: Part + """The content part that was added.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.content_part.added"] + """The event type, must be `response.content_part.added`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_content_part_done_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_content_part_done_event.py new file mode 100644 index 00000000..3d161161 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_content_part_done_event.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseContentPartDoneEvent", "Part"] + + +class Part(BaseModel): + audio: Optional[str] = None + """Base64-encoded audio data (if type is "audio").""" + + text: Optional[str] = None + """The text content (if type is "text").""" + + transcript: Optional[str] = None + """The transcript of the audio (if type is "audio").""" + + type: Optional[Literal["text", "audio"]] = None + """The content type ("text", "audio").""" + + +class ResponseContentPartDoneEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + part: Part + """The content part that is done.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.content_part.done"] + """The event type, must be `response.content_part.done`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_create_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_create_event.py new file mode 100644 index 00000000..00ba1e5d --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_create_event.py @@ -0,0 +1,115 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ResponseCreateEvent", "Response", "ResponseTool"] + + +class ResponseTool(BaseModel): + description: Optional[str] = None + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). 
+ """ + + name: Optional[str] = None + """The name of the function.""" + + parameters: Optional[object] = None + """Parameters of the function in JSON Schema.""" + + type: Optional[Literal["function"]] = None + """The type of the tool, i.e. `function`.""" + + +class Response(BaseModel): + conversation: Union[str, Literal["auto", "none"], None] = None + """Controls which conversation the response is added to. + + Currently supports `auto` and `none`, with `auto` as the default value. The + `auto` value means that the contents of the response will be added to the + default conversation. Set this to `none` to create an out-of-band response which + will not add items to default conversation. + """ + + input: Optional[List[ConversationItem]] = None + """Input items to include in the prompt for the model. + + Creates a new context for this response, without including the default + conversation. Can include references to items from the default conversation. + """ + + instructions: Optional[str] = None + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_response_output_tokens: Union[int, Literal["inf"], None] = None + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + metadata: Optional[object] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format. Keys can be a maximum of 64 characters long and values can be + a maximum of 512 characters long. + """ + + modalities: Optional[List[Literal["text", "audio"]]] = None + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. + """ + + output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None + """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + temperature: Optional[float] = None + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + + tool_choice: Optional[str] = None + """How the model chooses tools. + + Options are `auto`, `none`, `required`, or specify a function. + """ + + tools: Optional[List[ResponseTool]] = None + """Tools (functions) available to the model.""" + + voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo` `sage`, `shimmer` and `verse`. 
+ """ + + +class ResponseCreateEvent(BaseModel): + type: Literal["response.create"] + """The event type, must be `response.create`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" + + response: Optional[Response] = None + """Create a new Realtime response with these parameters""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_create_event_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_create_event_param.py new file mode 100644 index 00000000..7c92b32d --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_create_event_param.py @@ -0,0 +1,116 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypedDict + +from .conversation_item_param import ConversationItemParam + +__all__ = ["ResponseCreateEventParam", "Response", "ResponseTool"] + + +class ResponseTool(TypedDict, total=False): + description: str + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: str + """The name of the function.""" + + parameters: object + """Parameters of the function in JSON Schema.""" + + type: Literal["function"] + """The type of the tool, i.e. `function`.""" + + +class Response(TypedDict, total=False): + conversation: Union[str, Literal["auto", "none"]] + """Controls which conversation the response is added to. + + Currently supports `auto` and `none`, with `auto` as the default value. The + `auto` value means that the contents of the response will be added to the + default conversation. Set this to `none` to create an out-of-band response which + will not add items to default conversation. + """ + + input: Iterable[ConversationItemParam] + """Input items to include in the prompt for the model. + + Creates a new context for this response, without including the default + conversation. Can include references to items from the default conversation. + """ + + instructions: str + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_response_output_tokens: Union[int, Literal["inf"]] + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + metadata: Optional[object] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format. Keys can be a maximum of 64 characters long and values can be + a maximum of 512 characters long. 
+ """ + + modalities: List[Literal["text", "audio"]] + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. + """ + + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] + """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + temperature: float + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + + tool_choice: str + """How the model chooses tools. + + Options are `auto`, `none`, `required`, or specify a function. + """ + + tools: Iterable[ResponseTool] + """Tools (functions) available to the model.""" + + voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo` `sage`, `shimmer` and `verse`. + """ + + +class ResponseCreateEventParam(TypedDict, total=False): + type: Required[Literal["response.create"]] + """The event type, must be `response.create`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" + + response: Response + """Create a new Realtime response with these parameters""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_created_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_created_event.py new file mode 100644 index 00000000..a4990cf0 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_created_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel +from .realtime_response import RealtimeResponse + +__all__ = ["ResponseCreatedEvent"] + + +class ResponseCreatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + response: RealtimeResponse + """The response resource.""" + + type: Literal["response.created"] + """The event type, must be `response.created`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_done_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_done_event.py new file mode 100644 index 00000000..9e655184 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_done_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel +from .realtime_response import RealtimeResponse + +__all__ = ["ResponseDoneEvent"] + + +class ResponseDoneEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + response: RealtimeResponse + """The response resource.""" + + type: Literal["response.done"] + """The event type, must be `response.done`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_function_call_arguments_delta_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_function_call_arguments_delta_event.py new file mode 100644 index 00000000..cdbb64e6 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_function_call_arguments_delta_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseFunctionCallArgumentsDeltaEvent"] + + +class ResponseFunctionCallArgumentsDeltaEvent(BaseModel): + call_id: str + """The ID of the function call.""" + + delta: str + """The arguments delta as a JSON string.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the function call item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.function_call_arguments.delta"] + """The event type, must be `response.function_call_arguments.delta`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_function_call_arguments_done_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_function_call_arguments_done_event.py new file mode 100644 index 00000000..0a5db533 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_function_call_arguments_done_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseFunctionCallArgumentsDoneEvent"] + + +class ResponseFunctionCallArgumentsDoneEvent(BaseModel): + arguments: str + """The final arguments as a JSON string.""" + + call_id: str + """The ID of the function call.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the function call item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.function_call_arguments.done"] + """The event type, must be `response.function_call_arguments.done`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_output_item_added_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_output_item_added_event.py new file mode 100644 index 00000000..c89bfdc3 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_output_item_added_event.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ResponseOutputItemAddedEvent"] + + +class ResponseOutputItemAddedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item: ConversationItem + """The item to add to the conversation.""" + + output_index: int + """The index of the output item in the Response.""" + + response_id: str + """The ID of the Response to which the item belongs.""" + + type: Literal["response.output_item.added"] + """The event type, must be `response.output_item.added`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_output_item_done_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_output_item_done_event.py new file mode 100644 index 00000000..b5910e22 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_output_item_done_event.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
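
> Function-call arguments stream as JSON-string fragments keyed by `call_id`, and the `done` event's `arguments` field carries the complete JSON, so accumulated fragments are only needed for progressive display. A sketch; the `TOOLS` registry and `call_names` mapping (recorded from earlier conversation items) are assumptions:

```python
import json
from collections import defaultdict

arg_buffers = defaultdict(str)  # call_id -> partial JSON arguments (for live display)
TOOLS = {}                      # hypothetical function-name -> callable registry

def on_function_call_event(event: dict, call_names: dict) -> None:
    if event["type"] == "response.function_call_arguments.delta":
        arg_buffers[event["call_id"]] += event["delta"]
    elif event["type"] == "response.function_call_arguments.done":
        arg_buffers.pop(event["call_id"], None)
        args = json.loads(event["arguments"])        # authoritative final JSON
        TOOLS[call_names[event["call_id"]]](**args)  # invoke the local tool
```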
+ +from typing_extensions import Literal + +from ...._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ResponseOutputItemDoneEvent"] + + +class ResponseOutputItemDoneEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + item: ConversationItem + """The item to add to the conversation.""" + + output_index: int + """The index of the output item in the Response.""" + + response_id: str + """The ID of the Response to which the item belongs.""" + + type: Literal["response.output_item.done"] + """The event type, must be `response.output_item.done`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_text_delta_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_text_delta_event.py new file mode 100644 index 00000000..c463b3c3 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_text_delta_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseTextDeltaEvent"] + + +class ResponseTextDeltaEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + delta: str + """The text delta.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + type: Literal["response.text.delta"] + """The event type, must be `response.text.delta`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/response_text_done_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/response_text_done_event.py new file mode 100644 index 00000000..020ff41d --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/response_text_done_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ResponseTextDoneEvent"] + + +class ResponseTextDoneEvent(BaseModel): + content_index: int + """The index of the content part in the item's content array.""" + + event_id: str + """The unique ID of the server event.""" + + item_id: str + """The ID of the item.""" + + output_index: int + """The index of the output item in the response.""" + + response_id: str + """The ID of the response.""" + + text: str + """The final text content.""" + + type: Literal["response.text.done"] + """The event type, must be `response.text.done`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/session.py b/portkey_ai/_vendor/openai/types/beta/realtime/session.py new file mode 100644 index 00000000..09cdbb02 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/session.py @@ -0,0 +1,148 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["Session", "InputAudioTranscription", "Tool", "TurnDetection"] + + +class InputAudioTranscription(BaseModel): + model: Optional[str] = None + """ + The model to use for transcription, `whisper-1` is the only currently supported + model. 
+ """ + + +class Tool(BaseModel): + description: Optional[str] = None + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: Optional[str] = None + """The name of the function.""" + + parameters: Optional[object] = None + """Parameters of the function in JSON Schema.""" + + type: Optional[Literal["function"]] = None + """The type of the tool, i.e. `function`.""" + + +class TurnDetection(BaseModel): + prefix_padding_ms: Optional[int] = None + """Amount of audio to include before the VAD detected speech (in milliseconds). + + Defaults to 300ms. + """ + + silence_duration_ms: Optional[int] = None + """Duration of silence to detect speech stop (in milliseconds). + + Defaults to 500ms. With shorter values the model will respond more quickly, but + may jump in on short pauses from the user. + """ + + threshold: Optional[float] = None + """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + + A higher threshold will require louder audio to activate the model, and thus + might perform better in noisy environments. + """ + + type: Optional[Literal["server_vad"]] = None + """Type of turn detection, only `server_vad` is currently supported.""" + + +class Session(BaseModel): + id: Optional[str] = None + """Unique identifier for the session object.""" + + input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None + """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + input_audio_transcription: Optional[InputAudioTranscription] = None + """ + Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. Transcription runs + asynchronously through Whisper and should be treated as rough guidance rather + than the representation understood by the model. + """ + + instructions: Optional[str] = None + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_response_output_tokens: Union[int, Literal["inf"], None] = None + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + modalities: Optional[List[Literal["text", "audio"]]] = None + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. 
+ """ + + model: Union[ + str, + Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ], + None, + ] = None + """The Realtime model used for this session.""" + + output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None + """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + temperature: Optional[float] = None + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + + tool_choice: Optional[str] = None + """How the model chooses tools. + + Options are `auto`, `none`, `required`, or specify a function. + """ + + tools: Optional[List[Tool]] = None + """Tools (functions) available to the model.""" + + turn_detection: Optional[TurnDetection] = None + """Configuration for turn detection. + + Can be set to `null` to turn off. Server VAD means that the model will detect + the start and end of speech based on audio volume and respond at the end of user + speech. + """ + + voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo` `sage`, `shimmer` and `verse`. + """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/session_create_params.py b/portkey_ai/_vendor/openai/types/beta/realtime/session_create_params.py new file mode 100644 index 00000000..f56f2c5c --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/session_create_params.py @@ -0,0 +1,149 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["SessionCreateParams", "InputAudioTranscription", "Tool", "TurnDetection"] + + +class SessionCreateParams(TypedDict, total=False): + model: Required[ + Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + ] + """The Realtime model used for this session.""" + + input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] + """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + input_audio_transcription: InputAudioTranscription + """ + Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. Transcription runs + asynchronously through Whisper and should be treated as rough guidance rather + than the representation understood by the model. + """ + + instructions: str + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). 
The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_response_output_tokens: Union[int, Literal["inf"]] + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + modalities: List[Literal["text", "audio"]] + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. + """ + + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] + """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + temperature: float + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + + tool_choice: str + """How the model chooses tools. + + Options are `auto`, `none`, `required`, or specify a function. + """ + + tools: Iterable[Tool] + """Tools (functions) available to the model.""" + + turn_detection: TurnDetection + """Configuration for turn detection. + + Can be set to `null` to turn off. Server VAD means that the model will detect + the start and end of speech based on audio volume and respond at the end of user + speech. + """ + + voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo` `sage`, `shimmer` and `verse`. + """ + + +class InputAudioTranscription(TypedDict, total=False): + model: str + """ + The model to use for transcription, `whisper-1` is the only currently supported + model. + """ + + +class Tool(TypedDict, total=False): + description: str + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: str + """The name of the function.""" + + parameters: object + """Parameters of the function in JSON Schema.""" + + type: Literal["function"] + """The type of the tool, i.e. `function`.""" + + +class TurnDetection(TypedDict, total=False): + create_response: bool + """Whether or not to automatically generate a response when VAD is enabled. + + `true` by default. + """ + + prefix_padding_ms: int + """Amount of audio to include before the VAD detected speech (in milliseconds). + + Defaults to 300ms. + """ + + silence_duration_ms: int + """Duration of silence to detect speech stop (in milliseconds). + + Defaults to 500ms. With shorter values the model will respond more quickly, but + may jump in on short pauses from the user. + """ + + threshold: float + """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + + A higher threshold will require louder audio to activate the model, and thus + might perform better in noisy environments. 
+ """ + + type: str + """Type of turn detection, only `server_vad` is currently supported.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/session_create_response.py b/portkey_ai/_vendor/openai/types/beta/realtime/session_create_response.py new file mode 100644 index 00000000..31f591b2 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/session_create_response.py @@ -0,0 +1,150 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["SessionCreateResponse", "ClientSecret", "InputAudioTranscription", "Tool", "TurnDetection"] + + +class ClientSecret(BaseModel): + expires_at: Optional[int] = None + """Timestamp for when the token expires. + + Currently, all tokens expire after one minute. + """ + + value: Optional[str] = None + """ + Ephemeral key usable in client environments to authenticate connections to the + Realtime API. Use this in client-side environments rather than a standard API + token, which should only be used server-side. + """ + + +class InputAudioTranscription(BaseModel): + model: Optional[str] = None + """ + The model to use for transcription, `whisper-1` is the only currently supported + model. + """ + + +class Tool(BaseModel): + description: Optional[str] = None + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: Optional[str] = None + """The name of the function.""" + + parameters: Optional[object] = None + """Parameters of the function in JSON Schema.""" + + type: Optional[Literal["function"]] = None + """The type of the tool, i.e. `function`.""" + + +class TurnDetection(BaseModel): + prefix_padding_ms: Optional[int] = None + """Amount of audio to include before the VAD detected speech (in milliseconds). + + Defaults to 300ms. + """ + + silence_duration_ms: Optional[int] = None + """Duration of silence to detect speech stop (in milliseconds). + + Defaults to 500ms. With shorter values the model will respond more quickly, but + may jump in on short pauses from the user. + """ + + threshold: Optional[float] = None + """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + + A higher threshold will require louder audio to activate the model, and thus + might perform better in noisy environments. + """ + + type: Optional[str] = None + """Type of turn detection, only `server_vad` is currently supported.""" + + +class SessionCreateResponse(BaseModel): + client_secret: Optional[ClientSecret] = None + """Ephemeral key returned by the API.""" + + input_audio_format: Optional[str] = None + """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + input_audio_transcription: Optional[InputAudioTranscription] = None + """ + Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. Transcription runs + asynchronously through Whisper and should be treated as rough guidance rather + than the representation understood by the model. + """ + + instructions: Optional[str] = None + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. 
The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_response_output_tokens: Union[int, Literal["inf"], None] = None + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + modalities: Optional[List[Literal["text", "audio"]]] = None + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. + """ + + output_audio_format: Optional[str] = None + """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + temperature: Optional[float] = None + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + + tool_choice: Optional[str] = None + """How the model chooses tools. + + Options are `auto`, `none`, `required`, or specify a function. + """ + + tools: Optional[List[Tool]] = None + """Tools (functions) available to the model.""" + + turn_detection: Optional[TurnDetection] = None + """Configuration for turn detection. + + Can be set to `null` to turn off. Server VAD means that the model will detect + the start and end of speech based on audio volume and respond at the end of user + speech. + """ + + voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo` `sage`, `shimmer` and `verse`. + """ diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/session_created_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/session_created_event.py new file mode 100644 index 00000000..baf6af38 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/session_created_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .session import Session +from ...._models import BaseModel + +__all__ = ["SessionCreatedEvent"] + + +class SessionCreatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + session: Session + """Realtime session object configuration.""" + + type: Literal["session.created"] + """The event type, must be `session.created`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/session_update_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/session_update_event.py new file mode 100644 index 00000000..c04220aa --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/session_update_event.py @@ -0,0 +1,158 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
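
> The `client_secret` above is the point of `SessionCreateResponse`: a backend mints the session and forwards only the short-lived `value` (tokens currently expire after one minute) to the browser. A sketch, assuming the `beta.realtime.sessions` surface this PR exports mirrors the vendored OpenAI SDK; the client construction is illustrative:

```python
from portkey_ai import Portkey  # BetaSessions is exported by this PR

client = Portkey(api_key="PORTKEY_API_KEY", virtual_key="OPENAI_VIRTUAL_KEY")

session = client.beta.realtime.sessions.create(
    model="gpt-4o-realtime-preview",
    modalities=["text", "audio"],
)

# Hand only the ephemeral secret to client-side code; never ship the real API key.
ephemeral_token = session.client_secret.value
```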
+ +from typing import List, Union, Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["SessionUpdateEvent", "Session", "SessionInputAudioTranscription", "SessionTool", "SessionTurnDetection"] + + +class SessionInputAudioTranscription(BaseModel): + model: Optional[str] = None + """ + The model to use for transcription, `whisper-1` is the only currently supported + model. + """ + + +class SessionTool(BaseModel): + description: Optional[str] = None + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: Optional[str] = None + """The name of the function.""" + + parameters: Optional[object] = None + """Parameters of the function in JSON Schema.""" + + type: Optional[Literal["function"]] = None + """The type of the tool, i.e. `function`.""" + + +class SessionTurnDetection(BaseModel): + create_response: Optional[bool] = None + """Whether or not to automatically generate a response when VAD is enabled. + + `true` by default. + """ + + prefix_padding_ms: Optional[int] = None + """Amount of audio to include before the VAD detected speech (in milliseconds). + + Defaults to 300ms. + """ + + silence_duration_ms: Optional[int] = None + """Duration of silence to detect speech stop (in milliseconds). + + Defaults to 500ms. With shorter values the model will respond more quickly, but + may jump in on short pauses from the user. + """ + + threshold: Optional[float] = None + """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + + A higher threshold will require louder audio to activate the model, and thus + might perform better in noisy environments. + """ + + type: Optional[str] = None + """Type of turn detection, only `server_vad` is currently supported.""" + + +class Session(BaseModel): + model: Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + """The Realtime model used for this session.""" + + input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None + """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + input_audio_transcription: Optional[SessionInputAudioTranscription] = None + """ + Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. Transcription runs + asynchronously through Whisper and should be treated as rough guidance rather + than the representation understood by the model. + """ + + instructions: Optional[str] = None + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. 
+ """ + + max_response_output_tokens: Union[int, Literal["inf"], None] = None + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + modalities: Optional[List[Literal["text", "audio"]]] = None + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. + """ + + output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None + """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + temperature: Optional[float] = None + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + + tool_choice: Optional[str] = None + """How the model chooses tools. + + Options are `auto`, `none`, `required`, or specify a function. + """ + + tools: Optional[List[SessionTool]] = None + """Tools (functions) available to the model.""" + + turn_detection: Optional[SessionTurnDetection] = None + """Configuration for turn detection. + + Can be set to `null` to turn off. Server VAD means that the model will detect + the start and end of speech based on audio volume and respond at the end of user + speech. + """ + + voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo` `sage`, `shimmer` and `verse`. + """ + + +class SessionUpdateEvent(BaseModel): + session: Session + """Realtime session object configuration.""" + + type: Literal["session.update"] + """The event type, must be `session.update`.""" + + event_id: Optional[str] = None + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/session_update_event_param.py b/portkey_ai/_vendor/openai/types/beta/realtime/session_update_event_param.py new file mode 100644 index 00000000..aa06069b --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/session_update_event_param.py @@ -0,0 +1,166 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = [ + "SessionUpdateEventParam", + "Session", + "SessionInputAudioTranscription", + "SessionTool", + "SessionTurnDetection", +] + + +class SessionInputAudioTranscription(TypedDict, total=False): + model: str + """ + The model to use for transcription, `whisper-1` is the only currently supported + model. + """ + + +class SessionTool(TypedDict, total=False): + description: str + """ + The description of the function, including guidance on when and how to call it, + and guidance about what to tell the user when calling (if anything). + """ + + name: str + """The name of the function.""" + + parameters: object + """Parameters of the function in JSON Schema.""" + + type: Literal["function"] + """The type of the tool, i.e. `function`.""" + + +class SessionTurnDetection(TypedDict, total=False): + create_response: bool + """Whether or not to automatically generate a response when VAD is enabled. + + `true` by default. 
+ """ + + prefix_padding_ms: int + """Amount of audio to include before the VAD detected speech (in milliseconds). + + Defaults to 300ms. + """ + + silence_duration_ms: int + """Duration of silence to detect speech stop (in milliseconds). + + Defaults to 500ms. With shorter values the model will respond more quickly, but + may jump in on short pauses from the user. + """ + + threshold: float + """Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. + + A higher threshold will require louder audio to activate the model, and thus + might perform better in noisy environments. + """ + + type: str + """Type of turn detection, only `server_vad` is currently supported.""" + + +class Session(TypedDict, total=False): + model: Required[ + Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + ] + """The Realtime model used for this session.""" + + input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] + """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + input_audio_transcription: SessionInputAudioTranscription + """ + Configuration for input audio transcription, defaults to off and can be set to + `null` to turn off once on. Input audio transcription is not native to the + model, since the model consumes audio directly. Transcription runs + asynchronously through Whisper and should be treated as rough guidance rather + than the representation understood by the model. + """ + + instructions: str + """The default system instructions (i.e. + + system message) prepended to model calls. This field allows the client to guide + the model on desired responses. The model can be instructed on response content + and format, (e.g. "be extremely succinct", "act friendly", "here are examples of + good responses") and on audio behavior (e.g. "talk quickly", "inject emotion + into your voice", "laugh frequently"). The instructions are not guaranteed to be + followed by the model, but they provide guidance to the model on the desired + behavior. + + Note that the server sets default instructions which will be used if this field + is not set and are visible in the `session.created` event at the start of the + session. + """ + + max_response_output_tokens: Union[int, Literal["inf"]] + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + `inf` for the maximum available tokens for a given model. Defaults to `inf`. + """ + + modalities: List[Literal["text", "audio"]] + """The set of modalities the model can respond with. + + To disable audio, set this to ["text"]. + """ + + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] + """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + + temperature: float + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + + tool_choice: str + """How the model chooses tools. + + Options are `auto`, `none`, `required`, or specify a function. + """ + + tools: Iterable[SessionTool] + """Tools (functions) available to the model.""" + + turn_detection: SessionTurnDetection + """Configuration for turn detection. + + Can be set to `null` to turn off. Server VAD means that the model will detect + the start and end of speech based on audio volume and respond at the end of user + speech. 
+ """ + + voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] + """The voice the model uses to respond. + + Voice cannot be changed during the session once the model has responded with + audio at least once. Current voice options are `alloy`, `ash`, `ballad`, + `coral`, `echo` `sage`, `shimmer` and `verse`. + """ + + +class SessionUpdateEventParam(TypedDict, total=False): + session: Required[Session] + """Realtime session object configuration.""" + + type: Required[Literal["session.update"]] + """The event type, must be `session.update`.""" + + event_id: str + """Optional client-generated ID used to identify this event.""" diff --git a/portkey_ai/_vendor/openai/types/beta/realtime/session_updated_event.py b/portkey_ai/_vendor/openai/types/beta/realtime/session_updated_event.py new file mode 100644 index 00000000..b9b6488e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/realtime/session_updated_event.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .session import Session +from ...._models import BaseModel + +__all__ = ["SessionUpdatedEvent"] + + +class SessionUpdatedEvent(BaseModel): + event_id: str + """The unique ID of the server event.""" + + session: Session + """Realtime session object configuration.""" + + type: Literal["session.updated"] + """The event type, must be `session.updated`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/static_file_chunking_strategy.py b/portkey_ai/_vendor/openai/types/beta/static_file_chunking_strategy.py new file mode 100644 index 00000000..60800935 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/static_file_chunking_strategy.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + +from ..._models import BaseModel + +__all__ = ["StaticFileChunkingStrategy"] + + +class StaticFileChunkingStrategy(BaseModel): + chunk_overlap_tokens: int + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: int + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ diff --git a/portkey_ai/_vendor/openai/types/beta/static_file_chunking_strategy_object.py b/portkey_ai/_vendor/openai/types/beta/static_file_chunking_strategy_object.py new file mode 100644 index 00000000..896c4b83 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/static_file_chunking_strategy_object.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel +from .static_file_chunking_strategy import StaticFileChunkingStrategy + +__all__ = ["StaticFileChunkingStrategyObject"] + + +class StaticFileChunkingStrategyObject(BaseModel): + static: StaticFileChunkingStrategy + + type: Literal["static"] + """Always `static`.""" diff --git a/portkey_ai/_vendor/openai/types/beta/static_file_chunking_strategy_param.py b/portkey_ai/_vendor/openai/types/beta/static_file_chunking_strategy_param.py new file mode 100644 index 00000000..f917ac56 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/static_file_chunking_strategy_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["StaticFileChunkingStrategyParam"] + + +class StaticFileChunkingStrategyParam(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ diff --git a/portkey_ai/_vendor/openai/types/beta/thread.py b/portkey_ai/_vendor/openai/types/beta/thread.py index 6f7a6c7d..37d50ccb 100644 --- a/portkey_ai/_vendor/openai/types/beta/thread.py +++ b/portkey_ai/_vendor/openai/types/beta/thread.py @@ -45,7 +45,7 @@ class Thread(BaseModel): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ object: Literal["thread"] diff --git a/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py b/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py index 7490b25e..8310ba12 100644 --- a/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py +++ b/portkey_ai/_vendor/openai/types/beta/thread_create_and_run_params.py @@ -9,6 +9,7 @@ from .function_tool_param import FunctionToolParam from .file_search_tool_param import FileSearchToolParam from .code_interpreter_tool_param import CodeInterpreterToolParam +from .file_chunking_strategy_param import FileChunkingStrategyParam from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam from .threads.message_content_part_param import MessageContentPartParam from .assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -24,10 +25,6 @@ "ThreadToolResourcesCodeInterpreter", "ThreadToolResourcesFileSearch", "ThreadToolResourcesFileSearchVectorStore", - "ThreadToolResourcesFileSearchVectorStoreChunkingStrategy", - "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto", - "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic", - "ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", @@ -75,7 +72,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ model: Union[str, ChatModel, None] @@ -89,23 +86,23 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): parallel_tool_calls: bool """ Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. """ response_format: Optional[AssistantResponseFormatOptionParam] """Specifies the format that the model must output. 
- Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -205,7 +202,7 @@ class ThreadMessage(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ @@ -218,44 +215,12 @@ class ThreadToolResourcesCodeInterpreter(TypedDict, total=False): """ -class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False): - static: Required[ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ThreadToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ - ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto, - ThreadToolResourcesFileSearchVectorStoreChunkingStrategyStatic, -] - - class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False): - chunking_strategy: ThreadToolResourcesFileSearchVectorStoreChunkingStrategy + chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). - If not set, will use the `auto` strategy. + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. """ file_ids: List[str] @@ -270,7 +235,7 @@ class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False): This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can - be a maxium of 512 characters long. + be a maximum of 512 characters long. """ @@ -310,7 +275,7 @@ class Thread(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. 
""" tool_resources: Optional[ThreadToolResources] @@ -367,7 +332,7 @@ class TruncationStrategy(TypedDict, total=False): """ -class ThreadCreateAndRunParamsNonStreaming(ThreadCreateAndRunParamsBase): +class ThreadCreateAndRunParamsNonStreaming(ThreadCreateAndRunParamsBase, total=False): stream: Optional[Literal[False]] """ If `true`, returns a stream of events that happen during the Run as server-sent diff --git a/portkey_ai/_vendor/openai/types/beta/thread_create_params.py b/portkey_ai/_vendor/openai/types/beta/thread_create_params.py index f9561aa4..3ac6c7d6 100644 --- a/portkey_ai/_vendor/openai/types/beta/thread_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/thread_create_params.py @@ -6,6 +6,7 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from .code_interpreter_tool_param import CodeInterpreterToolParam +from .file_chunking_strategy_param import FileChunkingStrategyParam from .threads.message_content_part_param import MessageContentPartParam __all__ = [ @@ -18,10 +19,6 @@ "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", "ToolResourcesFileSearchVectorStore", - "ToolResourcesFileSearchVectorStoreChunkingStrategy", - "ToolResourcesFileSearchVectorStoreChunkingStrategyAuto", - "ToolResourcesFileSearchVectorStoreChunkingStrategyStatic", - "ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic", ] @@ -37,7 +34,7 @@ class ThreadCreateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ tool_resources: Optional[ToolResources] @@ -86,7 +83,7 @@ class Message(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ @@ -99,43 +96,12 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): """ -class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False): - static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[ - ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic -] - - class ToolResourcesFileSearchVectorStore(TypedDict, total=False): - chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy + chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). - If not set, will use the `auto` strategy. + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. 
""" file_ids: List[str] @@ -150,7 +116,7 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False): This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can - be a maxium of 512 characters long. + be a maximum of 512 characters long. """ diff --git a/portkey_ai/_vendor/openai/types/beta/thread_update_params.py b/portkey_ai/_vendor/openai/types/beta/thread_update_params.py index 7210ab77..78c5ec4f 100644 --- a/portkey_ai/_vendor/openai/types/beta/thread_update_params.py +++ b/portkey_ai/_vendor/openai/types/beta/thread_update_params.py @@ -14,7 +14,7 @@ class ThreadUpdateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ tool_resources: Optional[ToolResources] diff --git a/portkey_ai/_vendor/openai/types/beta/threads/message.py b/portkey_ai/_vendor/openai/types/beta/threads/message.py index 298a1d42..63c5c480 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/message.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/message.py @@ -71,7 +71,7 @@ class Message(BaseModel): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ object: Literal["thread.message"] diff --git a/portkey_ai/_vendor/openai/types/beta/threads/message_create_params.py b/portkey_ai/_vendor/openai/types/beta/threads/message_create_params.py index 2b450deb..2c4edfdf 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/message_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/message_create_params.py @@ -32,7 +32,7 @@ class MessageCreateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ diff --git a/portkey_ai/_vendor/openai/types/beta/threads/message_list_params.py b/portkey_ai/_vendor/openai/types/beta/threads/message_list_params.py index 18c2442f..a7c22a66 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/message_list_params.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/message_list_params.py @@ -21,7 +21,7 @@ class MessageListParams(TypedDict, total=False): """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your + you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ diff --git a/portkey_ai/_vendor/openai/types/beta/threads/message_update_params.py b/portkey_ai/_vendor/openai/types/beta/threads/message_update_params.py index 7000f331..e8f8cc91 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/message_update_params.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/message_update_params.py @@ -16,5 +16,5 @@ class MessageUpdateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ diff --git a/portkey_ai/_vendor/openai/types/beta/threads/run.py b/portkey_ai/_vendor/openai/types/beta/threads/run.py index 0579e229..ad32135b 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/run.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/run.py @@ -138,7 +138,7 @@ class Run(BaseModel): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ model: str @@ -154,7 +154,7 @@ class Run(BaseModel): parallel_tool_calls: bool """ Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. """ @@ -167,16 +167,16 @@ class Run(BaseModel): response_format: Optional[AssistantResponseFormatOption] = None """Specifies the format that the model must output. - Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to diff --git a/portkey_ai/_vendor/openai/types/beta/threads/run_create_params.py b/portkey_ai/_vendor/openai/types/beta/threads/run_create_params.py index d3e6d9c4..88dc3964 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/run_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/run_create_params.py @@ -2,11 +2,12 @@ from __future__ import annotations -from typing import Union, Iterable, Optional +from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict from ...chat_model import ChatModel from ..assistant_tool_param import AssistantToolParam +from .runs.run_step_include import RunStepInclude from .message_content_part_param import MessageContentPartParam from ..code_interpreter_tool_param import CodeInterpreterToolParam from ..assistant_tool_choice_option_param import AssistantToolChoiceOptionParam @@ -32,6 +33,18 @@ class RunCreateParamsBase(TypedDict, total=False): execute this run. """ + include: List[RunStepInclude] + """A list of additional fields to include in the response. + + Currently the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + search result content. 
+ + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + """ + additional_instructions: Optional[str] """Appends additional instructions at the end of the instructions for the run. @@ -72,7 +85,7 @@ class RunCreateParamsBase(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ model: Union[str, ChatModel, None] @@ -86,23 +99,23 @@ class RunCreateParamsBase(TypedDict, total=False): parallel_tool_calls: bool """ Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. """ response_format: Optional[AssistantResponseFormatOptionParam] """Specifies the format that the model must output. - Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which guarantees the model will match your supplied JSON schema. Learn - more in the + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -191,7 +204,7 @@ class AdditionalMessage(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ @@ -212,7 +225,7 @@ class TruncationStrategy(TypedDict, total=False): """ -class RunCreateParamsNonStreaming(RunCreateParamsBase): +class RunCreateParamsNonStreaming(RunCreateParamsBase, total=False): stream: Optional[Literal[False]] """ If `true`, returns a stream of events that happen during the Run as server-sent diff --git a/portkey_ai/_vendor/openai/types/beta/threads/run_list_params.py b/portkey_ai/_vendor/openai/types/beta/threads/run_list_params.py index 1e32bca4..fbea54f6 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/run_list_params.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/run_list_params.py @@ -21,7 +21,7 @@ class RunListParams(TypedDict, total=False): """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your + you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. 
""" diff --git a/portkey_ai/_vendor/openai/types/beta/threads/run_submit_tool_outputs_params.py b/portkey_ai/_vendor/openai/types/beta/threads/run_submit_tool_outputs_params.py index ccb5e5e9..14772860 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/run_submit_tool_outputs_params.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/run_submit_tool_outputs_params.py @@ -31,7 +31,7 @@ class ToolOutput(TypedDict, total=False): """ -class RunSubmitToolOutputsParamsNonStreaming(RunSubmitToolOutputsParamsBase): +class RunSubmitToolOutputsParamsNonStreaming(RunSubmitToolOutputsParamsBase, total=False): stream: Optional[Literal[False]] """ If `true`, returns a stream of events that happen during the Run as server-sent diff --git a/portkey_ai/_vendor/openai/types/beta/threads/run_update_params.py b/portkey_ai/_vendor/openai/types/beta/threads/run_update_params.py index e595eac8..cb4f0536 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/run_update_params.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/run_update_params.py @@ -16,5 +16,5 @@ class RunUpdateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/__init__.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/__init__.py index a312ce3d..467d5d79 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/__init__.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/__init__.py @@ -6,9 +6,11 @@ from .tool_call import ToolCall as ToolCall from .run_step_delta import RunStepDelta as RunStepDelta from .tool_call_delta import ToolCallDelta as ToolCallDelta +from .run_step_include import RunStepInclude as RunStepInclude from .step_list_params import StepListParams as StepListParams from .function_tool_call import FunctionToolCall as FunctionToolCall from .run_step_delta_event import RunStepDeltaEvent as RunStepDeltaEvent +from .step_retrieve_params import StepRetrieveParams as StepRetrieveParams from .code_interpreter_logs import CodeInterpreterLogs as CodeInterpreterLogs from .file_search_tool_call import FileSearchToolCall as FileSearchToolCall from .tool_call_delta_object import ToolCallDeltaObject as ToolCallDeltaObject diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/file_search_tool_call.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/file_search_tool_call.py index 57c0ca9a..da4d58dc 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/file_search_tool_call.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/file_search_tool_call.py @@ -1,17 +1,71 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import List, Optional from typing_extensions import Literal from ....._models import BaseModel -__all__ = ["FileSearchToolCall"] +__all__ = [ + "FileSearchToolCall", + "FileSearch", + "FileSearchRankingOptions", + "FileSearchResult", + "FileSearchResultContent", +] + + +class FileSearchRankingOptions(BaseModel): + ranker: Literal["default_2024_08_21"] + """The ranker used for the file search.""" + + score_threshold: float + """The score threshold for the file search. + + All values must be a floating point number between 0 and 1. 
+ """ + + +class FileSearchResultContent(BaseModel): + text: Optional[str] = None + """The text content of the file.""" + + type: Optional[Literal["text"]] = None + """The type of the content.""" + + +class FileSearchResult(BaseModel): + file_id: str + """The ID of the file that result was found in.""" + + file_name: str + """The name of the file that result was found in.""" + + score: float + """The score of the result. + + All values must be a floating point number between 0 and 1. + """ + + content: Optional[List[FileSearchResultContent]] = None + """The content of the result that was found. + + The content is only included if requested via the include query parameter. + """ + + +class FileSearch(BaseModel): + ranking_options: Optional[FileSearchRankingOptions] = None + """The ranking options for the file search.""" + + results: Optional[List[FileSearchResult]] = None + """The results of the file search.""" class FileSearchToolCall(BaseModel): id: str """The ID of the tool call object.""" - file_search: object + file_search: FileSearch """For now, this is always going to be an empty object.""" type: Literal["file_search"] diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step.py index e3163c50..0445ae36 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step.py @@ -75,7 +75,7 @@ class RunStep(BaseModel): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ object: Literal["thread.run.step"] diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_include.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_include.py new file mode 100644 index 00000000..8e76c1b7 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/run_step_include.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["RunStepInclude"] + +RunStepInclude: TypeAlias = Literal["step_details.tool_calls[*].file_search.results[*].content"] diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/step_list_params.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/step_list_params.py index 606d4445..a6be771d 100644 --- a/portkey_ai/_vendor/openai/types/beta/threads/runs/step_list_params.py +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/step_list_params.py @@ -2,8 +2,11 @@ from __future__ import annotations +from typing import List from typing_extensions import Literal, Required, TypedDict +from .run_step_include import RunStepInclude + __all__ = ["StepListParams"] @@ -23,11 +26,23 @@ class StepListParams(TypedDict, total=False): """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your + you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ + include: List[RunStepInclude] + """A list of additional fields to include in the response. 
+ + Currently the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + """ + limit: int """A limit on the number of objects to be returned. diff --git a/portkey_ai/_vendor/openai/types/beta/threads/runs/step_retrieve_params.py b/portkey_ai/_vendor/openai/types/beta/threads/runs/step_retrieve_params.py new file mode 100644 index 00000000..ecbb72ed --- /dev/null +++ b/portkey_ai/_vendor/openai/types/beta/threads/runs/step_retrieve_params.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Required, TypedDict + +from .run_step_include import RunStepInclude + +__all__ = ["StepRetrieveParams"] + + +class StepRetrieveParams(TypedDict, total=False): + thread_id: Required[str] + + run_id: Required[str] + + include: List[RunStepInclude] + """A list of additional fields to include in the response. + + Currently the only supported value is + `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + search result content. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + """ diff --git a/portkey_ai/_vendor/openai/types/beta/vector_store.py b/portkey_ai/_vendor/openai/types/beta/vector_store.py index 488961b4..2d3ceea8 100644 --- a/portkey_ai/_vendor/openai/types/beta/vector_store.py +++ b/portkey_ai/_vendor/openai/types/beta/vector_store.py @@ -53,7 +53,7 @@ class VectorStore(BaseModel): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ name: str diff --git a/portkey_ai/_vendor/openai/types/beta/vector_store_create_params.py b/portkey_ai/_vendor/openai/types/beta/vector_store_create_params.py index 4f74af49..4fc7c389 100644 --- a/portkey_ai/_vendor/openai/types/beta/vector_store_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/vector_store_create_params.py @@ -2,21 +2,16 @@ from __future__ import annotations -from typing import List, Union, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict +from typing import List, Optional +from typing_extensions import Literal, Required, TypedDict -__all__ = [ - "VectorStoreCreateParams", - "ChunkingStrategy", - "ChunkingStrategyAuto", - "ChunkingStrategyStatic", - "ChunkingStrategyStaticStatic", - "ExpiresAfter", -] +from .file_chunking_strategy_param import FileChunkingStrategyParam + +__all__ = ["VectorStoreCreateParams", "ExpiresAfter"] class VectorStoreCreateParams(TypedDict, total=False): - chunking_strategy: ChunkingStrategy + chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is @@ -38,43 +33,13 @@ class VectorStoreCreateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. 
""" name: str """The name of the vector store.""" -class ChunkingStrategyAuto(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ChunkingStrategyStaticStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ChunkingStrategyStatic(TypedDict, total=False): - static: Required[ChunkingStrategyStaticStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ChunkingStrategy: TypeAlias = Union[ChunkingStrategyAuto, ChunkingStrategyStatic] - - class ExpiresAfter(TypedDict, total=False): anchor: Required[Literal["last_active_at"]] """Anchor timestamp after which the expiration policy applies. diff --git a/portkey_ai/_vendor/openai/types/beta/vector_store_list_params.py b/portkey_ai/_vendor/openai/types/beta/vector_store_list_params.py index f39f6726..e26ff90a 100644 --- a/portkey_ai/_vendor/openai/types/beta/vector_store_list_params.py +++ b/portkey_ai/_vendor/openai/types/beta/vector_store_list_params.py @@ -21,7 +21,7 @@ class VectorStoreListParams(TypedDict, total=False): """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your + you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ diff --git a/portkey_ai/_vendor/openai/types/beta/vector_store_update_params.py b/portkey_ai/_vendor/openai/types/beta/vector_store_update_params.py index 0f9593e4..ff6c068e 100644 --- a/portkey_ai/_vendor/openai/types/beta/vector_store_update_params.py +++ b/portkey_ai/_vendor/openai/types/beta/vector_store_update_params.py @@ -17,7 +17,7 @@ class VectorStoreUpdateParams(TypedDict, total=False): This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be - a maxium of 512 characters long. + a maximum of 512 characters long. """ name: Optional[str] diff --git a/portkey_ai/_vendor/openai/types/beta/vector_stores/file_batch_create_params.py b/portkey_ai/_vendor/openai/types/beta/vector_stores/file_batch_create_params.py index e1c3303c..e42ea99c 100644 --- a/portkey_ai/_vendor/openai/types/beta/vector_stores/file_batch_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/vector_stores/file_batch_create_params.py @@ -2,16 +2,12 @@ from __future__ import annotations -from typing import List, Union -from typing_extensions import Literal, Required, TypeAlias, TypedDict +from typing import List +from typing_extensions import Required, TypedDict -__all__ = [ - "FileBatchCreateParams", - "ChunkingStrategy", - "ChunkingStrategyAutoChunkingStrategyRequestParam", - "ChunkingStrategyStaticChunkingStrategyRequestParam", - "ChunkingStrategyStaticChunkingStrategyRequestParamStatic", -] +from ..file_chunking_strategy_param import FileChunkingStrategyParam + +__all__ = ["FileBatchCreateParams"] class FileBatchCreateParams(TypedDict, total=False): @@ -22,40 +18,9 @@ class FileBatchCreateParams(TypedDict, total=False): files. 
""" - chunking_strategy: ChunkingStrategy + chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). - If not set, will use the `auto` strategy. - """ - - -class ChunkingStrategyAutoChunkingStrategyRequestParam(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ChunkingStrategyStaticChunkingStrategyRequestParamStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ChunkingStrategyStaticChunkingStrategyRequestParam(TypedDict, total=False): - static: Required[ChunkingStrategyStaticChunkingStrategyRequestParamStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ChunkingStrategy: TypeAlias = Union[ - ChunkingStrategyAutoChunkingStrategyRequestParam, ChunkingStrategyStaticChunkingStrategyRequestParam -] diff --git a/portkey_ai/_vendor/openai/types/beta/vector_stores/file_batch_list_files_params.py b/portkey_ai/_vendor/openai/types/beta/vector_stores/file_batch_list_files_params.py index 24dee7d5..2a0a6c6a 100644 --- a/portkey_ai/_vendor/openai/types/beta/vector_stores/file_batch_list_files_params.py +++ b/portkey_ai/_vendor/openai/types/beta/vector_stores/file_batch_list_files_params.py @@ -23,7 +23,7 @@ class FileBatchListFilesParams(TypedDict, total=False): """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your + you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ diff --git a/portkey_ai/_vendor/openai/types/beta/vector_stores/file_create_params.py b/portkey_ai/_vendor/openai/types/beta/vector_stores/file_create_params.py index cfb80657..d074d766 100644 --- a/portkey_ai/_vendor/openai/types/beta/vector_stores/file_create_params.py +++ b/portkey_ai/_vendor/openai/types/beta/vector_stores/file_create_params.py @@ -2,16 +2,11 @@ from __future__ import annotations -from typing import Union -from typing_extensions import Literal, Required, TypeAlias, TypedDict +from typing_extensions import Required, TypedDict -__all__ = [ - "FileCreateParams", - "ChunkingStrategy", - "ChunkingStrategyAutoChunkingStrategyRequestParam", - "ChunkingStrategyStaticChunkingStrategyRequestParam", - "ChunkingStrategyStaticChunkingStrategyRequestParamStatic", -] +from ..file_chunking_strategy_param import FileChunkingStrategyParam + +__all__ = ["FileCreateParams"] class FileCreateParams(TypedDict, total=False): @@ -22,40 +17,9 @@ class FileCreateParams(TypedDict, total=False): files. """ - chunking_strategy: ChunkingStrategy + chunking_strategy: FileChunkingStrategyParam """The chunking strategy used to chunk the file(s). - If not set, will use the `auto` strategy. 
- """ - - -class ChunkingStrategyAutoChunkingStrategyRequestParam(TypedDict, total=False): - type: Required[Literal["auto"]] - """Always `auto`.""" - - -class ChunkingStrategyStaticChunkingStrategyRequestParamStatic(TypedDict, total=False): - chunk_overlap_tokens: Required[int] - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. """ - - max_chunk_size_tokens: Required[int] - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. - """ - - -class ChunkingStrategyStaticChunkingStrategyRequestParam(TypedDict, total=False): - static: Required[ChunkingStrategyStaticChunkingStrategyRequestParamStatic] - - type: Required[Literal["static"]] - """Always `static`.""" - - -ChunkingStrategy: TypeAlias = Union[ - ChunkingStrategyAutoChunkingStrategyRequestParam, ChunkingStrategyStaticChunkingStrategyRequestParam -] diff --git a/portkey_ai/_vendor/openai/types/beta/vector_stores/file_list_params.py b/portkey_ai/_vendor/openai/types/beta/vector_stores/file_list_params.py index 23dd7f0d..867b5fb3 100644 --- a/portkey_ai/_vendor/openai/types/beta/vector_stores/file_list_params.py +++ b/portkey_ai/_vendor/openai/types/beta/vector_stores/file_list_params.py @@ -21,7 +21,7 @@ class FileListParams(TypedDict, total=False): """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if - you make a list request and receive 100 objects, ending with obj_foo, your + you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ diff --git a/portkey_ai/_vendor/openai/types/beta/vector_stores/vector_store_file.py b/portkey_ai/_vendor/openai/types/beta/vector_stores/vector_store_file.py index 65096e8d..e4608e15 100644 --- a/portkey_ai/_vendor/openai/types/beta/vector_stores/vector_store_file.py +++ b/portkey_ai/_vendor/openai/types/beta/vector_stores/vector_store_file.py @@ -1,19 +1,12 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Union, Optional -from typing_extensions import Literal, Annotated, TypeAlias +from typing import Optional +from typing_extensions import Literal -from ...._utils import PropertyInfo from ...._models import BaseModel +from ..file_chunking_strategy import FileChunkingStrategy -__all__ = [ - "VectorStoreFile", - "LastError", - "ChunkingStrategy", - "ChunkingStrategyStatic", - "ChunkingStrategyStaticStatic", - "ChunkingStrategyOther", -] +__all__ = ["VectorStoreFile", "LastError"] class LastError(BaseModel): @@ -24,38 +17,6 @@ class LastError(BaseModel): """A human-readable description of the error.""" -class ChunkingStrategyStaticStatic(BaseModel): - chunk_overlap_tokens: int - """The number of tokens that overlap between chunks. The default value is `400`. - - Note that the overlap must not exceed half of `max_chunk_size_tokens`. - """ - - max_chunk_size_tokens: int - """The maximum number of tokens in each chunk. - - The default value is `800`. The minimum value is `100` and the maximum value is - `4096`. 
- """ - - -class ChunkingStrategyStatic(BaseModel): - static: ChunkingStrategyStaticStatic - - type: Literal["static"] - """Always `static`.""" - - -class ChunkingStrategyOther(BaseModel): - type: Literal["other"] - """Always `other`.""" - - -ChunkingStrategy: TypeAlias = Annotated[ - Union[ChunkingStrategyStatic, ChunkingStrategyOther], PropertyInfo(discriminator="type") -] - - class VectorStoreFile(BaseModel): id: str """The identifier, which can be referenced in API endpoints.""" @@ -93,5 +54,5 @@ class VectorStoreFile(BaseModel): attached to. """ - chunking_strategy: Optional[ChunkingStrategy] = None + chunking_strategy: Optional[FileChunkingStrategy] = None """The strategy used to chunk the file.""" diff --git a/portkey_ai/_vendor/openai/types/chat/__init__.py b/portkey_ai/_vendor/openai/types/chat/__init__.py index a5cf3734..c623a982 100644 --- a/portkey_ai/_vendor/openai/types/chat/__init__.py +++ b/portkey_ai/_vendor/openai/types/chat/__init__.py @@ -4,6 +4,7 @@ from .chat_completion import ChatCompletion as ChatCompletion from .chat_completion_role import ChatCompletionRole as ChatCompletionRole +from .chat_completion_audio import ChatCompletionAudio as ChatCompletionAudio from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk from .parsed_chat_completion import ( ParsedChoice as ParsedChoice, @@ -11,14 +12,17 @@ ParsedChatCompletionMessage as ParsedChatCompletionMessage, ) from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage +from .chat_completion_modality import ChatCompletionModality as ChatCompletionModality from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .parsed_function_tool_call import ( ParsedFunction as ParsedFunction, ParsedFunctionToolCall as ParsedFunctionToolCall, ) from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam +from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob +from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam @@ -34,6 +38,9 @@ from .chat_completion_content_part_text_param import ( ChatCompletionContentPartTextParam as ChatCompletionContentPartTextParam, ) +from .chat_completion_developer_message_param import ( + ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, +) from .chat_completion_message_tool_call_param import ( ChatCompletionMessageToolCallParam as ChatCompletionMessageToolCallParam, ) @@ -43,6 +50,9 @@ from .chat_completion_content_part_image_param import ( ChatCompletionContentPartImageParam as ChatCompletionContentPartImageParam, ) +from .chat_completion_prediction_content_param import ( + ChatCompletionPredictionContentParam as ChatCompletionPredictionContentParam, +) from .chat_completion_tool_choice_option_param import ( ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam, ) @@ -52,3 +62,6 @@ from 
.chat_completion_function_call_option_param import ( ChatCompletionFunctionCallOptionParam as ChatCompletionFunctionCallOptionParam, ) +from .chat_completion_content_part_input_audio_param import ( + ChatCompletionContentPartInputAudioParam as ChatCompletionContentPartInputAudioParam, +) diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_assistant_message_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_assistant_message_param.py index 2429d41d..35e3a3d7 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_assistant_message_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_assistant_message_param.py @@ -9,7 +9,13 @@ from .chat_completion_message_tool_call_param import ChatCompletionMessageToolCallParam from .chat_completion_content_part_refusal_param import ChatCompletionContentPartRefusalParam -__all__ = ["ChatCompletionAssistantMessageParam", "ContentArrayOfContentPart", "FunctionCall"] +__all__ = ["ChatCompletionAssistantMessageParam", "Audio", "ContentArrayOfContentPart", "FunctionCall"] + + +class Audio(TypedDict, total=False): + id: Required[str] + """Unique identifier for a previous audio response from the model.""" + ContentArrayOfContentPart: TypeAlias = Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartRefusalParam] @@ -31,6 +37,12 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" + audio: Optional[Audio] + """Data about a previous audio response from the model. + + [Learn more](https://platform.openai.com/docs/guides/audio). + """ + content: Union[str, Iterable[ContentArrayOfContentPart], None] """The contents of the assistant message. diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_audio.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_audio.py new file mode 100644 index 00000000..dd15508e --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_audio.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + +from ..._models import BaseModel + +__all__ = ["ChatCompletionAudio"] + + +class ChatCompletionAudio(BaseModel): + id: str + """Unique identifier for this audio response.""" + + data: str + """ + Base64 encoded audio bytes generated by the model, in the format specified in + the request. + """ + + expires_at: int + """ + The Unix timestamp (in seconds) for when this audio response will no longer be + accessible on the server for use in multi-turn conversations. + """ + + transcript: str + """Transcript of the audio generated by the model.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py new file mode 100644 index 00000000..1e20a52b --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_audio_param.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatCompletionAudioParam"] + + +class ChatCompletionAudioParam(TypedDict, total=False): + format: Required[Literal["wav", "mp3", "flac", "opus", "pcm16"]] + """Specifies the output audio format. + + Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`. 
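The `ChatCompletionAudioParam` above (its `voice` field follows just below) is what gets passed as the `audio` argument to `chat.completions.create`, alongside `modalities`. A hedged sketch, assuming access to an audio-capable model; the model name is taken from this diff's `ChatModel` additions and the voice from the recommended list below:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-audio-preview",
    modalities=["text", "audio"],
    audio={"voice": "coral", "format": "wav"},
    messages=[{"role": "user", "content": "Say this is a test"}],
)

message = completion.choices[0].message
if message.audio is not None:  # populated only when audio output was requested
    print(message.audio.transcript)
```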
+ """ + + voice: Required[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] + """The voice the model uses to respond. + + Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also + supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices + are less expressive). + """ diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image_param.py index b1a186aa..9d407324 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_image_param.py @@ -15,7 +15,7 @@ class ImageURL(TypedDict, total=False): """Specifies the detail level of the image. Learn more in the - [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). + [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). """ diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_input_audio_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_input_audio_param.py new file mode 100644 index 00000000..0b1b1a80 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_input_audio_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatCompletionContentPartInputAudioParam", "InputAudio"] + + +class InputAudio(TypedDict, total=False): + data: Required[str] + """Base64 encoded audio data.""" + + format: Required[Literal["wav", "mp3"]] + """The format of the encoded audio data. Currently supports "wav" and "mp3".""" + + +class ChatCompletionContentPartInputAudioParam(TypedDict, total=False): + input_audio: Required[InputAudio] + + type: Required[Literal["input_audio"]] + """The type of the content part. Always `input_audio`.""" diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_param.py index e0c6e480..682d11f4 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_content_part_param.py @@ -7,9 +7,10 @@ from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam from .chat_completion_content_part_image_param import ChatCompletionContentPartImageParam +from .chat_completion_content_part_input_audio_param import ChatCompletionContentPartInputAudioParam __all__ = ["ChatCompletionContentPartParam"] ChatCompletionContentPartParam: TypeAlias = Union[ - ChatCompletionContentPartTextParam, ChatCompletionContentPartImageParam + ChatCompletionContentPartTextParam, ChatCompletionContentPartImageParam, ChatCompletionContentPartInputAudioParam ] diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_developer_message_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_developer_message_param.py new file mode 100644 index 00000000..01e4fdb6 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_developer_message_param.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Union, Iterable +from typing_extensions import Literal, Required, TypedDict + +from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam + +__all__ = ["ChatCompletionDeveloperMessageParam"] + + +class ChatCompletionDeveloperMessageParam(TypedDict, total=False): + content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] + """The contents of the developer message.""" + + role: Required[Literal["developer"]] + """The role of the messages author, in this case `developer`.""" + + name: str + """An optional name for the participant. + + Provides the model information to differentiate between participants of the same + role. + """ diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_message.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_message.py index 492bb68c..704fa5d5 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_message.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_message.py @@ -4,6 +4,7 @@ from typing_extensions import Literal from ..._models import BaseModel +from .chat_completion_audio import ChatCompletionAudio from .chat_completion_message_tool_call import ChatCompletionMessageToolCall __all__ = ["ChatCompletionMessage", "FunctionCall"] @@ -32,6 +33,13 @@ class ChatCompletionMessage(BaseModel): role: Literal["assistant"] """The role of the author of this message.""" + audio: Optional[ChatCompletionAudio] = None + """ + If the audio output modality is requested, this object contains data about the + audio response from the model. + [Learn more](https://platform.openai.com/docs/guides/audio). + """ + function_call: Optional[FunctionCall] = None """Deprecated and replaced by `tool_calls`. diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_param.py index ec65d94c..942da243 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_message_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_message_param.py @@ -10,10 +10,12 @@ from .chat_completion_system_message_param import ChatCompletionSystemMessageParam from .chat_completion_function_message_param import ChatCompletionFunctionMessageParam from .chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam +from .chat_completion_developer_message_param import ChatCompletionDeveloperMessageParam __all__ = ["ChatCompletionMessageParam"] ChatCompletionMessageParam: TypeAlias = Union[ + ChatCompletionDeveloperMessageParam, ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam, diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_modality.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_modality.py new file mode 100644 index 00000000..8e3c1459 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_modality.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal, TypeAlias + +__all__ = ["ChatCompletionModality"] + +ChatCompletionModality: TypeAlias = Literal["text", "audio"] diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_prediction_content_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_prediction_content_param.py new file mode 100644 index 00000000..c44e6e36 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_prediction_content_param.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Iterable +from typing_extensions import Literal, Required, TypedDict + +from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam + +__all__ = ["ChatCompletionPredictionContentParam"] + + +class ChatCompletionPredictionContentParam(TypedDict, total=False): + content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] + """ + The content that should be matched when generating a model response. If + generated tokens would match this content, the entire model response can be + returned much more quickly. + """ + + type: Required[Literal["content"]] + """The type of the predicted content you want to provide. + + This type is currently always `content`. + """ diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_reasoning_effort.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_reasoning_effort.py new file mode 100644 index 00000000..9e794697 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_reasoning_effort.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["ChatCompletionReasoningEffort"] + +ChatCompletionReasoningEffort: TypeAlias = Literal["low", "medium", "high"] diff --git a/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_param.py b/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_param.py index 0cf6ea72..6c2b1a36 100644 --- a/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_param.py +++ b/portkey_ai/_vendor/openai/types/chat/chat_completion_tool_param.py @@ -4,13 +4,13 @@ from typing_extensions import Literal, Required, TypedDict -from ...types import shared_params +from ..shared_params.function_definition import FunctionDefinition __all__ = ["ChatCompletionToolParam"] class ChatCompletionToolParam(TypedDict, total=False): - function: Required[shared_params.FunctionDefinition] + function: Required[FunctionDefinition] type: Required[Literal["function"]] """The type of the tool. 
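`ChatCompletionPredictionContentParam` (defined above) feeds the new `prediction` argument on `chat.completions.create`. A minimal sketch of Predicted Outputs under assumed inputs; the file path and prompt are illustrative only:

```python
from openai import OpenAI

client = OpenAI()

# The file being regenerated doubles as the prediction, so unchanged tokens
# can be accepted quickly instead of being generated again.
existing_code = open("app.py").read()  # placeholder path

completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[
        {"role": "user", "content": "Rename `foo` to `bar` and reply with the full file."},
        {"role": "user", "content": existing_code},
    ],
    prediction={"type": "content", "content": existing_code},
)
print(completion.choices[0].message.content)
```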
Currently, only `function` is supported.""" diff --git a/portkey_ai/_vendor/openai/types/chat/completion_create_params.py b/portkey_ai/_vendor/openai/types/chat/completion_create_params.py index bf648a38..f168ddea 100644 --- a/portkey_ai/_vendor/openai/types/chat/completion_create_params.py +++ b/portkey_ai/_vendor/openai/types/chat/completion_create_params.py @@ -5,12 +5,19 @@ from typing import Dict, List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict -from ...types import shared_params from ..chat_model import ChatModel +from .chat_completion_modality import ChatCompletionModality from .chat_completion_tool_param import ChatCompletionToolParam +from .chat_completion_audio_param import ChatCompletionAudioParam from .chat_completion_message_param import ChatCompletionMessageParam +from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort +from ..shared_params.function_parameters import FunctionParameters +from ..shared_params.response_format_text import ResponseFormatText from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam +from .chat_completion_prediction_content_param import ChatCompletionPredictionContentParam from .chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam +from ..shared_params.response_format_json_object import ResponseFormatJSONObject +from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema from .chat_completion_function_call_option_param import ChatCompletionFunctionCallOptionParam __all__ = [ @@ -27,35 +34,48 @@ class CompletionCreateParamsBase(TypedDict, total=False): messages: Required[Iterable[ChatCompletionMessageParam]] """A list of messages comprising the conversation so far. - [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + Depending on the [model](https://platform.openai.com/docs/models) you use, + different message types (modalities) are supported, like + [text](https://platform.openai.com/docs/guides/text-generation), + [images](https://platform.openai.com/docs/guides/vision), and + [audio](https://platform.openai.com/docs/guides/audio). """ model: Required[Union[str, ChatModel]] """ID of the model to use. See the - [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. """ + audio: Optional[ChatCompletionAudioParam] + """Parameters for audio output. + + Required when audio output is requested with `modalities: ["audio"]`. + [Learn more](https://platform.openai.com/docs/guides/audio). + """ + frequency_penalty: Optional[float] """Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) """ function_call: FunctionCall """Deprecated in favor of `tool_choice`. - Controls which (if any) function is called by the model. `none` means the model - will not call a function and instead generates a message. `auto` means the model - can pick between generating a message or calling a function. 
Specifying a - particular function via `{"name": "my_function"}` forces the model to call that + Controls which (if any) function is called by the model. + + `none` means the model will not call a function and instead generates a message. + + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model + to call that function. + `none` is the default when no functions are present. `auto` is the default if functions are present. """ @@ -84,15 +104,42 @@ class CompletionCreateParamsBase(TypedDict, total=False): `content` of `message`. """ + max_completion_tokens: Optional[int] + """ + An upper bound for the number of tokens that can be generated for a completion, + including visible output tokens and + [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + """ + max_tokens: Optional[int] """ The maximum number of [tokens](/tokenizer) that can be generated in the chat - completion. + completion. This value can be used to control + [costs](https://openai.com/api/pricing/) for text generated via API. + + This value is now deprecated in favor of `max_completion_tokens`, and is not + compatible with + [o1 series models](https://platform.openai.com/docs/guides/reasoning). + """ + + metadata: Optional[Dict[str, str]] + """ + Developer-defined tags and values used for filtering completions in the + [dashboard](https://platform.openai.com/chat-completions). + """ - The total length of input tokens and generated tokens is limited by the model's - context length. - [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + modalities: Optional[List[ChatCompletionModality]] + """ + Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). To request that + this model generate both text and audio responses, you can use: + + `["text", "audio"]` """ n: Optional[int] @@ -105,28 +152,41 @@ class CompletionCreateParamsBase(TypedDict, total=False): parallel_tool_calls: bool """ Whether to enable - [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. """ + prediction: Optional[ChatCompletionPredictionContentParam] + """ + Static predicted output content, such as the content of a text file that is + being regenerated. + """ + presence_penalty: Optional[float] """Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + """ + + reasoning_effort: ChatCompletionReasoningEffort + """**o1 models only** - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. 
""" response_format: ResponseFormat """An object specifying the format that the model must output. - Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to @@ -152,8 +212,11 @@ class CompletionCreateParamsBase(TypedDict, total=False): This parameter is relevant for customers subscribed to the scale tier service: - - If set to 'auto', the system will utilize scale tier credits until they are - exhausted. + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarentee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. @@ -165,6 +228,13 @@ class CompletionCreateParamsBase(TypedDict, total=False): stop: Union[Optional[str], List[str]] """Up to 4 sequences where the API will stop generating further tokens.""" + store: Optional[bool] + """ + Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. + """ + stream_options: Optional[ChatCompletionStreamOptionsParam] """Options for streaming response. Only set this when you set `stream: true`.""" @@ -172,9 +242,8 @@ class CompletionCreateParamsBase(TypedDict, total=False): """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like - 0.2 will make it more focused and deterministic. - - We generally recommend altering this or `top_p` but not both. + 0.2 will make it more focused and deterministic. We generally recommend altering + this or `top_p` but not both. """ tool_choice: ChatCompletionToolChoiceOptionParam @@ -218,7 +287,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ @@ -239,7 +308,7 @@ class Function(TypedDict, total=False): how to call the function. """ - parameters: shared_params.FunctionParameters + parameters: FunctionParameters """The parameters the functions accepts, described as a JSON Schema object. 
See the [guide](https://platform.openai.com/docs/guides/function-calling) for @@ -251,12 +320,10 @@ class Function(TypedDict, total=False): """ -ResponseFormat: TypeAlias = Union[ - shared_params.ResponseFormatText, shared_params.ResponseFormatJSONObject, shared_params.ResponseFormatJSONSchema -] +ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema] -class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase): +class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False): stream: Optional[Literal[False]] """If set, partial message deltas will be sent, like in ChatGPT. diff --git a/portkey_ai/_vendor/openai/types/chat_model.py b/portkey_ai/_vendor/openai/types/chat_model.py index 686f26b7..e1ac4643 100644 --- a/portkey_ai/_vendor/openai/types/chat_model.py +++ b/portkey_ai/_vendor/openai/types/chat_model.py @@ -5,9 +5,22 @@ __all__ = ["ChatModel"] ChatModel: TypeAlias = Literal[ + "o1", + "o1-2024-12-17", + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", "gpt-4o", + "gpt-4o-2024-11-20", "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", + "gpt-4o-audio-preview", + "gpt-4o-audio-preview-2024-10-01", + "gpt-4o-audio-preview-2024-12-17", + "gpt-4o-mini-audio-preview", + "gpt-4o-mini-audio-preview-2024-12-17", + "chatgpt-4o-latest", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", diff --git a/portkey_ai/_vendor/openai/types/completion_create_params.py b/portkey_ai/_vendor/openai/types/completion_create_params.py index 9fe22fe3..fdb1680d 100644 --- a/portkey_ai/_vendor/openai/types/completion_create_params.py +++ b/portkey_ai/_vendor/openai/types/completion_create_params.py @@ -17,8 +17,8 @@ class CompletionCreateParamsBase(TypedDict, total=False): You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. """ prompt: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None]] @@ -53,7 +53,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) """ logit_bias: Optional[Dict[str, int]] @@ -106,7 +106,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) """ seed: Optional[int] @@ -156,11 +156,11 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). 
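For the `response_format` docs above, a minimal JSON-mode sketch; note the docstring's warning that the model must also be instructed to produce JSON:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-mini",
    response_format={"type": "json_object"},
    messages=[
        # JSON mode requires an explicit instruction to emit JSON.
        {"role": "system", "content": "You are a helpful assistant. Respond in JSON."},
        {"role": "user", "content": "List three primary colors."},
    ],
)
print(completion.choices[0].message.content)
```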
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ -class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase): +class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False): stream: Optional[Literal[False]] """Whether to stream back partial progress. diff --git a/portkey_ai/_vendor/openai/types/completion_usage.py b/portkey_ai/_vendor/openai/types/completion_usage.py index ac09afd4..d8c4e84c 100644 --- a/portkey_ai/_vendor/openai/types/completion_usage.py +++ b/portkey_ai/_vendor/openai/types/completion_usage.py @@ -1,9 +1,40 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import Optional from .._models import BaseModel -__all__ = ["CompletionUsage"] +__all__ = ["CompletionUsage", "CompletionTokensDetails", "PromptTokensDetails"] + + +class CompletionTokensDetails(BaseModel): + accepted_prediction_tokens: Optional[int] = None + """ + When using Predicted Outputs, the number of tokens in the prediction that + appeared in the completion. + """ + + audio_tokens: Optional[int] = None + """Audio output tokens generated by the model.""" + + reasoning_tokens: Optional[int] = None + """Tokens generated by the model for reasoning.""" + + rejected_prediction_tokens: Optional[int] = None + """ + When using Predicted Outputs, the number of tokens in the prediction that did + not appear in the completion. However, like reasoning tokens, these tokens are + still counted in the total completion tokens for purposes of billing, output, + and context window limits. + """ + + +class PromptTokensDetails(BaseModel): + audio_tokens: Optional[int] = None + """Audio input tokens present in the prompt.""" + + cached_tokens: Optional[int] = None + """Cached tokens present in the prompt.""" class CompletionUsage(BaseModel): @@ -15,3 +46,9 @@ class CompletionUsage(BaseModel): total_tokens: int """Total number of tokens used in the request (prompt + completion).""" + + completion_tokens_details: Optional[CompletionTokensDetails] = None + """Breakdown of tokens used in a completion.""" + + prompt_tokens_details: Optional[PromptTokensDetails] = None + """Breakdown of tokens used in the prompt.""" diff --git a/portkey_ai/_vendor/openai/types/embedding_create_params.py b/portkey_ai/_vendor/openai/types/embedding_create_params.py index 930b3b79..13857628 100644 --- a/portkey_ai/_vendor/openai/types/embedding_create_params.py +++ b/portkey_ai/_vendor/openai/types/embedding_create_params.py @@ -5,6 +5,8 @@ from typing import List, Union, Iterable from typing_extensions import Literal, Required, TypedDict +from .embedding_model import EmbeddingModel + __all__ = ["EmbeddingCreateParams"] @@ -20,14 +22,14 @@ class EmbeddingCreateParams(TypedDict, total=False): for counting tokens. """ - model: Required[Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]]] + model: Required[Union[str, EmbeddingModel]] """ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our - [Model overview](https://platform.openai.com/docs/models/overview) for - descriptions of them. + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. """ dimensions: int @@ -46,5 +48,5 @@ class EmbeddingCreateParams(TypedDict, total=False): """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
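The new usage-detail models above are entirely optional, so callers should guard each level. A small sketch of reading the breakdown off a completion:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say this is a test"}],
)

usage = completion.usage
if usage is not None:
    print("total:", usage.total_tokens)
    # Every detail object and field is Optional; check before dereferencing.
    if usage.completion_tokens_details is not None:
        print("reasoning:", usage.completion_tokens_details.reasoning_tokens)
    if usage.prompt_tokens_details is not None:
        print("cached:", usage.prompt_tokens_details.cached_tokens)
```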
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ diff --git a/portkey_ai/_vendor/openai/types/embedding_model.py b/portkey_ai/_vendor/openai/types/embedding_model.py new file mode 100644 index 00000000..075ff976 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/embedding_model.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["EmbeddingModel"] + +EmbeddingModel: TypeAlias = Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"] diff --git a/portkey_ai/_vendor/openai/types/file_create_params.py b/portkey_ai/_vendor/openai/types/file_create_params.py index 8b1c296f..ecf75033 100644 --- a/portkey_ai/_vendor/openai/types/file_create_params.py +++ b/portkey_ai/_vendor/openai/types/file_create_params.py @@ -2,9 +2,10 @@ from __future__ import annotations -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Required, TypedDict from .._types import FileTypes +from .file_purpose import FilePurpose __all__ = ["FileCreateParams"] @@ -13,7 +14,7 @@ class FileCreateParams(TypedDict, total=False): file: Required[FileTypes] """The File object (not file name) to be uploaded.""" - purpose: Required[Literal["assistants", "batch", "fine-tune", "vision"]] + purpose: Required[FilePurpose] """The intended purpose of the uploaded file. Use "assistants" for diff --git a/portkey_ai/_vendor/openai/types/file_list_params.py b/portkey_ai/_vendor/openai/types/file_list_params.py index 212eca13..058d874c 100644 --- a/portkey_ai/_vendor/openai/types/file_list_params.py +++ b/portkey_ai/_vendor/openai/types/file_list_params.py @@ -2,11 +2,32 @@ from __future__ import annotations -from typing_extensions import TypedDict +from typing_extensions import Literal, TypedDict __all__ = ["FileListParams"] class FileListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 10,000, and the default is 10,000. + """ + + order: Literal["asc", "desc"] + """Sort order by the `created_at` timestamp of the objects. + + `asc` for ascending order and `desc` for descending order. + """ + purpose: str """Only return files with the given purpose.""" diff --git a/portkey_ai/_vendor/openai/types/file_purpose.py b/portkey_ai/_vendor/openai/types/file_purpose.py new file mode 100644 index 00000000..32dc352c --- /dev/null +++ b/portkey_ai/_vendor/openai/types/file_purpose.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal, TypeAlias + +__all__ = ["FilePurpose"] + +FilePurpose: TypeAlias = Literal["assistants", "batch", "fine-tune", "vision"] diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job.py b/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job.py index 7ac87927..f5a11c21 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job.py @@ -6,7 +6,16 @@ from ..._models import BaseModel from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject -__all__ = ["FineTuningJob", "Error", "Hyperparameters"] +__all__ = [ + "FineTuningJob", + "Error", + "Hyperparameters", + "Method", + "MethodDpo", + "MethodDpoHyperparameters", + "MethodSupervised", + "MethodSupervisedHyperparameters", +] class Error(BaseModel): @@ -24,15 +33,96 @@ class Error(BaseModel): class Hyperparameters(BaseModel): - n_epochs: Union[Literal["auto"], int] + batch_size: Union[Literal["auto"], int, None] = None + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + learning_rate_multiplier: Union[Literal["auto"], float, None] = None + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int, None] = None + """The number of epochs to train the model for. + + An epoch refers to one full cycle through the training dataset. + """ + + +class MethodDpoHyperparameters(BaseModel): + batch_size: Union[Literal["auto"], int, None] = None + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + beta: Union[Literal["auto"], float, None] = None + """The beta value for the DPO method. + + A higher beta value will increase the weight of the penalty between the policy + and reference model. + """ + + learning_rate_multiplier: Union[Literal["auto"], float, None] = None + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int, None] = None """The number of epochs to train the model for. - An epoch refers to one full cycle through the training dataset. "auto" decides - the optimal number of epochs based on the size of the dataset. If setting the - number manually, we support any number between 1 and 50 epochs. + An epoch refers to one full cycle through the training dataset. """ +class MethodDpo(BaseModel): + hyperparameters: Optional[MethodDpoHyperparameters] = None + """The hyperparameters used for the fine-tuning job.""" + + +class MethodSupervisedHyperparameters(BaseModel): + batch_size: Union[Literal["auto"], int, None] = None + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + learning_rate_multiplier: Union[Literal["auto"], float, None] = None + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int, None] = None + """The number of epochs to train the model for. + + An epoch refers to one full cycle through the training dataset. 
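The `FilePurpose` alias introduced above is what `files.create` now accepts for `purpose`. A minimal sketch; the file path is a placeholder:

```python
from openai import OpenAI

client = OpenAI()

uploaded = client.files.create(
    file=open("training.jsonl", "rb"),  # placeholder path
    purpose="fine-tune",  # one of the FilePurpose literals above
)
print(uploaded.id)
```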
+ """ + + +class MethodSupervised(BaseModel): + hyperparameters: Optional[MethodSupervisedHyperparameters] = None + """The hyperparameters used for the fine-tuning job.""" + + +class Method(BaseModel): + dpo: Optional[MethodDpo] = None + """Configuration for the DPO fine-tuning method.""" + + supervised: Optional[MethodSupervised] = None + """Configuration for the supervised fine-tuning method.""" + + type: Optional[Literal["supervised", "dpo"]] = None + """The type of method. Is either `supervised` or `dpo`.""" + + class FineTuningJob(BaseModel): id: str """The object identifier, which can be referenced in the API endpoints.""" @@ -61,8 +151,7 @@ class FineTuningJob(BaseModel): hyperparameters: Hyperparameters """The hyperparameters used for the fine-tuning job. - See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) - for more details. + This value will only be returned when running `supervised` jobs. """ model: str @@ -118,3 +207,6 @@ class FineTuningJob(BaseModel): integrations: Optional[List[FineTuningJobWandbIntegrationObject]] = None """A list of integrations to enable for this fine-tuning job.""" + + method: Optional[Method] = None + """The method used for fine-tuning.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job_event.py b/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job_event.py index 2d204bb9..1d728bd7 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job_event.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/fine_tuning_job_event.py @@ -1,5 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +import builtins +from typing import Optional from typing_extensions import Literal from ..._models import BaseModel @@ -9,11 +11,22 @@ class FineTuningJobEvent(BaseModel): id: str + """The object identifier.""" created_at: int + """The Unix timestamp (in seconds) for when the fine-tuning job was created.""" level: Literal["info", "warn", "error"] + """The log level of the event.""" message: str + """The message of the event.""" object: Literal["fine_tuning.job.event"] + """The object type, which is always "fine_tuning.job.event".""" + + data: Optional[builtins.object] = None + """The data associated with the event.""" + + type: Optional[Literal["message", "metrics"]] = None + """The type of event.""" diff --git a/portkey_ai/_vendor/openai/types/fine_tuning/job_create_params.py b/portkey_ai/_vendor/openai/types/fine_tuning/job_create_params.py index e9be2ef1..09c3f857 100644 --- a/portkey_ai/_vendor/openai/types/fine_tuning/job_create_params.py +++ b/portkey_ai/_vendor/openai/types/fine_tuning/job_create_params.py @@ -5,7 +5,17 @@ from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict -__all__ = ["JobCreateParams", "Hyperparameters", "Integration", "IntegrationWandb"] +__all__ = [ + "JobCreateParams", + "Hyperparameters", + "Integration", + "IntegrationWandb", + "Method", + "MethodDpo", + "MethodDpoHyperparameters", + "MethodSupervised", + "MethodSupervisedHyperparameters", +] class JobCreateParams(TypedDict, total=False): @@ -13,7 +23,7 @@ class JobCreateParams(TypedDict, total=False): """The name of the model to fine-tune. You can select one of the - [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). + [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). 
""" training_file: Required[str] @@ -26,8 +36,10 @@ class JobCreateParams(TypedDict, total=False): your file with the purpose `fine-tune`. The contents of the file should differ depending on if the model uses the - [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + format, or if the fine-tuning method uses the + [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) format. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) @@ -35,11 +47,17 @@ class JobCreateParams(TypedDict, total=False): """ hyperparameters: Hyperparameters - """The hyperparameters used for the fine-tuning job.""" + """ + The hyperparameters used for the fine-tuning job. This value is now deprecated + in favor of `method`, and should be passed in under the `method` parameter. + """ integrations: Optional[Iterable[Integration]] """A list of integrations to enable for your fine-tuning job.""" + method: Method + """The method used for fine-tuning.""" + seed: Optional[int] """The seed controls the reproducibility of the job. @@ -50,7 +68,7 @@ class JobCreateParams(TypedDict, total=False): suffix: Optional[str] """ - A string of up to 18 characters that will be added to your fine-tuned model + A string of up to 64 characters that will be added to your fine-tuned model name. For example, a `suffix` of "custom-model-name" would produce a model name like @@ -134,3 +152,73 @@ class Integration(TypedDict, total=False): can set an explicit display name for your run, add tags to your run, and set a default entity (team, username, etc) to be associated with your run. """ + + +class MethodDpoHyperparameters(TypedDict, total=False): + batch_size: Union[Literal["auto"], int] + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + beta: Union[Literal["auto"], float] + """The beta value for the DPO method. + + A higher beta value will increase the weight of the penalty between the policy + and reference model. + """ + + learning_rate_multiplier: Union[Literal["auto"], float] + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int] + """The number of epochs to train the model for. + + An epoch refers to one full cycle through the training dataset. + """ + + +class MethodDpo(TypedDict, total=False): + hyperparameters: MethodDpoHyperparameters + """The hyperparameters used for the fine-tuning job.""" + + +class MethodSupervisedHyperparameters(TypedDict, total=False): + batch_size: Union[Literal["auto"], int] + """Number of examples in each batch. + + A larger batch size means that model parameters are updated less frequently, but + with lower variance. + """ + + learning_rate_multiplier: Union[Literal["auto"], float] + """Scaling factor for the learning rate. + + A smaller learning rate may be useful to avoid overfitting. + """ + + n_epochs: Union[Literal["auto"], int] + """The number of epochs to train the model for. + + An epoch refers to one full cycle through the training dataset. 
+ """ + + +class MethodSupervised(TypedDict, total=False): + hyperparameters: MethodSupervisedHyperparameters + """The hyperparameters used for the fine-tuning job.""" + + +class Method(TypedDict, total=False): + dpo: MethodDpo + """Configuration for the DPO fine-tuning method.""" + + supervised: MethodSupervised + """Configuration for the supervised fine-tuning method.""" + + type: Literal["supervised", "dpo"] + """The type of method. Is either `supervised` or `dpo`.""" diff --git a/portkey_ai/_vendor/openai/types/image_create_variation_params.py b/portkey_ai/_vendor/openai/types/image_create_variation_params.py index d6ecf0f1..d20f6729 100644 --- a/portkey_ai/_vendor/openai/types/image_create_variation_params.py +++ b/portkey_ai/_vendor/openai/types/image_create_variation_params.py @@ -47,5 +47,5 @@ class ImageCreateVariationParams(TypedDict, total=False): """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ diff --git a/portkey_ai/_vendor/openai/types/image_edit_params.py b/portkey_ai/_vendor/openai/types/image_edit_params.py index a596a869..1cb10611 100644 --- a/portkey_ai/_vendor/openai/types/image_edit_params.py +++ b/portkey_ai/_vendor/openai/types/image_edit_params.py @@ -58,5 +58,5 @@ class ImageEditParams(TypedDict, total=False): """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ diff --git a/portkey_ai/_vendor/openai/types/image_generate_params.py b/portkey_ai/_vendor/openai/types/image_generate_params.py index 307adeb3..c88c45f5 100644 --- a/portkey_ai/_vendor/openai/types/image_generate_params.py +++ b/portkey_ai/_vendor/openai/types/image_generate_params.py @@ -61,5 +61,5 @@ class ImageGenerateParams(TypedDict, total=False): """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ diff --git a/portkey_ai/_vendor/openai/types/moderation.py b/portkey_ai/_vendor/openai/types/moderation.py index 5aa69182..e4ec182c 100644 --- a/portkey_ai/_vendor/openai/types/moderation.py +++ b/portkey_ai/_vendor/openai/types/moderation.py @@ -1,11 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import List +from typing_extensions import Literal from pydantic import Field as FieldInfo from .._models import BaseModel -__all__ = ["Moderation", "Categories", "CategoryScores"] +__all__ = ["Moderation", "Categories", "CategoryAppliedInputTypes", "CategoryScores"] class Categories(BaseModel): @@ -36,6 +38,20 @@ class Categories(BaseModel): orientation, disability status, or caste. """ + illicit: bool + """ + Content that includes instructions or advice that facilitate the planning or + execution of wrongdoing, or that gives advice or instruction on how to commit + illicit acts. For example, "how to shoplift" would fit this category. 
+ """ + + illicit_violent: bool = FieldInfo(alias="illicit/violent") + """ + Content that includes instructions or advice that facilitate the planning or + execution of wrongdoing that also includes violence, or that gives advice or + instruction on the procurement of any weapon. + """ + self_harm: bool = FieldInfo(alias="self-harm") """ Content that promotes, encourages, or depicts acts of self-harm, such as @@ -72,6 +88,47 @@ class Categories(BaseModel): """Content that depicts death, violence, or physical injury in graphic detail.""" +class CategoryAppliedInputTypes(BaseModel): + harassment: List[Literal["text"]] + """The applied input type(s) for the category 'harassment'.""" + + harassment_threatening: List[Literal["text"]] = FieldInfo(alias="harassment/threatening") + """The applied input type(s) for the category 'harassment/threatening'.""" + + hate: List[Literal["text"]] + """The applied input type(s) for the category 'hate'.""" + + hate_threatening: List[Literal["text"]] = FieldInfo(alias="hate/threatening") + """The applied input type(s) for the category 'hate/threatening'.""" + + illicit: List[Literal["text"]] + """The applied input type(s) for the category 'illicit'.""" + + illicit_violent: List[Literal["text"]] = FieldInfo(alias="illicit/violent") + """The applied input type(s) for the category 'illicit/violent'.""" + + self_harm: List[Literal["text", "image"]] = FieldInfo(alias="self-harm") + """The applied input type(s) for the category 'self-harm'.""" + + self_harm_instructions: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/instructions") + """The applied input type(s) for the category 'self-harm/instructions'.""" + + self_harm_intent: List[Literal["text", "image"]] = FieldInfo(alias="self-harm/intent") + """The applied input type(s) for the category 'self-harm/intent'.""" + + sexual: List[Literal["text", "image"]] + """The applied input type(s) for the category 'sexual'.""" + + sexual_minors: List[Literal["text"]] = FieldInfo(alias="sexual/minors") + """The applied input type(s) for the category 'sexual/minors'.""" + + violence: List[Literal["text", "image"]] + """The applied input type(s) for the category 'violence'.""" + + violence_graphic: List[Literal["text", "image"]] = FieldInfo(alias="violence/graphic") + """The applied input type(s) for the category 'violence/graphic'.""" + + class CategoryScores(BaseModel): harassment: float """The score for the category 'harassment'.""" @@ -85,6 +142,12 @@ class CategoryScores(BaseModel): hate_threatening: float = FieldInfo(alias="hate/threatening") """The score for the category 'hate/threatening'.""" + illicit: float + """The score for the category 'illicit'.""" + + illicit_violent: float = FieldInfo(alias="illicit/violent") + """The score for the category 'illicit/violent'.""" + self_harm: float = FieldInfo(alias="self-harm") """The score for the category 'self-harm'.""" @@ -111,6 +174,11 @@ class Moderation(BaseModel): categories: Categories """A list of the categories, and whether they are flagged or not.""" + category_applied_input_types: CategoryAppliedInputTypes + """ + A list of the categories along with the input type(s) that the score applies to. 
+ """ + category_scores: CategoryScores """A list of the categories along with their scores as predicted by model.""" diff --git a/portkey_ai/_vendor/openai/types/moderation_create_params.py b/portkey_ai/_vendor/openai/types/moderation_create_params.py index 33768219..3ea2f3cd 100644 --- a/portkey_ai/_vendor/openai/types/moderation_create_params.py +++ b/portkey_ai/_vendor/openai/types/moderation_create_params.py @@ -2,26 +2,28 @@ from __future__ import annotations -from typing import List, Union +from typing import List, Union, Iterable from typing_extensions import Required, TypedDict from .moderation_model import ModerationModel +from .moderation_multi_modal_input_param import ModerationMultiModalInputParam __all__ = ["ModerationCreateParams"] class ModerationCreateParams(TypedDict, total=False): - input: Required[Union[str, List[str]]] - """The input text to classify""" + input: Required[Union[str, List[str], Iterable[ModerationMultiModalInputParam]]] + """Input (or inputs) to classify. - model: Union[str, ModerationModel] + Can be a single string, an array of strings, or an array of multi-modal input + objects similar to other models. """ - Two content moderations models are available: `text-moderation-stable` and - `text-moderation-latest`. - - The default is `text-moderation-latest` which will be automatically upgraded - over time. This ensures you are always using our most accurate model. If you use - `text-moderation-stable`, we will provide advanced notice before updating the - model. Accuracy of `text-moderation-stable` may be slightly lower than for - `text-moderation-latest`. + + model: Union[str, ModerationModel] + """The content moderation model you would like to use. + + Learn more in + [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + learn about available models + [here](https://platform.openai.com/docs/models#moderation). """ diff --git a/portkey_ai/_vendor/openai/types/moderation_image_url_input_param.py b/portkey_ai/_vendor/openai/types/moderation_image_url_input_param.py new file mode 100644 index 00000000..9a69a6a2 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/moderation_image_url_input_param.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ModerationImageURLInputParam", "ImageURL"] + + +class ImageURL(TypedDict, total=False): + url: Required[str] + """Either a URL of the image or the base64 encoded image data.""" + + +class ModerationImageURLInputParam(TypedDict, total=False): + image_url: Required[ImageURL] + """Contains either an image URL or a data URL for a base64 encoded image.""" + + type: Required[Literal["image_url"]] + """Always `image_url`.""" diff --git a/portkey_ai/_vendor/openai/types/moderation_model.py b/portkey_ai/_vendor/openai/types/moderation_model.py index f549aeeb..64954c45 100644 --- a/portkey_ai/_vendor/openai/types/moderation_model.py +++ b/portkey_ai/_vendor/openai/types/moderation_model.py @@ -4,4 +4,6 @@ __all__ = ["ModerationModel"] -ModerationModel: TypeAlias = Literal["text-moderation-latest", "text-moderation-stable"] +ModerationModel: TypeAlias = Literal[ + "omni-moderation-latest", "omni-moderation-2024-09-26", "text-moderation-latest", "text-moderation-stable" +] diff --git a/portkey_ai/_vendor/openai/types/moderation_multi_modal_input_param.py b/portkey_ai/_vendor/openai/types/moderation_multi_modal_input_param.py new file mode 100644 index 00000000..4314e7b0 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/moderation_multi_modal_input_param.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .moderation_text_input_param import ModerationTextInputParam +from .moderation_image_url_input_param import ModerationImageURLInputParam + +__all__ = ["ModerationMultiModalInputParam"] + +ModerationMultiModalInputParam: TypeAlias = Union[ModerationImageURLInputParam, ModerationTextInputParam] diff --git a/portkey_ai/_vendor/openai/types/moderation_text_input_param.py b/portkey_ai/_vendor/openai/types/moderation_text_input_param.py new file mode 100644 index 00000000..e5da5333 --- /dev/null +++ b/portkey_ai/_vendor/openai/types/moderation_text_input_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ModerationTextInputParam"] + + +class ModerationTextInputParam(TypedDict, total=False): + text: Required[str] + """A string of text to classify.""" + + type: Required[Literal["text"]] + """Always `text`.""" diff --git a/portkey_ai/_vendor/openai/types/shared_params/function_definition.py b/portkey_ai/_vendor/openai/types/shared_params/function_definition.py index f41392f1..d45ec13f 100644 --- a/portkey_ai/_vendor/openai/types/shared_params/function_definition.py +++ b/portkey_ai/_vendor/openai/types/shared_params/function_definition.py @@ -5,7 +5,7 @@ from typing import Optional from typing_extensions import Required, TypedDict -from ...types import shared_params +from .function_parameters import FunctionParameters __all__ = ["FunctionDefinition"] @@ -24,7 +24,7 @@ class FunctionDefinition(TypedDict, total=False): how to call the function. """ - parameters: shared_params.FunctionParameters + parameters: FunctionParameters """The parameters the function accepts, described as a JSON Schema object.
See the [guide](https://platform.openai.com/docs/guides/function-calling) for diff --git a/portkey_ai/_vendor/openai/types/upload_create_params.py b/portkey_ai/_vendor/openai/types/upload_create_params.py index 3165ebcc..2ebabe6c 100644 --- a/portkey_ai/_vendor/openai/types/upload_create_params.py +++ b/portkey_ai/_vendor/openai/types/upload_create_params.py @@ -2,7 +2,9 @@ from __future__ import annotations -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Required, TypedDict + +from .file_purpose import FilePurpose __all__ = ["UploadCreateParams"] @@ -21,7 +23,7 @@ class UploadCreateParams(TypedDict, total=False): supported MIME types for assistants and vision. """ - purpose: Required[Literal["assistants", "batch", "fine-tune", "vision"]] + purpose: Required[FilePurpose] """The intended purpose of the uploaded file. See the diff --git a/portkey_ai/_vendor/openai/types/websocket_connection_options.py b/portkey_ai/_vendor/openai/types/websocket_connection_options.py new file mode 100644 index 00000000..40fd24ab --- /dev/null +++ b/portkey_ai/_vendor/openai/types/websocket_connection_options.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing_extensions import Sequence, TypedDict + +if TYPE_CHECKING: + from websockets import Subprotocol + from websockets.extensions import ClientExtensionFactory + + +class WebsocketConnectionOptions(TypedDict, total=False): + """Websocket connection options copied from `websockets`. + + For example: https://websockets.readthedocs.io/en/stable/reference/asyncio/client.html#websockets.asyncio.client.connect + """ + + extensions: Sequence[ClientExtensionFactory] | None + """List of supported extensions, in order in which they should be negotiated and run.""" + + subprotocols: Sequence[Subprotocol] | None + """List of supported subprotocols, in order of decreasing preference.""" + + compression: str | None + """The “permessage-deflate” extension is enabled by default. Set compression to None to disable it. See the [compression guide](https://websockets.readthedocs.io/en/stable/topics/compression.html) for details.""" + + # limits + max_size: int | None + """Maximum size of incoming messages in bytes. None disables the limit.""" + + max_queue: int | None | tuple[int | None, int | None] + """High-water mark of the buffer where frames are received. It defaults to 16 frames. The low-water mark defaults to max_queue // 4. You may pass a (high, low) tuple to set the high-water and low-water marks. If you want to disable flow control entirely, you may set it to None, although that’s a bad idea.""" + + write_limit: int | tuple[int, int | None] + """High-water mark of write buffer in bytes. It is passed to set_write_buffer_limits(). It defaults to 32 KiB. 
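These options are forwarded to the underlying `websockets` connect call when opening a Realtime session (the `write_limit` docstring concludes just below). A hedged sketch; the model name is an assumption, and the 10 MiB cap is an arbitrary illustrative value:

```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    async with client.beta.realtime.connect(
        model="gpt-4o-realtime-preview",  # assumed model name
        websocket_connection_options={"max_size": 10 * 1024 * 1024},  # 10 MiB cap
    ) as connection:
        # Configure the session once connected; text-only for this sketch.
        await connection.session.update(session={"modalities": ["text"]})


asyncio.run(main())
```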
You may pass a (high, low) tuple to set the high-water and low-water marks.""" diff --git a/portkey_ai/api_resources/__init__.py b/portkey_ai/api_resources/__init__.py index ebacc055..715ff4c4 100644 --- a/portkey_ai/api_resources/__init__.py +++ b/portkey_ai/api_resources/__init__.py @@ -77,6 +77,10 @@ AsyncVirtualKeys, Logs, AsyncLogs, + BetaRealtime, + AsyncBetaRealtime, + BetaSessions, + AsyncBetaSessions, ) from .utils import ( Modes, @@ -189,4 +193,8 @@ "AsyncVirtualKeys", "Logs", "AsyncLogs", + "BetaRealtime", + "AsyncBetaRealtime", + "BetaSessions", + "AsyncBetaSessions", ] diff --git a/portkey_ai/api_resources/apis/__init__.py b/portkey_ai/api_resources/apis/__init__.py index 425c3300..599f413b 100644 --- a/portkey_ai/api_resources/apis/__init__.py +++ b/portkey_ai/api_resources/apis/__init__.py @@ -74,6 +74,13 @@ AsyncBetaCompletions, ) +from .beta_realtime import ( + BetaRealtime, + AsyncBetaRealtime, + BetaSessions, + AsyncBetaSessions, +) + from .uploads import ( Uploads, Parts, @@ -171,4 +178,8 @@ "AsyncVirtualKeys", "Logs", "AsyncLogs", + "BetaRealtime", + "AsyncBetaRealtime", + "BetaSessions", + "AsyncBetaSessions", ] diff --git a/portkey_ai/api_resources/apis/audio.py b/portkey_ai/api_resources/apis/audio.py index b82f7ef8..bfce244a 100644 --- a/portkey_ai/api_resources/apis/audio.py +++ b/portkey_ai/api_resources/apis/audio.py @@ -5,7 +5,12 @@ from portkey_ai.api_resources.client import AsyncPortkey, Portkey import typing -from portkey_ai.api_resources.types.audio_types import Transcription, Translation +from portkey_ai.api_resources.types.audio_types import ( + Transcription, + TranscriptionVerbose, + Translation, + TranslationVerbose, +) class Audio(APIResource): @@ -34,7 +39,7 @@ def create( temperature: Union[float, NotGiven] = NOT_GIVEN, timestamp_granularities: Union[List[str], NotGiven] = NOT_GIVEN, **kwargs - ) -> Transcription: + ) -> Union[Transcription, TranscriptionVerbose, str]: extra_headers = kwargs.pop("extra_headers", {}) response = self.openai_client.with_raw_response.audio.transcriptions.create( file=file, @@ -47,8 +52,16 @@ def create( extra_headers=extra_headers, extra_body=kwargs, ) - data = Transcription(**json.loads(response.text)) - data._headers = response.headers + + if response_format == "verbose_json": + data = TranscriptionVerbose(**json.loads(response.text)) + data._headers = response.headers + elif response_format == "json": + data = Transcription(**json.loads(response.text)) + data._headers = response.headers + else: + data = Transcription(**json.loads(response.text)) + data._headers = response.headers return data @@ -64,16 +77,14 @@ def create( file: FileTypes, model: str, prompt: Union[str, NotGiven] = NOT_GIVEN, - response_format: Union[str, NotGiven] = NOT_GIVEN, temperature: Union[float, NotGiven] = NOT_GIVEN, **kwargs - ) -> Translation: + ) -> Union[Translation, TranslationVerbose, str]: extra_headers = kwargs.pop("extra_headers", {}) response = self.openai_client.with_raw_response.audio.translations.create( file=file, model=model, prompt=prompt, - response_format=response_format, temperature=temperature, extra_headers=extra_headers, extra_body=kwargs, @@ -143,7 +154,7 @@ async def create( temperature: Union[float, NotGiven] = NOT_GIVEN, timestamp_granularities: Union[List[str], NotGiven] = NOT_GIVEN, **kwargs - ) -> Transcription: + ) -> Union[Transcription, TranscriptionVerbose, str]: extra_headers = kwargs.pop("extra_headers", {}) response = ( await self.openai_client.with_raw_response.audio.transcriptions.create( @@ -158,8 
+169,16 @@ async def create( extra_body=kwargs, ) ) - data = Transcription(**json.loads(response.text)) - data._headers = response.headers + + if response_format == "verbose_json": + data = TranscriptionVerbose(**json.loads(response.text)) + data._headers = response.headers + elif response_format == "json": + data = Transcription(**json.loads(response.text)) + data._headers = response.headers + else: + data = Transcription(**json.loads(response.text)) + data._headers = response.headers return data @@ -175,16 +194,14 @@ async def create( file: FileTypes, model: str, prompt: Union[str, NotGiven] = NOT_GIVEN, - response_format: Union[str, NotGiven] = NOT_GIVEN, temperature: Union[float, NotGiven] = NOT_GIVEN, **kwargs - ) -> Translation: + ) -> Union[Translation, TranslationVerbose, str]: extra_headers = kwargs.pop("extra_headers", {}) response = await self.openai_client.with_raw_response.audio.translations.create( file=file, model=model, prompt=prompt, - response_format=response_format, temperature=temperature, extra_headers=extra_headers, extra_body=kwargs, diff --git a/portkey_ai/api_resources/apis/beta_chat.py b/portkey_ai/api_resources/apis/beta_chat.py index 4f42cf27..45bf9f54 100644 --- a/portkey_ai/api_resources/apis/beta_chat.py +++ b/portkey_ai/api_resources/apis/beta_chat.py @@ -1,4 +1,4 @@ -from typing import Any, Union +from typing import Any, Dict, List, Union from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.types.beta_chat_type import ParsedChatCompletion @@ -24,6 +24,13 @@ def parse( model: Union[str, Any] = "portkey-default", response_format: Union[Any, NotGiven] = NOT_GIVEN, tools: Union[Any, NotGiven] = NOT_GIVEN, + audio: Union[Any, NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[int, NotGiven] = NOT_GIVEN, + metadata: Union[Dict[str, str], NotGiven] = NOT_GIVEN, + modalities: Union[List[Any], NotGiven] = NOT_GIVEN, + prediction: Union[Any, NotGiven] = NOT_GIVEN, + reasoning_effort: Union[Any, NotGiven] = NOT_GIVEN, + store: Union[bool, NotGiven] = NOT_GIVEN, **kwargs: Any, ) -> ParsedChatCompletion: response = self.openai_client.beta.chat.completions.parse( @@ -31,6 +38,13 @@ def parse( model=model, response_format=response_format, tools=tools, + audio=audio, + max_completion_tokens=max_completion_tokens, + metadata=metadata, + modalities=modalities, + prediction=prediction, + reasoning_effort=reasoning_effort, + store=store, extra_body=kwargs, ) return response # type: ignore [return-value] @@ -42,6 +56,13 @@ def stream( model: Union[str, Any] = "portkey-default", response_format: Union[Any, NotGiven] = NOT_GIVEN, tools: Union[Any, NotGiven] = NOT_GIVEN, + audio: Union[Any, NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[int, NotGiven] = NOT_GIVEN, + metadata: Union[Dict[str, str], NotGiven] = NOT_GIVEN, + modalities: Union[List[Any], NotGiven] = NOT_GIVEN, + prediction: Union[Any, NotGiven] = NOT_GIVEN, + reasoning_effort: Union[Any, NotGiven] = NOT_GIVEN, + store: Union[bool, NotGiven] = NOT_GIVEN, **kwargs: Any, ) -> Any: with self.openai_client.beta.chat.completions.stream( @@ -49,6 +70,13 @@ def stream( model=model, response_format=response_format, tools=tools, + audio=audio, + max_completion_tokens=max_completion_tokens, + metadata=metadata, + modalities=modalities, + prediction=prediction, + reasoning_effort=reasoning_effort, + store=store, extra_body=kwargs, ) as stream: for event in stream: @@ -82,6 +110,13 @@ async def 
parse( model: Union[str, Any] = "portkey-default", response_format: Union[Any, NotGiven] = NOT_GIVEN, tools: Union[Any, NotGiven] = NOT_GIVEN, + audio: Union[Any, NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[int, NotGiven] = NOT_GIVEN, + metadata: Union[Dict[str, str], NotGiven] = NOT_GIVEN, + modalities: Union[List[Any], NotGiven] = NOT_GIVEN, + prediction: Union[Any, NotGiven] = NOT_GIVEN, + reasoning_effort: Union[Any, NotGiven] = NOT_GIVEN, + store: Union[bool, NotGiven] = NOT_GIVEN, **kwargs: Any, ) -> ParsedChatCompletion: response = await self.openai_client.beta.chat.completions.parse( @@ -89,6 +124,13 @@ async def parse( model=model, response_format=response_format, tools=tools, + audio=audio, + max_completion_tokens=max_completion_tokens, + metadata=metadata, + modalities=modalities, + prediction=prediction, + reasoning_effort=reasoning_effort, + store=store, extra_body=kwargs, ) return response # type: ignore [return-value] @@ -100,6 +142,13 @@ async def stream( model: Union[str, Any] = "portkey-default", response_format: Union[Any, NotGiven] = NOT_GIVEN, tools: Union[Any, NotGiven] = NOT_GIVEN, + audio: Union[Any, NotGiven] = NOT_GIVEN, + max_completion_tokens: Union[int, NotGiven] = NOT_GIVEN, + metadata: Union[Dict[str, str], NotGiven] = NOT_GIVEN, + modalities: Union[List[Any], NotGiven] = NOT_GIVEN, + prediction: Union[Any, NotGiven] = NOT_GIVEN, + reasoning_effort: Union[Any, NotGiven] = NOT_GIVEN, + store: Union[bool, NotGiven] = NOT_GIVEN, **kwargs: Any, ) -> Any: async with self.openai_client.beta.chat.completions.stream( @@ -107,6 +156,13 @@ async def stream( model=model, response_format=response_format, tools=tools, + audio=audio, + max_completion_tokens=max_completion_tokens, + metadata=metadata, + modalities=modalities, + prediction=prediction, + reasoning_effort=reasoning_effort, + store=store, extra_body=kwargs, ) as stream: async for event in stream: diff --git a/portkey_ai/api_resources/apis/beta_realtime.py b/portkey_ai/api_resources/apis/beta_realtime.py new file mode 100644 index 00000000..3ddcd6bc --- /dev/null +++ b/portkey_ai/api_resources/apis/beta_realtime.py @@ -0,0 +1,137 @@ +import json +from typing import Any, Iterable, List, Union +from portkey_ai._vendor.openai.resources.beta.realtime.realtime import ( + AsyncRealtimeConnectionManager, + RealtimeConnectionManager, +) +from portkey_ai._vendor.openai.types.websocket_connection_options import ( + WebsocketConnectionOptions, +) +from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource +from portkey_ai.api_resources.client import AsyncPortkey, Portkey +from portkey_ai.api_resources.types.beta_realtime import SessionCreateResponse +from ..._vendor.openai._types import NotGiven, NOT_GIVEN + + +class BetaRealtime(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.sessions = BetaSessions(client) + + def connect( + self, + *, + model: str, + websocket_connection_options: WebsocketConnectionOptions = {}, + **kwargs, + ) -> RealtimeConnectionManager: + return self.openai_client.beta.realtime.connect( + model=model, + websocket_connection_options=websocket_connection_options, + extra_headers=self.openai_client.default_headers, + **kwargs, + ) + + +class AsyncBetaRealtime(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + self.sessions = AsyncBetaSessions(client) + + def connect( + self, + *, + 
model: str, + websocket_connection_options: WebsocketConnectionOptions = {}, + **kwargs, + ) -> AsyncRealtimeConnectionManager: + return self.openai_client.beta.realtime.connect( + model=model, + websocket_connection_options=websocket_connection_options, + extra_headers=self.openai_client.default_headers, + **kwargs, + ) + + +class BetaSessions(APIResource): + def __init__(self, client: Portkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + def create( + self, + *, + model: Any = "portkey-default", + input_audio_format: Union[Any, NotGiven] = NOT_GIVEN, + input_audio_transcription: Union[Any, NotGiven] = NOT_GIVEN, + instructions: Union[str, NotGiven] = NOT_GIVEN, + max_response_output_tokens: Union[int, Any, NotGiven] = NOT_GIVEN, + modalities: Union[List[Any], NotGiven] = NOT_GIVEN, + output_audio_format: Union[Any, NotGiven] = NOT_GIVEN, + temperature: Union[float, NotGiven] = NOT_GIVEN, + tool_choice: Union[str, NotGiven] = NOT_GIVEN, + tools: Union[Iterable[Any], NotGiven] = NOT_GIVEN, + turn_detection: Union[Any, NotGiven] = NOT_GIVEN, + voice: Union[Any, NotGiven] = NOT_GIVEN, + ) -> SessionCreateResponse: + response = self.openai_client.with_raw_response.beta.realtime.sessions.create( + model=model, + input_audio_format=input_audio_format, + input_audio_transcription=input_audio_transcription, + instructions=instructions, + max_response_output_tokens=max_response_output_tokens, + modalities=modalities, + output_audio_format=output_audio_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + turn_detection=turn_detection, + voice=voice, + ) + data = SessionCreateResponse(**json.loads(response.text)) + data._headers = response.headers + return data + + +class AsyncBetaSessions(AsyncAPIResource): + def __init__(self, client: AsyncPortkey) -> None: + super().__init__(client) + self.openai_client = client.openai_client + + async def create( + self, + *, + model: Any = "portkey-default", + input_audio_format: Union[Any, NotGiven] = NOT_GIVEN, + input_audio_transcription: Union[Any, NotGiven] = NOT_GIVEN, + instructions: Union[str, NotGiven] = NOT_GIVEN, + max_response_output_tokens: Union[int, Any, NotGiven] = NOT_GIVEN, + modalities: Union[List[Any], NotGiven] = NOT_GIVEN, + output_audio_format: Union[Any, NotGiven] = NOT_GIVEN, + temperature: Union[float, NotGiven] = NOT_GIVEN, + tool_choice: Union[str, NotGiven] = NOT_GIVEN, + tools: Union[Iterable[Any], NotGiven] = NOT_GIVEN, + turn_detection: Union[Any, NotGiven] = NOT_GIVEN, + voice: Union[Any, NotGiven] = NOT_GIVEN, + ) -> SessionCreateResponse: + response = ( + await self.openai_client.with_raw_response.beta.realtime.sessions.create( + model=model, + input_audio_format=input_audio_format, + input_audio_transcription=input_audio_transcription, + instructions=instructions, + max_response_output_tokens=max_response_output_tokens, + modalities=modalities, + output_audio_format=output_audio_format, + temperature=temperature, + tool_choice=tool_choice, + tools=tools, + turn_detection=turn_detection, + voice=voice, + ) + ) + data = SessionCreateResponse(**json.loads(response.text)) + data._headers = response.headers + return data diff --git a/portkey_ai/api_resources/apis/chat_complete.py b/portkey_ai/api_resources/apis/chat_complete.py index 9579ea3f..cd899215 100644 --- a/portkey_ai/api_resources/apis/chat_complete.py +++ b/portkey_ai/api_resources/apis/chat_complete.py @@ -4,8 +4,10 @@ from typing import ( Any, AsyncIterator, + Dict, Iterable, Iterator, + List, Mapping, 
Optional, Union, @@ -44,7 +46,21 @@ def __init__(self, client: Portkey) -> None: self.openai_client = client.openai_client def stream_create( # type: ignore[return] - self, model, messages, stream, temperature, max_tokens, top_p, **kwargs + self, + model, + messages, + stream, + temperature, + max_tokens, + top_p, + audio, + max_completion_tokens, + metadata, + modalities, + prediction, + reasoning_effort, + store, + **kwargs, ) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: with self.openai_client.with_streaming_response.chat.completions.create( model=model, @@ -53,6 +69,13 @@ def stream_create( # type: ignore[return] temperature=temperature, max_tokens=max_tokens, top_p=top_p, + audio=audio, + max_completion_tokens=max_completion_tokens, + metadata=metadata, + modalities=modalities, + prediction=prediction, + reasoning_effort=reasoning_effort, + store=store, extra_body=kwargs, ) as response: for line in response.iter_lines(): @@ -70,7 +93,21 @@ def stream_create( # type: ignore[return] return "" def normal_create( - self, model, messages, stream, temperature, max_tokens, top_p, **kwargs + self, + model, + messages, + stream, + temperature, + max_tokens, + top_p, + audio, + max_completion_tokens, + metadata, + modalities, + prediction, + reasoning_effort, + store, + **kwargs, ) -> ChatCompletions: response = self.openai_client.with_raw_response.chat.completions.create( model=model, @@ -79,6 +116,13 @@ def normal_create( temperature=temperature, max_tokens=max_tokens, top_p=top_p, + audio=audio, + max_completion_tokens=max_completion_tokens, + metadata=metadata, + modalities=modalities, + prediction=prediction, + reasoning_effort=reasoning_effort, + store=store, extra_body=kwargs, ) data = ChatCompletions(**json.loads(response.text)) @@ -94,6 +138,13 @@ def create( temperature: Union[float, NotGiven] = NOT_GIVEN, max_tokens: Union[int, NotGiven] = NOT_GIVEN, top_p: Union[float, NotGiven] = NOT_GIVEN, + audio: Optional[Any] = NOT_GIVEN, + max_completion_tokens: Union[int, NotGiven] = NOT_GIVEN, + metadata: Union[Dict[str, str], NotGiven] = NOT_GIVEN, + modalities: Union[List[Any], NotGiven] = NOT_GIVEN, + prediction: Union[Any, NotGiven] = NOT_GIVEN, + reasoning_effort: Union[Any, NotGiven] = NOT_GIVEN, + store: Union[Optional[bool], NotGiven] = NOT_GIVEN, **kwargs, ) -> Union[ChatCompletions, Iterator[ChatCompletionChunk]]: if stream is True: @@ -104,6 +155,13 @@ def create( temperature=temperature, max_tokens=max_tokens, top_p=top_p, + audio=audio, + max_completion_tokens=max_completion_tokens, + metadata=metadata, + modalities=modalities, + prediction=prediction, + reasoning_effort=reasoning_effort, + store=store, **kwargs, ) else: @@ -114,6 +172,13 @@ def create( temperature=temperature, max_tokens=max_tokens, top_p=top_p, + audio=audio, + max_completion_tokens=max_completion_tokens, + metadata=metadata, + modalities=modalities, + prediction=prediction, + reasoning_effort=reasoning_effort, + store=store, **kwargs, ) @@ -124,7 +189,21 @@ def __init__(self, client: AsyncPortkey) -> None: self.openai_client = client.openai_client async def stream_create( - self, model, messages, stream, temperature, max_tokens, top_p, **kwargs + self, + model, + messages, + stream, + temperature, + max_tokens, + top_p, + audio, + max_completion_tokens, + metadata, + modalities, + prediction, + reasoning_effort, + store, + **kwargs, ) -> Union[ChatCompletions, AsyncIterator[ChatCompletionChunk]]: async with self.openai_client.with_streaming_response.chat.completions.create( model=model, @@ -133,6 
+212,13 @@ async def stream_create( temperature=temperature, max_tokens=max_tokens, top_p=top_p, + audio=audio, + max_completion_tokens=max_completion_tokens, + metadata=metadata, + modalities=modalities, + prediction=prediction, + reasoning_effort=reasoning_effort, + store=store, extra_body=kwargs, ) as response: async for line in response.iter_lines(): @@ -150,7 +236,21 @@ async def stream_create( pass async def normal_create( - self, model, messages, stream, temperature, max_tokens, top_p, **kwargs + self, + model, + messages, + stream, + temperature, + max_tokens, + top_p, + audio, + max_completion_tokens, + metadata, + modalities, + prediction, + reasoning_effort, + store, + **kwargs, ) -> ChatCompletions: response = await self.openai_client.with_raw_response.chat.completions.create( model=model, @@ -159,6 +259,13 @@ async def normal_create( temperature=temperature, max_tokens=max_tokens, top_p=top_p, + audio=audio, + max_completion_tokens=max_completion_tokens, + metadata=metadata, + modalities=modalities, + prediction=prediction, + reasoning_effort=reasoning_effort, + store=store, extra_body=kwargs, ) data = ChatCompletions(**json.loads(response.text)) @@ -174,6 +281,13 @@ async def create( temperature: Union[float, NotGiven] = NOT_GIVEN, max_tokens: Union[int, NotGiven] = NOT_GIVEN, top_p: Union[float, NotGiven] = NOT_GIVEN, + audio: Optional[Any] = NOT_GIVEN, + max_completion_tokens: Union[int, NotGiven] = NOT_GIVEN, + metadata: Union[Dict[str, str], NotGiven] = NOT_GIVEN, + modalities: Union[List[Any], NotGiven] = NOT_GIVEN, + prediction: Union[Any, NotGiven] = NOT_GIVEN, + reasoning_effort: Union[Any, NotGiven] = NOT_GIVEN, + store: Union[Optional[bool], NotGiven] = NOT_GIVEN, **kwargs, ) -> Union[ChatCompletions, AsyncIterator[ChatCompletionChunk]]: if stream is True: @@ -184,6 +298,13 @@ async def create( temperature=temperature, max_tokens=max_tokens, top_p=top_p, + audio=audio, + max_completion_tokens=max_completion_tokens, + metadata=metadata, + modalities=modalities, + prediction=prediction, + reasoning_effort=reasoning_effort, + store=store, **kwargs, ) else: @@ -194,6 +315,13 @@ async def create( temperature=temperature, max_tokens=max_tokens, top_p=top_p, + audio=audio, + max_completion_tokens=max_completion_tokens, + metadata=metadata, + modalities=modalities, + prediction=prediction, + reasoning_effort=reasoning_effort, + store=store, **kwargs, ) diff --git a/portkey_ai/api_resources/apis/complete.py b/portkey_ai/api_resources/apis/complete.py index 00d9e903..f0a1cbad 100644 --- a/portkey_ai/api_resources/apis/complete.py +++ b/portkey_ai/api_resources/apis/complete.py @@ -1,5 +1,5 @@ import json -from typing import AsyncIterator, Iterator, Optional, Union +from typing import Any, AsyncIterator, Dict, Iterable, Iterator, List, Optional, Union from portkey_ai.api_resources.client import AsyncPortkey, Portkey from ..._vendor.openai._types import NotGiven, NOT_GIVEN @@ -18,7 +18,26 @@ def __init__(self, client: Portkey) -> None: self.client = client def stream_create( # type: ignore[return] - self, model, prompt, stream, temperature, max_tokens, top_p, **kwargs + self, + model, + prompt, + stream, + temperature, + max_tokens, + top_p, + best_of, + echo, + frequency_penalty, + logit_bias, + logprobs, + n, + presence_penalty, + seed, + stop, + suffix, + user, + stream_options, + **kwargs, ) -> Union[TextCompletion, Iterator[TextCompletionChunk]]: with self.openai_client.with_streaming_response.completions.create( model=model, @@ -27,6 +46,18 @@ def stream_create( # type: 
ignore[return] temperature=temperature, max_tokens=max_tokens, top_p=top_p, + best_of=best_of, + echo=echo, + frequency_penalty=frequency_penalty, + logit_bias=logit_bias, + logprobs=logprobs, + n=n, + presence_penalty=presence_penalty, + seed=seed, + stop=stop, + suffix=suffix, + user=user, + stream_options=stream_options, extra_body=kwargs, ) as response: for line in response.iter_lines(): @@ -44,7 +75,26 @@ def stream_create( # type: ignore[return] return "" def normal_create( - self, model, prompt, stream, temperature, max_tokens, top_p, **kwargs + self, + model, + prompt, + stream, + temperature, + max_tokens, + top_p, + best_of, + echo, + frequency_penalty, + logit_bias, + logprobs, + n, + presence_penalty, + seed, + stop, + suffix, + user, + stream_options, + **kwargs, ) -> TextCompletion: response = self.openai_client.with_raw_response.completions.create( model=model, @@ -53,6 +103,18 @@ def normal_create( temperature=temperature, max_tokens=max_tokens, top_p=top_p, + best_of=best_of, + echo=echo, + frequency_penalty=frequency_penalty, + logit_bias=logit_bias, + logprobs=logprobs, + n=n, + presence_penalty=presence_penalty, + seed=seed, + stop=stop, + suffix=suffix, + user=user, + stream_options=stream_options, extra_body=kwargs, ) data = TextCompletion(**json.loads(response.text)) @@ -63,11 +125,25 @@ def create( self, *, model: Optional[str] = "portkey-default", - prompt: Optional[str] = None, + prompt: Union[ + str, List[str], Iterable[int], Iterable[Iterable[int]], None + ] = None, stream: Union[bool, NotGiven] = NOT_GIVEN, temperature: Union[float, NotGiven] = NOT_GIVEN, max_tokens: Union[int, NotGiven] = NOT_GIVEN, top_p: Union[bool, NotGiven] = NOT_GIVEN, + best_of: Union[int, NotGiven] = NOT_GIVEN, + echo: Union[bool, NotGiven] = NOT_GIVEN, + frequency_penalty: Union[float, NotGiven] = NOT_GIVEN, + logit_bias: Union[Dict[str, int], NotGiven] = NOT_GIVEN, + logprobs: Union[int, NotGiven] = NOT_GIVEN, + n: Union[int, NotGiven] = NOT_GIVEN, + presence_penalty: Union[float, NotGiven] = NOT_GIVEN, + seed: Union[int, NotGiven] = NOT_GIVEN, + stop: Union[Optional[str], List[str], None, NotGiven] = NOT_GIVEN, + suffix: Union[str, NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + stream_options: Optional[Any] = NOT_GIVEN, **kwargs, ) -> Union[TextCompletion, Iterator[TextCompletionChunk]]: if stream is True: @@ -78,6 +154,18 @@ def create( temperature=temperature, max_tokens=max_tokens, top_p=top_p, + best_of=best_of, + echo=echo, + frequency_penalty=frequency_penalty, + logit_bias=logit_bias, + logprobs=logprobs, + n=n, + presence_penalty=presence_penalty, + seed=seed, + stop=stop, + suffix=suffix, + user=user, + stream_options=stream_options, **kwargs, ) else: @@ -88,6 +176,18 @@ def create( temperature=temperature, max_tokens=max_tokens, top_p=top_p, + best_of=best_of, + echo=echo, + frequency_penalty=frequency_penalty, + logit_bias=logit_bias, + logprobs=logprobs, + n=n, + presence_penalty=presence_penalty, + seed=seed, + stop=stop, + suffix=suffix, + user=user, + stream_options=stream_options, **kwargs, ) @@ -98,7 +198,26 @@ def __init__(self, client: AsyncPortkey) -> None: self.openai_client = client.openai_client async def stream_create( - self, model, prompt, stream, temperature, max_tokens, top_p, **kwargs + self, + model, + prompt, + stream, + temperature, + max_tokens, + top_p, + best_of, + echo, + frequency_penalty, + logit_bias, + logprobs, + n, + presence_penalty, + seed, + stop, + suffix, + user, + stream_options, + **kwargs, ) -> 
Union[TextCompletion, AsyncIterator[TextCompletionChunk]]: async with self.openai_client.with_streaming_response.completions.create( model=model, @@ -107,6 +226,18 @@ async def stream_create( temperature=temperature, max_tokens=max_tokens, top_p=top_p, + best_of=best_of, + echo=echo, + frequency_penalty=frequency_penalty, + logit_bias=logit_bias, + logprobs=logprobs, + n=n, + presence_penalty=presence_penalty, + seed=seed, + stop=stop, + suffix=suffix, + user=user, + stream_options=stream_options, extra_body=kwargs, ) as response: async for line in response.iter_lines(): @@ -124,7 +255,26 @@ async def stream_create( pass async def normal_create( - self, model, prompt, stream, temperature, max_tokens, top_p, **kwargs + self, + model, + prompt, + stream, + temperature, + max_tokens, + top_p, + best_of, + echo, + frequency_penalty, + logit_bias, + logprobs, + n, + presence_penalty, + seed, + stop, + suffix, + user, + stream_options, + **kwargs, ) -> TextCompletion: response = await self.openai_client.with_raw_response.completions.create( model=model, @@ -133,6 +283,18 @@ async def normal_create( temperature=temperature, max_tokens=max_tokens, top_p=top_p, + best_of=best_of, + echo=echo, + frequency_penalty=frequency_penalty, + logit_bias=logit_bias, + logprobs=logprobs, + n=n, + presence_penalty=presence_penalty, + seed=seed, + stop=stop, + suffix=suffix, + user=user, + stream_options=stream_options, extra_body=kwargs, ) data = TextCompletion(**json.loads(response.text)) @@ -143,11 +305,25 @@ async def create( self, *, model: Optional[str] = "portkey-default", - prompt: Optional[str] = None, + prompt: Union[ + str, List[str], Iterable[int], Iterable[Iterable[int]], None + ] = None, stream: Union[bool, NotGiven] = NOT_GIVEN, temperature: Union[float, NotGiven] = NOT_GIVEN, max_tokens: Union[int, NotGiven] = NOT_GIVEN, top_p: Union[bool, NotGiven] = NOT_GIVEN, + best_of: Union[int, NotGiven] = NOT_GIVEN, + echo: Union[bool, NotGiven] = NOT_GIVEN, + frequency_penalty: Union[float, NotGiven] = NOT_GIVEN, + logit_bias: Union[Dict[str, int], NotGiven] = NOT_GIVEN, + logprobs: Union[int, NotGiven] = NOT_GIVEN, + n: Union[int, NotGiven] = NOT_GIVEN, + presence_penalty: Union[float, NotGiven] = NOT_GIVEN, + seed: Union[int, NotGiven] = NOT_GIVEN, + stop: Union[Optional[str], List[str], None, NotGiven] = NOT_GIVEN, + suffix: Union[str, NotGiven] = NOT_GIVEN, + user: Union[str, NotGiven] = NOT_GIVEN, + stream_options: Optional[Any] = NOT_GIVEN, **kwargs, ) -> Union[TextCompletion, AsyncIterator[TextCompletionChunk]]: if stream is True: @@ -158,6 +334,18 @@ async def create( temperature=temperature, max_tokens=max_tokens, top_p=top_p, + best_of=best_of, + echo=echo, + frequency_penalty=frequency_penalty, + logit_bias=logit_bias, + logprobs=logprobs, + n=n, + presence_penalty=presence_penalty, + seed=seed, + stop=stop, + suffix=suffix, + user=user, + stream_options=stream_options, **kwargs, ) else: @@ -168,5 +356,17 @@ async def create( temperature=temperature, max_tokens=max_tokens, top_p=top_p, + best_of=best_of, + echo=echo, + frequency_penalty=frequency_penalty, + logit_bias=logit_bias, + logprobs=logprobs, + n=n, + presence_penalty=presence_penalty, + seed=seed, + stop=stop, + suffix=suffix, + user=user, + stream_options=stream_options, **kwargs, ) diff --git a/portkey_ai/api_resources/apis/fine_tuning.py b/portkey_ai/api_resources/apis/fine_tuning.py index 237ca92a..0b12f4e1 100644 --- a/portkey_ai/api_resources/apis/fine_tuning.py +++ b/portkey_ai/api_resources/apis/fine_tuning.py @@ -35,6 +35,7 @@ 
def create( integrations: Union[ Optional[Iterable[job_create_params.Integration]], NotGiven ] = NOT_GIVEN, + method: Union[job_create_params.Method, NotGiven] = NOT_GIVEN, seed: Union[Optional[int], NotGiven] = NOT_GIVEN, suffix: Union[Optional[str], NotGiven] = NOT_GIVEN, validation_file: Union[Optional[str], NotGiven] = NOT_GIVEN, @@ -45,6 +46,7 @@ def create( training_file=training_file, hyperparameters=hyperparameters, integrations=integrations, + method=method, seed=seed, suffix=suffix, validation_file=validation_file, @@ -163,6 +165,7 @@ async def create( integrations: Union[ Optional[Iterable[job_create_params.Integration]], NotGiven ] = NOT_GIVEN, + method: Union[job_create_params.Method, NotGiven] = NOT_GIVEN, seed: Union[Optional[int], NotGiven] = NOT_GIVEN, suffix: Union[Optional[str], NotGiven] = NOT_GIVEN, validation_file: Union[Optional[str], NotGiven] = NOT_GIVEN, @@ -173,6 +176,7 @@ async def create( training_file=training_file, hyperparameters=hyperparameters, integrations=integrations, + method=method, seed=seed, suffix=suffix, validation_file=validation_file, diff --git a/portkey_ai/api_resources/apis/images.py b/portkey_ai/api_resources/apis/images.py index 7e8092f0..c9a22660 100644 --- a/portkey_ai/api_resources/apis/images.py +++ b/portkey_ai/api_resources/apis/images.py @@ -13,27 +13,23 @@ def __init__(self, client: Portkey) -> None: self.openai_client = client.openai_client @typing.no_type_check - def generate( + def create_variation( self, *, - prompt: str, - model: Union[str, NotGiven] = NOT_GIVEN, + image, n: Union[int, NotGiven] = NOT_GIVEN, - quality: Union[str, NotGiven] = NOT_GIVEN, + model: Union[str, NotGiven] = NOT_GIVEN, response_format: Union[str, NotGiven] = NOT_GIVEN, size: Union[str, NotGiven] = NOT_GIVEN, user: Union[str, NotGiven] = NOT_GIVEN, - style: Union[str, NotGiven] = NOT_GIVEN, **kwargs ) -> ImagesResponse: - response = self.openai_client.with_raw_response.images.generate( - prompt=prompt, - model=model, + response = self.openai_client.with_raw_response.images.create_variation( + image=image, n=n, - quality=quality, + model=model, response_format=response_format, size=size, - style=style, user=user, extra_body=kwargs, ) @@ -73,21 +69,27 @@ def edit( return data @typing.no_type_check - def create_variation( + def generate( self, *, - image, + prompt: str, + model: Union[str, NotGiven] = NOT_GIVEN, n: Union[int, NotGiven] = NOT_GIVEN, + quality: Union[str, NotGiven] = NOT_GIVEN, response_format: Union[str, NotGiven] = NOT_GIVEN, size: Union[str, NotGiven] = NOT_GIVEN, user: Union[str, NotGiven] = NOT_GIVEN, + style: Union[str, NotGiven] = NOT_GIVEN, **kwargs ) -> ImagesResponse: - response = self.openai_client.with_raw_response.images.create_variation( - image=image, + response = self.openai_client.with_raw_response.images.generate( + prompt=prompt, + model=model, n=n, + quality=quality, response_format=response_format, size=size, + style=style, user=user, extra_body=kwargs, ) @@ -103,27 +105,21 @@ def __init__(self, client: AsyncPortkey) -> None: self.openai_client = client.openai_client @typing.no_type_check - async def generate( + async def create_variation( self, *, - prompt: str, - model: Union[str, NotGiven] = NOT_GIVEN, + image, n: Union[int, NotGiven] = NOT_GIVEN, - quality: Union[str, NotGiven] = NOT_GIVEN, response_format: Union[str, NotGiven] = NOT_GIVEN, size: Union[str, NotGiven] = NOT_GIVEN, user: Union[str, NotGiven] = NOT_GIVEN, - style: Union[str, NotGiven] = NOT_GIVEN, **kwargs ) -> ImagesResponse: - response = await 
self.openai_client.with_raw_response.images.generate( - prompt=prompt, - model=model, + response = await self.openai_client.with_raw_response.images.create_variation( + image=image, n=n, - quality=quality, response_format=response_format, size=size, - style=style, user=user, extra_body=kwargs, ) @@ -163,21 +159,27 @@ async def edit( return data @typing.no_type_check - async def create_variation( + async def generate( self, *, - image, + prompt: str, + model: Union[str, NotGiven] = NOT_GIVEN, n: Union[int, NotGiven] = NOT_GIVEN, + quality: Union[str, NotGiven] = NOT_GIVEN, response_format: Union[str, NotGiven] = NOT_GIVEN, size: Union[str, NotGiven] = NOT_GIVEN, user: Union[str, NotGiven] = NOT_GIVEN, + style: Union[str, NotGiven] = NOT_GIVEN, **kwargs ) -> ImagesResponse: - response = await self.openai_client.with_raw_response.images.create_variation( - image=image, + response = await self.openai_client.with_raw_response.images.generate( + prompt=prompt, + model=model, n=n, + quality=quality, response_format=response_format, size=size, + style=style, user=user, extra_body=kwargs, ) diff --git a/portkey_ai/api_resources/apis/main_files.py b/portkey_ai/api_resources/apis/main_files.py index f5063efd..33449e27 100644 --- a/portkey_ai/api_resources/apis/main_files.py +++ b/portkey_ai/api_resources/apis/main_files.py @@ -1,5 +1,6 @@ import json from typing import Any, Union +import typing_extensions from portkey_ai._vendor.openai._types import NOT_GIVEN, NotGiven from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey @@ -25,15 +26,6 @@ def create(self, file, purpose, **kwargs) -> FileObject: return data - def list(self, purpose: Union[str, NotGiven] = NOT_GIVEN, **kwargs) -> FileList: - response = self.openai_client.with_raw_response.files.list( - purpose=purpose, **kwargs - ) - data = FileList(**json.loads(response.text)) - data._headers = response.headers - - return data - def retrieve(self, file_id, **kwargs) -> FileObject: if kwargs: response = self.openai_client.with_raw_response.files.retrieve( @@ -48,6 +40,27 @@ def retrieve(self, file_id, **kwargs) -> FileObject: return data + def list( + self, + *, + purpose: Union[str, NotGiven] = NOT_GIVEN, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[Any, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> FileList: + response = self.openai_client.with_raw_response.files.list( + purpose=purpose, + after=after, + limit=limit, + order=order, + **kwargs, + ) + data = FileList(**json.loads(response.text)) + data._headers = response.headers + + return data + def delete(self, file_id, **kwargs) -> FileDeleted: response = self.openai_client.with_raw_response.files.delete( file_id=file_id, extra_body=kwargs @@ -66,6 +79,7 @@ def content(self, file_id, **kwargs) -> Any: response = self.openai_client.files.content(file_id=file_id) return response + @typing_extensions.deprecated("The `.content()` method should be used instead") def retrieve_content(self, file_id, **kwargs) -> Any: if kwargs: response = self.openai_client.files.content( @@ -105,17 +119,6 @@ async def create(self, file, purpose, **kwargs) -> FileObject: return data - async def list( - self, purpose: Union[str, NotGiven] = NOT_GIVEN, **kwargs - ) -> FileList: - response = await self.openai_client.with_raw_response.files.list( - purpose=purpose, **kwargs - ) - data = FileList(**json.loads(response.text)) - data._headers = response.headers - - return data - 
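The `files.list()` methods above were moved and re-signed to accept OpenAI's cursor pagination parameters alongside `purpose`. A minimal sketch of the new call shape, assuming a configured `Portkey` client (the credentials below are placeholders) and that `FileList` exposes a `.data` field like the upstream list type:

```python
from portkey_ai import Portkey

# Placeholder credentials -- substitute real values.
portkey = Portkey(api_key="PORTKEY_API_KEY", virtual_key="OPENAI_VIRTUAL_KEY")

# `purpose` is unchanged; `after`, `limit`, and `order` are new keyword-only
# parameters forwarded straight to the vendored OpenAI client.
files = portkey.files.list(purpose="assistants", limit=20, order="desc")
for f in files.data:  # assumes FileList mirrors the upstream `.data` field
    print(f.id, f.filename)
```

The async variant has the identical signature; only `await` is added at the call site.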
async def retrieve(self, file_id, **kwargs) -> FileObject: if kwargs: response = await self.openai_client.with_raw_response.files.retrieve( @@ -130,6 +133,27 @@ async def retrieve(self, file_id, **kwargs) -> FileObject: return data + async def list( + self, + *, + purpose: Union[str, NotGiven] = NOT_GIVEN, + after: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[Any, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> FileList: + response = await self.openai_client.with_raw_response.files.list( + purpose=purpose, + after=after, + limit=limit, + order=order, + **kwargs, + ) + data = FileList(**json.loads(response.text)) + data._headers = response.headers + + return data + async def delete(self, file_id, **kwargs) -> FileDeleted: response = await self.openai_client.with_raw_response.files.delete( file_id=file_id, extra_body=kwargs @@ -148,6 +172,7 @@ async def content(self, file_id, **kwargs) -> Any: response = await self.openai_client.files.content(file_id=file_id) return response + @typing_extensions.deprecated("The `.content()` method should be used instead") async def retrieve_content(self, file_id, **kwargs) -> Any: if kwargs: response = await self.openai_client.files.content( diff --git a/portkey_ai/api_resources/apis/models.py b/portkey_ai/api_resources/apis/models.py index 8b4e9322..333a13af 100644 --- a/portkey_ai/api_resources/apis/models.py +++ b/portkey_ai/api_resources/apis/models.py @@ -11,12 +11,6 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def list(self, **kwargs) -> ModelList: - response = self.openai_client.with_raw_response.models.list(**kwargs) - data = ModelList(**json.loads(response.text)) - data._headers = response.headers - return data - def retrieve( self, model: str, *, timeout: Union[float, NotGiven] = NOT_GIVEN, **kwargs ) -> Model: @@ -32,6 +26,12 @@ def retrieve( data._headers = response.headers return data + def list(self, **kwargs) -> ModelList: + response = self.openai_client.with_raw_response.models.list(**kwargs) + data = ModelList(**json.loads(response.text)) + data._headers = response.headers + return data + def delete( self, model: str, *, timeout: Union[float, NotGiven] = NOT_GIVEN, **kwargs ) -> ModelDeleted: @@ -48,12 +48,6 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def list(self, **kwargs) -> ModelList: - response = await self.openai_client.with_raw_response.models.list(**kwargs) - data = ModelList(**json.loads(response.text)) - data._headers = response.headers - return data - async def retrieve( self, model: str, *, timeout: Union[float, NotGiven] = NOT_GIVEN, **kwargs ) -> Model: @@ -69,6 +63,12 @@ async def retrieve( data._headers = response.headers return data + async def list(self, **kwargs) -> ModelList: + response = await self.openai_client.with_raw_response.models.list(**kwargs) + data = ModelList(**json.loads(response.text)) + data._headers = response.headers + return data + async def delete( self, model: str, *, timeout: Union[float, NotGiven] = NOT_GIVEN, **kwargs ) -> ModelDeleted: diff --git a/portkey_ai/api_resources/apis/moderations.py b/portkey_ai/api_resources/apis/moderations.py index 3abe5f9b..c099a4b9 100644 --- a/portkey_ai/api_resources/apis/moderations.py +++ b/portkey_ai/api_resources/apis/moderations.py @@ -1,5 +1,5 @@ import json -from typing import List, Union +from typing import Any, Iterable, List, Union from 
portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from ..._vendor.openai._types import NotGiven, NOT_GIVEN @@ -14,7 +14,7 @@ def __init__(self, client: Portkey) -> None: def create( self, *, - input: Union[str, List[str]], + input: Union[str, List[str], Iterable[Any]], model: Union[str, NotGiven] = NOT_GIVEN, **kwargs ) -> ModerationCreateResponse: @@ -35,7 +35,7 @@ def __init__(self, client: AsyncPortkey) -> None: async def create( self, *, - input: Union[str, List[str]], + input: Union[str, List[str], Iterable[Any]], model: Union[str, NotGiven] = NOT_GIVEN, **kwargs ) -> ModerationCreateResponse: diff --git a/portkey_ai/api_resources/apis/threads.py b/portkey_ai/api_resources/apis/threads.py index 7c4c3607..1d4a101d 100644 --- a/portkey_ai/api_resources/apis/threads.py +++ b/portkey_ai/api_resources/apis/threads.py @@ -1,5 +1,14 @@ import json -from typing import Any, AsyncIterator, Iterable, Iterator, Literal, Optional, Union +from typing import ( + Any, + AsyncIterator, + Iterable, + Iterator, + List, + Literal, + Optional, + Union, +) import typing from portkey_ai._vendor.openai.types.beta.assistant_stream_event import ( @@ -283,6 +292,38 @@ def create( return data + def retrieve(self, thread_id, message_id, **kwargs) -> ThreadMessage: + if kwargs: + response = ( + self.openai_client.with_raw_response.beta.threads.messages.retrieve( + thread_id=thread_id, message_id=message_id, extra_body=kwargs + ) + ) + else: + response = ( + self.openai_client.with_raw_response.beta.threads.messages.retrieve( + thread_id=thread_id, message_id=message_id + ) + ) + data = ThreadMessage(**json.loads(response.text)) + data._headers = response.headers + return data + + def update( + self, + thread_id, + message_id, + *, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + **kwargs, + ) -> ThreadMessage: + response = self.openai_client.with_raw_response.beta.threads.messages.update( + thread_id=thread_id, message_id=message_id, metadata=metadata, **kwargs + ) + data = ThreadMessage(**json.loads(response.text)) + data._headers = response.headers + return data + def list( self, thread_id, @@ -307,31 +348,6 @@ def list( data._headers = response.headers return data - def retrieve(self, thread_id, message_id, **kwargs) -> ThreadMessage: - if kwargs: - response = ( - self.openai_client.with_raw_response.beta.threads.messages.retrieve( - thread_id=thread_id, message_id=message_id, extra_body=kwargs - ) - ) - else: - response = ( - self.openai_client.with_raw_response.beta.threads.messages.retrieve( - thread_id=thread_id, message_id=message_id - ) - ) - data = ThreadMessage(**json.loads(response.text)) - data._headers = response.headers - return data - - def update(self, thread_id, message_id, **kwargs) -> ThreadMessage: - response = self.openai_client.with_raw_response.beta.threads.messages.update( - thread_id=thread_id, message_id=message_id, **kwargs - ) - data = ThreadMessage(**json.loads(response.text)) - data._headers = response.headers - return data - def delete( self, message_id: str, *, thread_id: str, **kwargs ) -> ThreadMessageDeleted: @@ -418,15 +434,6 @@ def retrieve(self, thread_id, run_id, **kwargs) -> Run: return data - def list(self, thread_id, **kwargs) -> RunList: - response = self.openai_client.with_raw_response.beta.threads.runs.list( - thread_id=thread_id, **kwargs - ) - data = RunList(**json.loads(response.text)) - data._headers = response.headers - - return data - def update( self, 
thread_id, @@ -442,13 +449,25 @@ def update( return data - def submit_tool_outputs(self, thread_id, tool_outputs, run_id, **kwargs) -> Run: - response = ( - self.openai_client.with_raw_response.beta.threads.runs.submit_tool_outputs( - thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, **kwargs - ) + def list( + self, + thread_id, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + before: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[NotGiven, Literal["asc", "desc"]] = NOT_GIVEN, + **kwargs, + ) -> RunList: + response = self.openai_client.with_raw_response.beta.threads.runs.list( + thread_id=thread_id, + after=after, + before=before, + limit=limit, + order=order, + **kwargs, ) - data = Run(**json.loads(response.text)) + data = RunList(**json.loads(response.text)) data._headers = response.headers return data @@ -466,6 +485,7 @@ def create_and_poll( self, *, assistant_id: str, + include: Union[List[Any], NotGiven] = NOT_GIVEN, additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, additional_messages: Union[ Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven @@ -475,6 +495,7 @@ def create_and_poll( max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, + parallel_tool_calls: Union[bool, NotGiven] = NOT_GIVEN, response_format: Union[ Optional[AssistantResponseFormatOptionParam], NotGiven ] = NOT_GIVEN, @@ -493,6 +514,7 @@ def create_and_poll( ) -> Run: response = self.openai_client.beta.threads.runs.create_and_poll( assistant_id=assistant_id, + include=include, additional_instructions=additional_instructions, additional_messages=additional_messages, instructions=instructions, @@ -500,6 +522,7 @@ def create_and_poll( max_prompt_tokens=max_prompt_tokens, metadata=metadata, model=model, + parallel_tool_calls=parallel_tool_calls, response_format=response_format, temperature=temperature, tool_choice=tool_choice, @@ -528,6 +551,7 @@ def create_and_stream( max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, + parallel_tool_calls: Union[bool, NotGiven] = NOT_GIVEN, response_format: Union[ Optional[AssistantResponseFormatOptionParam], NotGiven ] = NOT_GIVEN, @@ -556,6 +580,7 @@ def create_and_stream( max_prompt_tokens=max_prompt_tokens, metadata=metadata, model=model, + parallel_tool_calls=parallel_tool_calls, response_format=response_format, temperature=temperature, tool_choice=tool_choice, @@ -588,6 +613,7 @@ def stream( self, *, assistant_id: str, + include: Union[List[Any], NotGiven] = NOT_GIVEN, additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, additional_messages: Union[ Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven @@ -597,6 +623,7 @@ def stream( max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, + parallel_tool_calls: Union[bool, NotGiven] = NOT_GIVEN, response_format: Union[ Optional[AssistantResponseFormatOptionParam], NotGiven ] = NOT_GIVEN, @@ -618,6 +645,7 @@ def stream( ]: response = self.openai_client.beta.threads.runs.stream( assistant_id=assistant_id, + include=include, additional_instructions=additional_instructions, additional_messages=additional_messages, instructions=instructions, @@ -625,6 +653,7 @@ def stream( 
max_prompt_tokens=max_prompt_tokens, metadata=metadata, model=model, + parallel_tool_calls=parallel_tool_calls, response_format=response_format, temperature=temperature, tool_choice=tool_choice, @@ -638,6 +667,29 @@ def stream( data = response return data + def submit_tool_outputs( + self, + run_id, + *, + thread_id, + tool_outputs, + stream: Union[bool, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> Run: + response = ( + self.openai_client.with_raw_response.beta.threads.runs.submit_tool_outputs( + thread_id=thread_id, + run_id=run_id, + tool_outputs=tool_outputs, + stream=stream, + **kwargs, + ) + ) + data = Run(**json.loads(response.text)) + data._headers = response.headers + + return data + def submit_tool_outputs_and_poll( self, *, @@ -687,29 +739,31 @@ def __init__(self, client: Portkey) -> None: super().__init__(client) self.openai_client = client.openai_client - def list(self, thread_id, run_id, **kwargs) -> RunStepList: - response = self.openai_client.with_raw_response.beta.threads.runs.steps.list( - thread_id=thread_id, run_id=run_id, **kwargs - ) - data = RunStepList(**json.loads(response.text)) - data._headers = response.headers - - return data - - def retrieve(self, thread_id, run_id, step_id, **kwargs) -> RunStep: + def retrieve( + self, + thread_id, + run_id, + step_id, + include: Union[List[Any], NotGiven] = NOT_GIVEN, + **kwargs, + ) -> RunStep: if kwargs: response = ( self.openai_client.with_raw_response.beta.threads.runs.steps.retrieve( thread_id=thread_id, run_id=run_id, step_id=step_id, + include=include, extra_body=kwargs, ) ) else: response = ( self.openai_client.with_raw_response.beta.threads.runs.steps.retrieve( - thread_id=thread_id, run_id=run_id, step_id=step_id + thread_id=thread_id, + run_id=run_id, + step_id=step_id, + include=include, ) ) data = RunStep(**json.loads(response.text)) @@ -717,6 +771,33 @@ def retrieve(self, thread_id, run_id, step_id, **kwargs) -> RunStep: return data + def list( + self, + run_id, + *, + thread_id, + after: Union[str, NotGiven] = NOT_GIVEN, + before: Union[str, NotGiven] = NOT_GIVEN, + include: Union[List[Any], NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[Any, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> RunStepList: + response = self.openai_client.with_raw_response.beta.threads.runs.steps.list( + thread_id=thread_id, + run_id=run_id, + after=after, + before=before, + include=include, + limit=limit, + order=order, + **kwargs, + ) + data = RunStepList(**json.loads(response.text)) + data._headers = response.headers + + return data + class AsyncThreads(AsyncAPIResource): def __init__(self, client: AsyncPortkey) -> None: @@ -965,6 +1046,36 @@ async def create( return data + async def retrieve(self, thread_id, message_id, **kwargs) -> ThreadMessage: + if kwargs: + response = await self.openai_client.with_raw_response.beta.threads.messages.retrieve( # noqa: E501 + thread_id=thread_id, message_id=message_id, extra_body=kwargs + ) + else: + response = await self.openai_client.with_raw_response.beta.threads.messages.retrieve( # noqa: E501 + thread_id=thread_id, message_id=message_id + ) + data = ThreadMessage(**json.loads(response.text)) + data._headers = response.headers + return data + + async def update( + self, + thread_id, + message_id, + *, + metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, + **kwargs, + ) -> ThreadMessage: + response = ( + await self.openai_client.with_raw_response.beta.threads.messages.update( + thread_id=thread_id, message_id=message_id, metadata=metadata, **kwargs + ) + ) + 
data = ThreadMessage(**json.loads(response.text)) + data._headers = response.headers + return data + async def list( self, thread_id, @@ -991,29 +1102,6 @@ async def list( data._headers = response.headers return data - async def retrieve(self, thread_id, message_id, **kwargs) -> ThreadMessage: - if kwargs: - response = await self.openai_client.with_raw_response.beta.threads.messages.retrieve( # noqa: E501 - thread_id=thread_id, message_id=message_id, extra_body=kwargs - ) - else: - response = await self.openai_client.with_raw_response.beta.threads.messages.retrieve( # noqa: E501 - thread_id=thread_id, message_id=message_id - ) - data = ThreadMessage(**json.loads(response.text)) - data._headers = response.headers - return data - - async def update(self, thread_id, message_id, **kwargs) -> ThreadMessage: - response = ( - await self.openai_client.with_raw_response.beta.threads.messages.update( - thread_id=thread_id, message_id=message_id, **kwargs - ) - ) - data = ThreadMessage(**json.loads(response.text)) - data._headers = response.headers - return data - async def delete( self, message_id: str, *, thread_id: str, **kwargs ) -> ThreadMessageDeleted: @@ -1104,15 +1192,6 @@ async def retrieve(self, thread_id, run_id, **kwargs) -> Run: return data - async def list(self, thread_id, **kwargs) -> RunList: - response = await self.openai_client.with_raw_response.beta.threads.runs.list( - thread_id=thread_id, **kwargs - ) - data = RunList(**json.loads(response.text)) - data._headers = response.headers - - return data - async def update( self, thread_id, @@ -1128,23 +1207,25 @@ async def update( return data - async def submit_tool_outputs( - self, thread_id, tool_outputs, run_id, **kwargs - ) -> Run: - # fmt: off - response = await self.openai_client\ - .with_raw_response\ - .beta\ - .threads\ - .runs\ - .submit_tool_outputs( - thread_id=thread_id, - run_id=run_id, - tool_outputs=tool_outputs, - **kwargs + async def list( + self, + thread_id, + *, + after: Union[str, NotGiven] = NOT_GIVEN, + before: Union[str, NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[NotGiven, Literal["asc", "desc"]] = NOT_GIVEN, + **kwargs, + ) -> RunList: + response = await self.openai_client.with_raw_response.beta.threads.runs.list( + thread_id=thread_id, + after=after, + before=before, + limit=limit, + order=order, + **kwargs, ) - # fmt: on - data = Run(**json.loads(response.text)) + data = RunList(**json.loads(response.text)) data._headers = response.headers return data @@ -1162,6 +1243,7 @@ async def create_and_poll( self, *, assistant_id: str, + include: Union[List[Any], NotGiven] = NOT_GIVEN, additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, additional_messages: Union[ Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven @@ -1171,6 +1253,7 @@ async def create_and_poll( max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, + parallel_tool_calls: Union[bool, NotGiven] = NOT_GIVEN, response_format: Union[ Optional[AssistantResponseFormatOptionParam], NotGiven ] = NOT_GIVEN, @@ -1189,6 +1272,7 @@ async def create_and_poll( ) -> Run: response = await self.openai_client.beta.threads.runs.create_and_poll( assistant_id=assistant_id, + include=include, additional_instructions=additional_instructions, additional_messages=additional_messages, instructions=instructions, @@ -1196,6 +1280,7 @@ async def create_and_poll( max_prompt_tokens=max_prompt_tokens, 
metadata=metadata, model=model, + parallel_tool_calls=parallel_tool_calls, response_format=response_format, temperature=temperature, tool_choice=tool_choice, @@ -1224,6 +1309,7 @@ async def create_and_stream( max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, + parallel_tool_calls: Union[bool, NotGiven] = NOT_GIVEN, response_format: Union[ Optional[AssistantResponseFormatOptionParam], NotGiven ] = NOT_GIVEN, @@ -1254,6 +1340,7 @@ async def create_and_stream( max_prompt_tokens=max_prompt_tokens, metadata=metadata, model=model, + parallel_tool_calls=parallel_tool_calls, response_format=response_format, temperature=temperature, tool_choice=tool_choice, @@ -1286,6 +1373,7 @@ async def stream( self, *, assistant_id: str, + include: Union[List[Any], NotGiven] = NOT_GIVEN, additional_instructions: Union[Optional[str], NotGiven] = NOT_GIVEN, additional_messages: Union[ Optional[Iterable[run_create_params.AdditionalMessage]], NotGiven @@ -1295,6 +1383,7 @@ async def stream( max_prompt_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN, metadata: Union[Optional[object], NotGiven] = NOT_GIVEN, model: Union[str, None, NotGiven] = NOT_GIVEN, + parallel_tool_calls: Union[bool, NotGiven] = NOT_GIVEN, response_format: Union[ Optional[AssistantResponseFormatOptionParam], NotGiven ] = NOT_GIVEN, @@ -1318,6 +1407,7 @@ async def stream( ): response = await self.openai_client.beta.threads.runs.stream( assistant_id=assistant_id, + include=include, additional_instructions=additional_instructions, additional_messages=additional_messages, instructions=instructions, @@ -1325,6 +1415,7 @@ async def stream( max_prompt_tokens=max_prompt_tokens, metadata=metadata, model=model, + parallel_tool_calls=parallel_tool_calls, response_format=response_format, temperature=temperature, tool_choice=tool_choice, @@ -1338,6 +1429,34 @@ async def stream( data = response return data + async def submit_tool_outputs( + self, + run_id, + *, + thread_id, + tool_outputs, + stream: Union[bool, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> Run: + # fmt: off + response = await self.openai_client\ + .with_raw_response\ + .beta\ + .threads\ + .runs\ + .submit_tool_outputs( + thread_id=thread_id, + run_id=run_id, + tool_outputs=tool_outputs, + stream=stream, + **kwargs + ) + # fmt: on + data = Run(**json.loads(response.text)) + data._headers = response.headers + + return data + async def submit_tool_outputs_and_poll( self, *, @@ -1389,27 +1508,56 @@ def __init__(self, client: AsyncPortkey) -> None: super().__init__(client) self.openai_client = client.openai_client - async def list(self, thread_id, run_id, **kwargs) -> RunStepList: - response = ( - await self.openai_client.with_raw_response.beta.threads.runs.steps.list( - thread_id=thread_id, run_id=run_id, **kwargs - ) - ) - data = RunStepList(**json.loads(response.text)) - data._headers = response.headers - - return data - - async def retrieve(self, thread_id, run_id, step_id, **kwargs) -> RunStep: + async def retrieve( + self, + thread_id, + run_id, + step_id, + include: Union[List[Any], NotGiven] = NOT_GIVEN, + **kwargs, + ) -> RunStep: if kwargs: response = await self.openai_client.with_raw_response.beta.threads.runs.steps.retrieve( # noqa: E501 - thread_id=thread_id, run_id=run_id, step_id=step_id, extra_body=kwargs + thread_id=thread_id, + run_id=run_id, + step_id=step_id, + include=include, + extra_body=kwargs, ) else: response = await 
self.openai_client.with_raw_response.beta.threads.runs.steps.retrieve( # noqa: E501 - thread_id=thread_id, run_id=run_id, step_id=step_id + thread_id=thread_id, run_id=run_id, step_id=step_id, include=include ) data = RunStep(**json.loads(response.text)) data._headers = response.headers return data + + async def list( + self, + run_id, + *, + thread_id, + after: Union[str, NotGiven] = NOT_GIVEN, + before: Union[str, NotGiven] = NOT_GIVEN, + include: Union[List[Any], NotGiven] = NOT_GIVEN, + limit: Union[int, NotGiven] = NOT_GIVEN, + order: Union[Any, NotGiven] = NOT_GIVEN, + **kwargs, + ) -> RunStepList: + response = ( + await self.openai_client.with_raw_response.beta.threads.runs.steps.list( + thread_id=thread_id, + run_id=run_id, + after=after, + before=before, + include=include, + limit=limit, + order=order, + **kwargs, + ) + ) + data = RunStepList(**json.loads(response.text)) + data._headers = response.headers + + return data diff --git a/portkey_ai/api_resources/apis/uploads.py b/portkey_ai/api_resources/apis/uploads.py index 49364490..64343590 100644 --- a/portkey_ai/api_resources/apis/uploads.py +++ b/portkey_ai/api_resources/apis/uploads.py @@ -1,5 +1,7 @@ import json +import os from typing import Any, List, Union +import typing from portkey_ai.api_resources.apis.api_resource import APIResource, AsyncAPIResource from portkey_ai.api_resources.client import AsyncPortkey, Portkey from portkey_ai.api_resources.types.upload_types import Upload, UploadPart @@ -12,6 +14,29 @@ def __init__(self, client: Portkey) -> None: self.openai_client = client.openai_client self.parts = Parts(client) + @typing.no_type_check + def upload_file_chunked( + self, + *, + file: Union[os.PathLike[str], bytes], + mime_type: str, + purpose: Any, + filename: Union[str, None] = None, + bytes: Union[int, None] = None, + part_size: Union[int, None] = None, + md5: Union[str, NotGiven] = NOT_GIVEN, + ) -> Any: + response = self.openai_client.uploads.upload_file_chunked( + file=file, + mime_type=mime_type, + purpose=purpose, + filename=filename, + bytes=bytes, + part_size=part_size, + md5=md5, + ) + return response + def create( self, *, bytes: int, filename: str, mime_type: str, purpose: Any, **kwargs ) -> Upload: @@ -45,7 +70,7 @@ def complete( *, part_ids: List[str], md5: Union[str, NotGiven] = NOT_GIVEN, - **kwargs + **kwargs, ) -> Upload: extra_headers = kwargs.pop("extra_headers", {}) response = self.openai_client.with_raw_response.uploads.complete( @@ -86,6 +111,29 @@ def __init__(self, client: AsyncPortkey) -> None: self.openai_client = client.openai_client self.parts = AsyncParts(client) + @typing.no_type_check + async def upload_file_chunked( + self, + *, + file: Union[os.PathLike[str], bytes], + mime_type: str, + purpose: Any, + filename: Union[str, None] = None, + bytes: Union[int, None] = None, + part_size: Union[int, None] = None, + md5: Union[str, NotGiven] = NOT_GIVEN, + ) -> Any: + response = await self.openai_client.uploads.upload_file_chunked( + file=file, + mime_type=mime_type, + purpose=purpose, + filename=filename, + bytes=bytes, + part_size=part_size, + md5=md5, + ) + return response + async def create( self, *, bytes: int, filename: str, mime_type: str, purpose: Any, **kwargs ) -> Upload: @@ -119,7 +167,7 @@ async def complete( *, part_ids: List[str], md5: Union[str, NotGiven] = NOT_GIVEN, - **kwargs + **kwargs, ) -> Upload: extra_headers = kwargs.pop("extra_headers", {}) response = await self.openai_client.with_raw_response.uploads.complete( diff --git 
diff --git a/portkey_ai/api_resources/apis/vector_stores.py b/portkey_ai/api_resources/apis/vector_stores.py
index 9258822f..cee336d5 100644
--- a/portkey_ai/api_resources/apis/vector_stores.py
+++ b/portkey_ai/api_resources/apis/vector_stores.py
@@ -97,12 +97,14 @@ def list(
         self,
         *,
         after: Union[str, NotGiven] = NOT_GIVEN,
+        before: Union[str, NotGiven] = NOT_GIVEN,
         limit: Union[int, NotGiven] = NOT_GIVEN,
         order: Union[str, NotGiven] = NOT_GIVEN,
         **kwargs,
     ) -> VectorStoreList:
         response = self.openai_client.with_raw_response.beta.vector_stores.list(
             after=after,
+            before=before,
             limit=limit,
             order=order,
             **kwargs,
@@ -398,7 +400,7 @@ def list_files(
             order=order,
             **kwargs,
         )
-        data = VectorStoreFileBatch(**json.loads(response.text))
+        data = VectorStoreFileList(**json.loads(response.text))
         data._headers = response.headers
 
         return data
@@ -527,12 +529,14 @@ async def list(
         self,
         *,
         after: Union[str, NotGiven] = NOT_GIVEN,
+        before: Union[str, NotGiven] = NOT_GIVEN,
         limit: Union[int, NotGiven] = NOT_GIVEN,
         order: Union[str, NotGiven] = NOT_GIVEN,
         **kwargs,
     ) -> VectorStoreList:
         response = await self.openai_client.with_raw_response.beta.vector_stores.list(
             after=after,
+            before=before,
             limit=limit,
             order=order,
             **kwargs,
diff --git a/portkey_ai/api_resources/base_client.py b/portkey_ai/api_resources/base_client.py
index b5d918cd..dd70adc9 100644
--- a/portkey_ai/api_resources/base_client.py
+++ b/portkey_ai/api_resources/base_client.py
@@ -53,6 +53,7 @@ def __init__(
         base_url: Optional[str] = None,
         api_key: Optional[str] = None,
         virtual_key: Optional[str] = None,
+        websocket_base_url: Optional[Union[str, httpx.URL]] = None,
         config: Optional[Union[Mapping, str]] = None,
         provider: Optional[str] = None,
         trace_id: Optional[str] = None,
@@ -87,6 +88,7 @@ def __init__(
         self.base_url = set_base_url(base_url, api_key)
         self.api_key = default_api_key(self.base_url, api_key)
         self.virtual_key = virtual_key
+        self.websocket_base_url = websocket_base_url
         self.config = config
         self.provider = provider
         self.trace_id = trace_id
@@ -697,6 +699,7 @@ def __init__(
         base_url: Optional[str] = None,
         api_key: Optional[str] = None,
         virtual_key: Optional[str] = None,
+        websocket_base_url: Optional[Union[str, httpx.URL]] = None,
         config: Optional[Union[Mapping, str]] = None,
         provider: Optional[str] = None,
         trace_id: Optional[str] = None,
@@ -731,6 +734,7 @@ def __init__(
         self.base_url = set_base_url(base_url, api_key)
         self.api_key = default_api_key(self.base_url, api_key)
         self.virtual_key = virtual_key
+        self.websocket_base_url = websocket_base_url
         self.config = config
         self.provider = provider
         self.trace_id = trace_id
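The `before` cursor added to `vector_stores.list` above (sync and async) enables backward pagination. A sketch, assuming the response exposes the usual `data` list; the cursor id is a placeholder:

```python
from portkey_ai import Portkey

client = Portkey(api_key="PORTKEY_API_KEY")  # placeholder key

# Fetch the 20 stores that precede a known vector store id.
page = client.beta.vector_stores.list(
    before="vs_abc123",  # hypothetical cursor
    limit=20,
    order="desc",
)
for store in page.data:  # assumes a `data` list on VectorStoreList
    print(store.id)
```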
diff --git a/portkey_ai/api_resources/client.py b/portkey_ai/api_resources/client.py
index 0d0951b9..01c6bc89 100644
--- a/portkey_ai/api_resources/client.py
+++ b/portkey_ai/api_resources/client.py
@@ -38,12 +38,14 @@ class beta:
         threads: apis.Threads
         vector_stores: apis.VectorStores
         chat: apis.BetaChat
+        realtime: apis.BetaRealtime
 
         def __init__(self, client: Portkey) -> None:
             self.assistants = apis.Assistants(client)
             self.threads = apis.Threads(client)
             self.vector_stores = apis.VectorStores(client)
             self.chat = apis.BetaChat(client)
+            self.realtime = apis.BetaRealtime(client)
 
     def __init__(
         self,
@@ -51,6 +53,7 @@ def __init__(
         api_key: Optional[str] = None,
         base_url: Optional[str] = None,
         virtual_key: Optional[str] = None,
+        websocket_base_url: Optional[Union[str, httpx.URL]] = None,
        config: Optional[Union[Mapping, str]] = None,
         provider: Optional[str] = None,
         trace_id: Optional[str] = None,
@@ -86,6 +89,7 @@ def __init__(
             api_key=api_key,
             base_url=base_url,
             virtual_key=virtual_key,
+            websocket_base_url=websocket_base_url,
             config=config,
             provider=provider,
             trace_id=trace_id,
@@ -124,6 +128,7 @@ def __init__(
             default_headers=self.allHeaders,
             http_client=http_client,
             max_retries=0,
+            websocket_base_url=self.websocket_base_url,
         )
 
         self.completions = apis.Completion(self)
@@ -153,6 +158,7 @@ def copy(
         api_key: Optional[str] = None,
         base_url: Optional[str] = None,
         virtual_key: Optional[str] = None,
+        websocket_base_url: Optional[Union[str, httpx.URL]] = None,
         config: Optional[Union[Mapping, str]] = None,
         provider: Optional[str] = None,
         trace_id: Optional[str] = None,
@@ -188,6 +194,7 @@ def copy(
             api_key=api_key or self.api_key,
             base_url=base_url or self.base_url,
             virtual_key=virtual_key or self.virtual_key,
+            websocket_base_url=websocket_base_url or self.websocket_base_url,
             config=config or self.config,
             provider=provider or self.provider,
             trace_id=trace_id or self.trace_id,
@@ -255,12 +262,14 @@ class beta:
         threads: apis.AsyncThreads
         vector_stores: apis.AsyncVectorStores
         chat: apis.AsyncBetaChat
+        realtime: apis.AsyncBetaRealtime
 
         def __init__(self, client: AsyncPortkey) -> None:
             self.assistants = apis.AsyncAssistants(client)
             self.threads = apis.AsyncThreads(client)
             self.vector_stores = apis.AsyncVectorStores(client)
             self.chat = apis.AsyncBetaChat(client)
+            self.realtime = apis.AsyncBetaRealtime(client)
 
     def __init__(
         self,
@@ -268,6 +277,7 @@ def __init__(
         api_key: Optional[str] = None,
         base_url: Optional[str] = None,
         virtual_key: Optional[str] = None,
+        websocket_base_url: Optional[Union[str, httpx.URL]] = None,
         config: Optional[Union[Mapping, str]] = None,
         provider: Optional[str] = None,
         trace_id: Optional[str] = None,
@@ -303,6 +313,7 @@ def __init__(
             api_key=api_key,
             base_url=base_url,
             virtual_key=virtual_key,
+            websocket_base_url=websocket_base_url,
             config=config,
             provider=provider,
             trace_id=trace_id,
@@ -341,6 +352,7 @@ def __init__(
             default_headers=self.allHeaders,
             http_client=http_client,
             max_retries=0,
+            websocket_base_url=self.websocket_base_url,
         )
 
         self.completions = apis.AsyncCompletion(self)
@@ -370,6 +382,7 @@ def copy(
         api_key: Optional[str] = None,
         base_url: Optional[str] = None,
         virtual_key: Optional[str] = None,
+        websocket_base_url: Optional[Union[str, httpx.URL]] = None,
         config: Optional[Union[Mapping, str]] = None,
         provider: Optional[str] = None,
         trace_id: Optional[str] = None,
@@ -405,6 +418,7 @@ def copy(
             api_key=api_key or self.api_key,
             base_url=base_url or self.base_url,
             virtual_key=virtual_key or self.virtual_key,
+            websocket_base_url=websocket_base_url or self.websocket_base_url,
             config=config or self.config,
             provider=provider or self.provider,
             trace_id=trace_id or self.trace_id,
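`websocket_base_url` is now threaded from `__init__` and `copy` into the vendored OpenAI client, which uses it when opening Realtime WebSocket connections. A sketch of supplying and propagating it; the URL is a placeholder:

```python
from portkey_ai import Portkey

client = Portkey(
    api_key="PORTKEY_API_KEY",  # placeholder key
    websocket_base_url="wss://api.portkey.ai/v1",  # hypothetical endpoint
)

# copy() keeps the setting unless it is explicitly overridden.
scoped = client.copy(trace_id="my-trace-id")
assert scoped.websocket_base_url == client.websocket_base_url
```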
diff --git a/portkey_ai/api_resources/types/assistant_type.py b/portkey_ai/api_resources/types/assistant_type.py
index fa17caa2..9197663b 100644
--- a/portkey_ai/api_resources/types/assistant_type.py
+++ b/portkey_ai/api_resources/types/assistant_type.py
@@ -17,11 +17,20 @@
     "FunctionParameters",
     "FileSearch",
     "FileSearchTool",
+    "ToolResources",
+    "ToolResourcesCodeInterpreter",
+    "ToolResourcesFileSearch",
 ]
 
 
+class FileSearchRankingOptions(BaseModel, extra="allow"):
+    score_threshold: Optional[float] = None
+    ranker: Optional[str] = None
+
+
 class FileSearch(BaseModel, extra="allow"):
     max_num_results: Optional[int] = None
+    ranking_options: Optional[FileSearchRankingOptions] = None
 
 
 class FileSearchTool(BaseModel, extra="allow"):
@@ -48,13 +57,26 @@ class ToolRetrieval(BaseModel, extra="allow"):
 
 
 class ToolFunction(BaseModel, extra="allow"):
-    type: Optional[str] = None
     function: Optional[FunctionDefinition] = None
+    type: Optional[str] = None
 
 
 AssistantTool = Union[ToolCodeInterpreter, ToolRetrieval, ToolFunction, FileSearchTool]
 
 
+class ToolResourcesCodeInterpreter(BaseModel, extra="allow"):
+    file_ids: Optional[List[str]] = None
+
+
+class ToolResourcesFileSearch(BaseModel, extra="allow"):
+    vector_store_ids: Optional[List[str]] = None
+
+
+class ToolResources(BaseModel, extra="allow"):
+    code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
+    file_search: Optional[ToolResourcesFileSearch] = None
+
+
 class Assistant(BaseModel, extra="allow"):
     id: Optional[str] = None
     created_at: Optional[int] = None
@@ -67,6 +89,9 @@ class Assistant(BaseModel, extra="allow"):
     object: Optional[str] = None
     tools: Optional[List[AssistantTool]] = None
     response_format: Optional[Any] = None
+    temperature: Optional[float] = None
+    tool_resources: Optional[ToolResources] = None
+    top_p: Optional[float] = None
     _headers: Optional[httpx.Headers] = PrivateAttr()
 
     def __str__(self):
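The new `ToolResources` models above expose the vector stores and code-interpreter files attached to an assistant. A sketch of reading them back, assuming the standard `assistants.retrieve` surface; the assistant id is a placeholder:

```python
from portkey_ai import Portkey

client = Portkey(api_key="PORTKEY_API_KEY")  # placeholder key

assistant = client.beta.assistants.retrieve("asst_abc123")  # hypothetical id

# Every field is Optional, so guard each level before dereferencing.
if assistant.tool_resources and assistant.tool_resources.file_search:
    print(assistant.tool_resources.file_search.vector_store_ids)
```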
diff --git a/portkey_ai/api_resources/types/audio_types.py b/portkey_ai/api_resources/types/audio_types.py
index 7ae35605..0a282d7d 100644
--- a/portkey_ai/api_resources/types/audio_types.py
+++ b/portkey_ai/api_resources/types/audio_types.py
@@ -1,5 +1,5 @@
 import json
-from typing import Dict, Optional
+from typing import Dict, List, Optional
 import httpx
 from .utils import parse_headers
 from typing import Any
@@ -8,6 +8,47 @@
 __all__ = ["Transcription", "Translation"]
 
 
+class TranscriptionSegment(BaseModel, extra="allow"):
+    id: Optional[int] = None
+    avg_logprob: Optional[float] = None
+    compression_ratio: Optional[float] = None
+    end: Optional[float] = None
+    no_speech_prob: Optional[float] = None
+    seek: Optional[int] = None
+    start: Optional[float] = None
+    temperature: Optional[float] = None
+    text: Optional[str] = None
+    tokens: Optional[List[int]] = None
+
+
+class TranscriptionWord(BaseModel, extra="allow"):
+    start: Optional[float] = None
+    end: Optional[float] = None
+    word: Optional[str] = None
+
+
+class TranscriptionVerbose(BaseModel, extra="allow"):
+    duration: Optional[str] = None
+    language: Optional[str] = None
+    text: Optional[str] = None
+    segments: Optional[List[TranscriptionSegment]] = None
+    words: Optional[List[TranscriptionWord]] = None
+    _headers: Optional[httpx.Headers] = PrivateAttr()
+
+    def __str__(self):
+        del self._headers
+        return json.dumps(self.dict(), indent=4)
+
+    def __getitem__(self, key):
+        return getattr(self, key, None)
+
+    def get(self, key: str, default: Optional[Any] = None):
+        return getattr(self, key, None) or default
+
+    def get_headers(self) -> Optional[Dict[str, str]]:
+        return parse_headers(self._headers)
+
+
 class Transcription(BaseModel, extra="allow"):
     text: Optional[str] = None
     _headers: Optional[httpx.Headers] = PrivateAttr()
@@ -26,6 +67,27 @@ def get_headers(self) -> Optional[Dict[str, str]]:
         return parse_headers(self._headers)
 
 
+class TranslationVerbose(BaseModel, extra="allow"):
+    duration: Optional[str] = None
+    language: Optional[str] = None
+    text: Optional[str] = None
+    segments: Optional[List[TranscriptionSegment]] = None
+    _headers: Optional[httpx.Headers] = PrivateAttr()
+
+    def __str__(self):
+        del self._headers
+        return json.dumps(self.dict(), indent=4)
+
+    def __getitem__(self, key):
+        return getattr(self, key, None)
+
+    def get(self, key: str, default: Optional[Any] = None):
+        return getattr(self, key, None) or default
+
+    def get_headers(self) -> Optional[Dict[str, str]]:
+        return parse_headers(self._headers)
+
+
 class Translation(BaseModel, extra="allow"):
     text: Optional[str] = None
     _headers: Optional[httpx.Headers] = PrivateAttr()
diff --git a/portkey_ai/api_resources/types/beta_realtime.py b/portkey_ai/api_resources/types/beta_realtime.py
new file mode 100644
index 00000000..7eed3018
--- /dev/null
+++ b/portkey_ai/api_resources/types/beta_realtime.py
@@ -0,0 +1,66 @@
+import json
+from typing import Any, Dict, List, Optional, Union
+import httpx
+from pydantic import BaseModel, PrivateAttr
+from portkey_ai.api_resources.types.utils import parse_headers
+
+__all__ = [
+    "SessionCreateResponse",
+    "ClientSecret",
+    "InputAudioTranscription",
+    "Tool",
+    "TurnDetection",
+]
+
+
+class ClientSecret(BaseModel, extra="allow"):
+    expires_at: Optional[int] = None
+    value: Optional[str] = None
+
+
+class InputAudioTranscription(BaseModel, extra="allow"):
+    model: Optional[str] = None
+
+
+class Tool(BaseModel, extra="allow"):
+    description: Optional[str] = None
+    name: Optional[str] = None
+    parameters: Optional[object] = None
+    type: Optional[str] = None
+
+
+class TurnDetection(BaseModel, extra="allow"):
+    prefix_padding_ms: Optional[int] = None
+    silence_duration_ms: Optional[int] = None
+    threshold: Optional[float] = None
+    type: Optional[str] = None
+
+
+class SessionCreateResponse(BaseModel, extra="allow"):
+    client_secret: Optional[ClientSecret] = None
+    input_audio_format: Optional[str] = None
+    input_audio_transcription: Optional[InputAudioTranscription] = None
+    instructions: Optional[str] = None
+    max_response_output_tokens: Union[int, str, None] = None
+    modalities: Optional[List[str]] = None
+    output_audio_format: Optional[str] = None
+    temperature: Optional[float] = None
+    tool_choice: Optional[str] = None
+    tools: Optional[List[Tool]] = None
+    turn_detection: Optional[TurnDetection] = None
+    voice: Optional[str] = None
+
+    _headers: Optional[httpx.Headers] = PrivateAttr()
+
+    def __str__(self):
+        del self._headers
+        return json.dumps(self.dict(), indent=4)
+
+    def __getitem__(self, key):
+        return getattr(self, key, None)
+
+    def get(self, key: str, default: Optional[Any] = None):
+        return getattr(self, key, None) or default
+
+    def get_headers(self) -> Optional[Dict[str, str]]:
+        return parse_headers(self._headers)
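`SessionCreateResponse` above models the payload returned when minting an ephemeral Realtime session token. A hedged sketch, assuming the new `beta.realtime` namespace exposes a `sessions.create` that proxies OpenAI's `POST /realtime/sessions`; the model name is illustrative:

```python
from portkey_ai import Portkey

client = Portkey(api_key="PORTKEY_API_KEY")  # placeholder key

# Assumption: sessions.create returns a SessionCreateResponse.
session = client.beta.realtime.sessions.create(
    model="gpt-4o-realtime-preview",  # illustrative model name
    modalities=["audio", "text"],
)

# The ephemeral client secret is what a browser client would use to connect.
if session.client_secret:
    print(session.client_secret.value, session.client_secret.expires_at)
```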
diff --git a/portkey_ai/api_resources/types/chat_complete_type.py b/portkey_ai/api_resources/types/chat_complete_type.py
index 66be5e1c..ba5456c7 100644
--- a/portkey_ai/api_resources/types/chat_complete_type.py
+++ b/portkey_ai/api_resources/types/chat_complete_type.py
@@ -41,10 +41,24 @@ class ChoiceLogprobs(BaseModel, extra="allow"):
     refusal: Optional[List[ChatCompletionTokenLogprob]] = None
 
 
+class CompletionTokensDetails(BaseModel, extra="allow"):
+    accepted_prediction_tokens: Optional[int] = None
+    audio_tokens: Optional[int] = None
+    reasoning_tokens: Optional[int] = None
+    rejected_prediction_tokens: Optional[int] = None
+
+
+class PromptTokensDetails(BaseModel, extra="allow"):
+    audio_tokens: Optional[int] = None
+    cached_tokens: Optional[int] = None
+
+
 class Usage(BaseModel, extra="allow"):
     prompt_tokens: Optional[int] = None
     completion_tokens: Optional[int] = None
     total_tokens: Optional[int] = None
+    completion_tokens_details: Optional[CompletionTokensDetails] = None
+    prompt_tokens_details: Optional[PromptTokensDetails] = None
 
 
 class DeltaToolCallFunction(BaseModel, extra="allow"):
@@ -93,12 +107,20 @@ class ChatCompletionMessageToolCall(BaseModel, extra="allow"):
     type: Optional[str] = None
 
 
+class ChatCompletionAudio(BaseModel, extra="allow"):
+    id: Optional[str] = None
+    data: Optional[str] = None
+    expires_at: Optional[int] = None
+    transcript: Optional[str] = None
+
+
 class ChatCompletionMessage(BaseModel, extra="allow"):
     content: Optional[Union[str, Iterable[Any]]] = None
     role: Optional[str] = None
     function_call: Optional[FunctionCall] = None
     tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None
     refusal: Optional[str] = None
+    audio: Optional[ChatCompletionAudio] = None
 
 
 class Choice(BaseModel, extra="allow"):
@@ -140,6 +162,8 @@ class ChatCompletionChunk(BaseModel, extra="allow"):
     model: Optional[str] = None
     choices: Optional[List[StreamChoice]] = None
     service_tier: Optional[str] = None
+    system_fingerprint: Optional[str] = None
+    usage: Optional[Usage] = None
 
     def __str__(self):
         return json.dumps(self.dict(), indent=4)
diff --git a/portkey_ai/api_resources/types/fine_tuning_type.py b/portkey_ai/api_resources/types/fine_tuning_type.py
index 71a166d0..34169b57 100644
--- a/portkey_ai/api_resources/types/fine_tuning_type.py
+++ b/portkey_ai/api_resources/types/fine_tuning_type.py
@@ -17,6 +17,11 @@
     "FineTuningJobCheckpointList",
     "FineTuningJobWandbIntegration",
     "FineTuningJobWandbIntegrationObject",
+    "MethodDpoHyperparameters",
+    "MethodSupervisedHyperparameters",
+    "MethodDpo",
+    "MethodSupervised",
+    "Method",
 ]
 
 
@@ -27,6 +32,8 @@ class Error(BaseModel, extra="allow"):
 
 
 class Hyperparameters(BaseModel, extra="allow"):
+    batch_size: Optional[Union[str, int]] = None
+    learning_rate_multiplier: Optional[Union[str, float]] = None
     n_epochs: Optional[Union[str, int]] = None
 
 
@@ -42,6 +49,33 @@ class FineTuningJobWandbIntegrationObject(BaseModel, extra="allow"):
     wandb: Optional[FineTuningJobWandbIntegration] = None
 
 
+class MethodDpoHyperparameters(BaseModel, extra="allow"):
+    batch_size: Optional[Union[str, int]] = None
+    beta: Optional[Union[str, float]] = None
+    learning_rate_multiplier: Optional[Union[str, float]] = None
+    n_epochs: Optional[Union[str, int]] = None
+
+
+class MethodSupervisedHyperparameters(BaseModel, extra="allow"):
+    batch_size: Optional[Union[str, int]] = None
+    learning_rate_multiplier: Optional[Union[str, float]] = None
+    n_epochs: Optional[Union[str, int]] = None
+
+
+class MethodDpo(BaseModel, extra="allow"):
+    hyperparameters: Optional[MethodDpoHyperparameters] = None
+
+
+class MethodSupervised(BaseModel, extra="allow"):
+    hyperparameters: Optional[MethodSupervisedHyperparameters] = None
+
+
+class Method(BaseModel, extra="allow"):
+    dpo: Optional[MethodDpo] = None
+    supervised: Optional[MethodSupervised] = None
+    type: Optional[str] = None
+
+
 class FineTuningJob(BaseModel, extra="allow"):
     id: Optional[str] = None
     created_at: Optional[int] = None
@@ -60,6 +94,7 @@ class FineTuningJob(BaseModel, extra="allow"):
     validation_file: Optional[str] = None
     estimated_finish: Optional[int] = None
     integrations: Optional[List[FineTuningJobWandbIntegrationObject]] = None
+    method: Optional[Method] = None
     _headers: Optional[httpx.Headers] = PrivateAttr()
 
     def __str__(self):
@@ -101,6 +136,8 @@ class FineTuningJobEvent(BaseModel, extra="allow"):
     level: Optional[str] = None
     message: Optional[str] = None
     object: Optional[str] = None
+    data: Optional[Any] = None
+    type: Optional[str] = None
     _headers: Optional[httpx.Headers] = PrivateAttr()
 
     def __str__(self):
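The `Method` models above describe the training-method block (`supervised` or `dpo`) that newer fine-tuning jobs report. A sketch of inspecting it, assuming the standard `fine_tuning.jobs.retrieve` surface; the job id is a placeholder:

```python
from portkey_ai import Portkey

client = Portkey(api_key="PORTKEY_API_KEY")  # placeholder key

job = client.fine_tuning.jobs.retrieve("ftjob-abc123")  # hypothetical id

# method.type distinguishes supervised fine-tuning from DPO.
if job.method and job.method.type == "dpo" and job.method.dpo:
    hp = job.method.dpo.hyperparameters
    print(hp.beta if hp else None)
```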
diff --git a/portkey_ai/api_resources/types/moderations_type.py b/portkey_ai/api_resources/types/moderations_type.py
index 9d9efcb5..75b27bd2 100644
--- a/portkey_ai/api_resources/types/moderations_type.py
+++ b/portkey_ai/api_resources/types/moderations_type.py
@@ -4,10 +4,70 @@
 from .utils import parse_headers
 from typing import List, Any
 from pydantic import BaseModel, PrivateAttr
-from ..._vendor.openai.types.moderation import Moderation
 
-__all__ = ["ModerationCreateResponse"]
+__all__ = [
+    "ModerationCreateResponse",
+    "Categories",
+    "CategoryAppliedInputTypes",
+    "CategoryScores",
+    "Moderation",
+]
+
+
+class Categories(BaseModel, extra="allow"):
+    harassment: Optional[bool] = None
+    harassment_threatening: Optional[bool] = None
+    hate: Optional[bool] = None
+    hate_threatening: Optional[bool] = None
+    illicit: Optional[bool] = None
+    illicit_violent: Optional[bool] = None
+    self_harm: Optional[bool] = None
+    self_harm_instructions: Optional[bool] = None
+    self_harm_intent: Optional[bool] = None
+    sexual: Optional[bool] = None
+    sexual_minors: Optional[bool] = None
+    violence: Optional[bool] = None
+    violence_graphic: Optional[bool] = None
+
+
+class CategoryAppliedInputTypes(BaseModel, extra="allow"):
+    harassment: Optional[List[str]] = None
+    harassment_threatening: Optional[List[str]] = None
+    hate: Optional[List[str]] = None
+    hate_threatening: Optional[List[str]] = None
+    illicit: Optional[List[str]] = None
+    illicit_violent: Optional[List[str]] = None
+    self_harm: Optional[List[str]] = None
+    self_harm_instructions: Optional[List[str]] = None
+    self_harm_intent: Optional[List[str]] = None
+    sexual: Optional[List[str]] = None
+    sexual_minors: Optional[List[str]] = None
+    violence: Optional[List[str]] = None
+    violence_graphic: Optional[List[str]] = None
+
+
+class CategoryScores(BaseModel, extra="allow"):
+    harassment: Optional[float] = None
+    harassment_threatening: Optional[float] = None
+    hate: Optional[float] = None
+    hate_threatening: Optional[float] = None
+    illicit: Optional[float] = None
+    illicit_violent: Optional[float] = None
+    self_harm: Optional[float] = None
+    self_harm_instructions: Optional[float] = None
+    self_harm_intent: Optional[float] = None
+    sexual: Optional[float] = None
+    sexual_minors: Optional[float] = None
+    violence: Optional[float] = None
+    violence_graphic: Optional[float] = None
+
+
+class Moderation(BaseModel, extra="allow"):
+    categories: Optional[Categories] = None
+    category_applied_input_types: Optional[CategoryAppliedInputTypes] = None
+    category_scores: Optional[CategoryScores] = None
+    flagged: Optional[bool] = None
 
 
 class ModerationCreateResponse(BaseModel, extra="allow"):
diff --git a/portkey_ai/api_resources/types/thread_message_type.py b/portkey_ai/api_resources/types/thread_message_type.py
index f7ceb0a9..b7ea7577 100644
--- a/portkey_ai/api_resources/types/thread_message_type.py
+++ b/portkey_ai/api_resources/types/thread_message_type.py
@@ -83,7 +83,6 @@ class Text(BaseModel, extra="allow"):
 
 
 class RefusalContentBlock(BaseModel, extra="allow"):
     refusal: Optional[str] = None
-    type: Optional[str] = None
 
 
@@ -104,6 +103,7 @@ class MessageContentText(BaseModel, extra="allow"):
 
 class ImageFile(BaseModel, extra="allow"):
     file_id: Optional[str] = None
+    detail: Optional[str] = None
 
 
 class MessageContentImageFile(BaseModel, extra="allow"):
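With `Moderation` and its category models now defined locally in `moderations_type.py` (rather than imported from the vendored SDK), a moderation result can be inspected field by field. A sketch, assuming the usual `results` list on the response; the input text is illustrative:

```python
from portkey_ai import Portkey

client = Portkey(api_key="PORTKEY_API_KEY")  # placeholder key

response = client.moderations.create(
    model="omni-moderation-latest",  # illustrative model name
    input="...text to classify...",
)

result = response.results[0]  # assumes a `results` list on the response
print(result.flagged)
if result.category_scores:
    print(result.category_scores.violence)
```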
diff --git a/portkey_ai/api_resources/types/thread_run_type.py b/portkey_ai/api_resources/types/thread_run_type.py
index 5ffe8693..9d98a35a 100644
--- a/portkey_ai/api_resources/types/thread_run_type.py
+++ b/portkey_ai/api_resources/types/thread_run_type.py
@@ -1,5 +1,5 @@
 import json
-from typing import Dict, Literal, Optional, Union
+from typing import Any, Dict, Literal, Optional, Union
 import httpx
 from portkey_ai.api_resources.types.assistant_type import AssistantTool
@@ -42,6 +42,11 @@
     "AssistantToolChoiceFunction",
     "AssistantToolChoiceOption",
     "TruncationStrategy",
+    "FileSearchRankingOptions",
+    "FileSearchResultContent",
+    "FileSearchResult",
+    "FileSearch",
+    "FileSearchToolCall",
 ]
 
 
@@ -91,13 +96,35 @@ class CodeToolCall(BaseModel, extra="allow"):
     type: Optional[str] = None
 
 
+class FileSearchRankingOptions(BaseModel, extra="allow"):
+    ranker: Optional[str] = None
+    score_threshold: Optional[float] = None
+
+
+class FileSearchResultContent(BaseModel, extra="allow"):
+    text: Optional[str] = None
+    type: Optional[str] = None
+
+
+class FileSearchResult(BaseModel, extra="allow"):
+    file_id: Optional[str] = None
+    file_name: Optional[str] = None
+    score: Optional[float] = None
+    content: Optional[List[FileSearchResultContent]] = None
+
+
+class FileSearch(BaseModel, extra="allow"):
+    ranking_options: Optional[FileSearchRankingOptions] = None
+    results: Optional[List[FileSearchResult]] = None
+
+
 class FileSearchToolCall(BaseModel, extra="allow"):
     id: Optional[str] = None
-    file_search: Optional[object] = None
+    file_search: Optional[FileSearch] = None
     type: Optional[str] = None
 
 
-ToolCall = Union[CodeToolCall, RetrievalToolCall, FunctionToolCall, FileSearchToolCall]
+ToolCall = Union[CodeToolCall, FileSearchToolCall, RetrievalToolCall, FunctionToolCall]
 
 
 class ToolCallsStepDetails(BaseModel, extra="allow"):
@@ -226,6 +253,7 @@ class Run(BaseModel, extra="allow"):
     truncation_strategy: Optional[TruncationStrategy] = None
     temperature: Optional[float] = None
     top_p: Optional[float] = None
+    response_format: Optional[Any] = None
     _headers: Optional[httpx.Headers] = PrivateAttr()
 
     def __str__(self):
diff --git a/portkey_ai/api_resources/types/thread_type.py b/portkey_ai/api_resources/types/thread_type.py
index f980d4a2..4c4992fb 100644
--- a/portkey_ai/api_resources/types/thread_type.py
+++ b/portkey_ai/api_resources/types/thread_type.py
@@ -24,7 +24,6 @@ class ToolResourcesFileSearch(BaseModel, extra="allow"):
 
 class ToolResources(BaseModel, extra="allow"):
     code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
-    file_search: Optional[ToolResourcesFileSearch] = None
 
 
diff --git a/portkey_ai/api_resources/types/vector_stores_type.py b/portkey_ai/api_resources/types/vector_stores_type.py
index 3ff4a406..9ebc9f71 100644
--- a/portkey_ai/api_resources/types/vector_stores_type.py
+++ b/portkey_ai/api_resources/types/vector_stores_type.py
@@ -4,10 +4,6 @@
 import httpx
 
 from portkey_ai._vendor.openai._utils._transform import PropertyInfo
-from portkey_ai._vendor.openai.types.beta.vector_stores.vector_store_file import (
-    ChunkingStrategyOther,
-    ChunkingStrategyStatic,
-)
 from .utils import parse_headers
 from pydantic import BaseModel, PrivateAttr
 
@@ -92,8 +88,22 @@ class LastError(BaseModel, extra="allow"):
     message: Optional[str] = None
 
 
+class StaticFileChunkingStrategy(BaseModel, extra="allow"):
+    chunk_overlap_tokens: Optional[int] = None
+    max_chunk_size_tokens: Optional[int] = None
+
+
+class StaticFileChunkingStrategyObject(BaseModel, extra="allow"):
+    static: StaticFileChunkingStrategy
+    type: Optional[str] = None
+
+
+class OtherFileChunkingStrategyObject(BaseModel, extra="allow"):
+    type: Optional[str] = None
+
+
 ChunkingStrategy: TypeAlias = Annotated[
-    Union[ChunkingStrategyStatic, ChunkingStrategyOther],
+    Union[StaticFileChunkingStrategyObject, OtherFileChunkingStrategyObject],
     PropertyInfo(discriminator="type"),
 ]
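The locally defined chunking-strategy models above replace the vendored imports and still discriminate on `type`. A sketch of requesting a static strategy when attaching a file, which these models would parse on the way back; ids and token counts are placeholders, and `chunking_strategy` is assumed to pass through via `**kwargs`:

```python
from portkey_ai import Portkey

client = Portkey(api_key="PORTKEY_API_KEY")  # placeholder key

vs_file = client.beta.vector_stores.files.create(
    vector_store_id="vs_abc123",  # hypothetical vector store id
    file_id="file-abc123",  # hypothetical file id
    chunking_strategy={
        "type": "static",
        "static": {
            "max_chunk_size_tokens": 800,  # illustrative sizes
            "chunk_overlap_tokens": 400,
        },
    },
)
print(vs_file.chunking_strategy)
```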
diff --git a/vendorize.toml b/vendorize.toml
index 772c4675..e06951fb 100644
--- a/vendorize.toml
+++ b/vendorize.toml
@@ -1,4 +1,4 @@
 target = "portkey_ai/_vendor"
 packages = [
-    "openai==1.40.1"
+    "openai==1.58.1"
 ]
\ No newline at end of file