Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions portkey_ai/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,6 +143,16 @@
AsyncConversations,
ConversationsItems,
AsyncConversationsItems,
Videos,
AsyncVideos,
ChatKit,
AsyncChatKit,
ChatKitSessions,
AsyncChatKitSessions,
ChatKitThreads,
AsyncChatKitThreads,
Calls,
AsyncCalls,
)

from portkey_ai.version import VERSION
Expand Down Expand Up @@ -305,4 +315,14 @@
"AsyncConversations",
"ConversationsItems",
"AsyncConversationsItems",
"Videos",
"AsyncVideos",
"ChatKit",
"AsyncChatKit",
"ChatKitSessions",
"AsyncChatKitSessions",
"ChatKitThreads",
"AsyncChatKitThreads",
"Calls",
"AsyncCalls",
]
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
Metadata-Version: 2.3
Name: openai
Version: 1.107.2
Version: 2.2.0
Summary: The official Python library for the openai API
Project-URL: Homepage, https://github.com/openai/openai-python
Project-URL: Repository, https://github.com/openai/openai-python
Expand Down

Large diffs are not rendered by default.

5 changes: 4 additions & 1 deletion portkey_ai/_vendor/openai/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from typing_extensions import override

from . import types
from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes
from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes, omit, not_given
from ._utils import file_from_path
from ._client import Client, OpenAI, Stream, Timeout, Transport, AsyncClient, AsyncOpenAI, AsyncStream, RequestOptions
from ._models import BaseModel
Expand Down Expand Up @@ -46,7 +46,9 @@
"ProxiesTypes",
"NotGiven",
"NOT_GIVEN",
"not_given",
"Omit",
"omit",
"OpenAIError",
"APIError",
"APIStatusError",
Expand Down Expand Up @@ -377,6 +379,7 @@ def _reset_client() -> None: # type: ignore[reportUnusedFunction]
files as files,
images as images,
models as models,
videos as videos,
batches as batches,
uploads as uploads,
realtime as realtime,
Expand Down
18 changes: 9 additions & 9 deletions portkey_ai/_vendor/openai/_base_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@
from ._qs import Querystring
from ._files import to_httpx_files, async_to_httpx_files
from ._types import (
NOT_GIVEN,
Body,
Omit,
Query,
Expand All @@ -57,6 +56,7 @@
RequestOptions,
HttpxRequestFiles,
ModelBuilderProtocol,
not_given,
)
from ._utils import SensitiveHeadersFilter, is_dict, is_list, asyncify, is_given, lru_cache, is_mapping
from ._compat import PYDANTIC_V1, model_copy, model_dump
Expand Down Expand Up @@ -147,9 +147,9 @@ def __init__(
def __init__(
self,
*,
url: URL | NotGiven = NOT_GIVEN,
json: Body | NotGiven = NOT_GIVEN,
params: Query | NotGiven = NOT_GIVEN,
url: URL | NotGiven = not_given,
json: Body | NotGiven = not_given,
params: Query | NotGiven = not_given,
) -> None:
self.url = url
self.json = json
Expand Down Expand Up @@ -597,7 +597,7 @@ def _maybe_override_cast_to(self, cast_to: type[ResponseT], options: FinalReques
# we internally support defining a temporary header to override the
# default `cast_to` type for use with `.with_raw_response` and `.with_streaming_response`
# see _response.py for implementation details
override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, NOT_GIVEN)
override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, not_given)
if is_given(override_cast_to):
options.headers = headers
return cast(Type[ResponseT], override_cast_to)
Expand Down Expand Up @@ -844,7 +844,7 @@ def __init__(
version: str,
base_url: str | URL,
max_retries: int = DEFAULT_MAX_RETRIES,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.Client | None = None,
custom_headers: Mapping[str, str] | None = None,
custom_query: Mapping[str, object] | None = None,
Expand Down Expand Up @@ -1390,7 +1390,7 @@ def __init__(
base_url: str | URL,
_strict_response_validation: bool,
max_retries: int = DEFAULT_MAX_RETRIES,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.AsyncClient | None = None,
custom_headers: Mapping[str, str] | None = None,
custom_query: Mapping[str, object] | None = None,
Expand Down Expand Up @@ -1867,8 +1867,8 @@ def make_request_options(
extra_query: Query | None = None,
extra_body: Body | None = None,
idempotency_key: str | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
post_parser: PostParser | NotGiven = NOT_GIVEN,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
post_parser: PostParser | NotGiven = not_given,
) -> RequestOptions:
"""Create a dict of type RequestOptions without keys of NotGiven values."""
options: RequestOptions = {}
Expand Down
54 changes: 46 additions & 8 deletions portkey_ai/_vendor/openai/_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,21 +3,21 @@
from __future__ import annotations

import os
from typing import TYPE_CHECKING, Any, Union, Mapping, Callable, Awaitable
from typing import TYPE_CHECKING, Any, Mapping, Callable, Awaitable
from typing_extensions import Self, override

import httpx

from . import _exceptions
from ._qs import Querystring
from ._types import (
NOT_GIVEN,
Omit,
Timeout,
NotGiven,
Transport,
ProxiesTypes,
RequestOptions,
not_given,
)
from ._utils import (
is_given,
Expand All @@ -44,6 +44,7 @@
files,
images,
models,
videos,
batches,
uploads,
realtime,
Expand All @@ -59,6 +60,7 @@
from .resources.files import Files, AsyncFiles
from .resources.images import Images, AsyncImages
from .resources.models import Models, AsyncModels
from .resources.videos import Videos, AsyncVideos
from .resources.batches import Batches, AsyncBatches
from .resources.webhooks import Webhooks, AsyncWebhooks
from .resources.beta.beta import Beta, AsyncBeta
Expand Down Expand Up @@ -103,7 +105,7 @@ def __init__(
webhook_secret: str | None = None,
base_url: str | httpx.URL | None = None,
websocket_base_url: str | httpx.URL | None = None,
timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
timeout: float | Timeout | None | NotGiven = not_given,
max_retries: int = DEFAULT_MAX_RETRIES,
default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
Expand Down Expand Up @@ -288,6 +290,12 @@ def containers(self) -> Containers:

return Containers(self)

@cached_property
def videos(self) -> Videos:
    """Lazily construct and cache the synchronous ``Videos`` resource."""
    # Deferred import keeps client-module import time low; cached_property
    # ensures the resource object is built at most once per client.
    from .resources.videos import Videos as _Videos

    return _Videos(self)

@cached_property
def with_raw_response(self) -> OpenAIWithRawResponse:
return OpenAIWithRawResponse(self)
Expand Down Expand Up @@ -339,9 +347,9 @@ def copy(
webhook_secret: str | None = None,
websocket_base_url: str | httpx.URL | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.Client | None = None,
max_retries: int | NotGiven = NOT_GIVEN,
max_retries: int | NotGiven = not_given,
default_headers: Mapping[str, str] | None = None,
set_default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
Expand Down Expand Up @@ -448,7 +456,7 @@ def __init__(
webhook_secret: str | None = None,
base_url: str | httpx.URL | None = None,
websocket_base_url: str | httpx.URL | None = None,
timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
timeout: float | Timeout | None | NotGiven = not_given,
max_retries: int = DEFAULT_MAX_RETRIES,
default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
Expand Down Expand Up @@ -633,6 +641,12 @@ def containers(self) -> AsyncContainers:

return AsyncContainers(self)

@cached_property
def videos(self) -> AsyncVideos:
    """Lazily construct and cache the asynchronous ``Videos`` resource."""
    # Deferred import mirrors the sync client; built once per client instance.
    from .resources.videos import AsyncVideos as _AsyncVideos

    return _AsyncVideos(self)

@cached_property
def with_raw_response(self) -> AsyncOpenAIWithRawResponse:
return AsyncOpenAIWithRawResponse(self)
Expand Down Expand Up @@ -684,9 +698,9 @@ def copy(
webhook_secret: str | None = None,
websocket_base_url: str | httpx.URL | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.AsyncClient | None = None,
max_retries: int | NotGiven = NOT_GIVEN,
max_retries: int | NotGiven = not_given,
default_headers: Mapping[str, str] | None = None,
set_default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
Expand Down Expand Up @@ -883,6 +897,12 @@ def containers(self) -> containers.ContainersWithRawResponse:

return ContainersWithRawResponse(self._client.containers)

@cached_property
def videos(self) -> videos.VideosWithRawResponse:
    """Raw-response wrapper around the client's ``videos`` resource."""
    # Import deferred until first access; wraps the already-built resource.
    from .resources.videos import VideosWithRawResponse as _Wrapper

    return _Wrapper(self._client.videos)


class AsyncOpenAIWithRawResponse:
_client: AsyncOpenAI
Expand Down Expand Up @@ -998,6 +1018,12 @@ def containers(self) -> containers.AsyncContainersWithRawResponse:

return AsyncContainersWithRawResponse(self._client.containers)

@cached_property
def videos(self) -> videos.AsyncVideosWithRawResponse:
    """Raw-response wrapper around the async client's ``videos`` resource."""
    # Import deferred until first access; wraps the already-built resource.
    from .resources.videos import AsyncVideosWithRawResponse as _Wrapper

    return _Wrapper(self._client.videos)


class OpenAIWithStreamedResponse:
_client: OpenAI
Expand Down Expand Up @@ -1113,6 +1139,12 @@ def containers(self) -> containers.ContainersWithStreamingResponse:

return ContainersWithStreamingResponse(self._client.containers)

@cached_property
def videos(self) -> videos.VideosWithStreamingResponse:
    """Streaming-response wrapper around the client's ``videos`` resource."""
    # Import deferred until first access; wraps the already-built resource.
    from .resources.videos import VideosWithStreamingResponse as _Wrapper

    return _Wrapper(self._client.videos)


class AsyncOpenAIWithStreamedResponse:
_client: AsyncOpenAI
Expand Down Expand Up @@ -1228,6 +1260,12 @@ def containers(self) -> containers.AsyncContainersWithStreamingResponse:

return AsyncContainersWithStreamingResponse(self._client.containers)

@cached_property
def videos(self) -> videos.AsyncVideosWithStreamingResponse:
    """Streaming-response wrapper around the async client's ``videos`` resource."""
    # Import deferred until first access; wraps the already-built resource.
    from .resources.videos import AsyncVideosWithStreamingResponse as _Wrapper

    return _Wrapper(self._client.videos)


Client = OpenAI

Expand Down
14 changes: 10 additions & 4 deletions portkey_ai/_vendor/openai/_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -281,14 +281,15 @@ def model_dump(
mode: Literal["json", "python"] | str = "python",
include: IncEx | None = None,
exclude: IncEx | None = None,
by_alias: bool = False,
by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
round_trip: bool = False,
warnings: bool | Literal["none", "warn", "error"] = True,
context: dict[str, Any] | None = None,
serialize_as_any: bool = False,
fallback: Callable[[Any], Any] | None = None,
) -> dict[str, Any]:
"""Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump

Expand Down Expand Up @@ -320,10 +321,12 @@ def model_dump(
raise ValueError("context is only supported in Pydantic v2")
if serialize_as_any != False:
raise ValueError("serialize_as_any is only supported in Pydantic v2")
if fallback is not None:
raise ValueError("fallback is only supported in Pydantic v2")
dumped = super().dict( # pyright: ignore[reportDeprecated]
include=include,
exclude=exclude,
by_alias=by_alias,
by_alias=by_alias if by_alias is not None else False,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
Expand All @@ -338,13 +341,14 @@ def model_dump_json(
indent: int | None = None,
include: IncEx | None = None,
exclude: IncEx | None = None,
by_alias: bool = False,
by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
round_trip: bool = False,
warnings: bool | Literal["none", "warn", "error"] = True,
context: dict[str, Any] | None = None,
fallback: Callable[[Any], Any] | None = None,
serialize_as_any: bool = False,
) -> str:
"""Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json
Expand Down Expand Up @@ -373,11 +377,13 @@ def model_dump_json(
raise ValueError("context is only supported in Pydantic v2")
if serialize_as_any != False:
raise ValueError("serialize_as_any is only supported in Pydantic v2")
if fallback is not None:
raise ValueError("fallback is only supported in Pydantic v2")
return super().json( # type: ignore[reportDeprecated]
indent=indent,
include=include,
exclude=exclude,
by_alias=by_alias,
by_alias=by_alias if by_alias is not None else False,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
Expand Down
8 changes: 8 additions & 0 deletions portkey_ai/_vendor/openai/_module_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
from .resources.files import Files
from .resources.images import Images
from .resources.models import Models
from .resources.videos import Videos
from .resources.batches import Batches
from .resources.webhooks import Webhooks
from .resources.beta.beta import Beta
Expand Down Expand Up @@ -72,6 +73,12 @@ def __load__(self) -> Models:
return _load_client().models


class VideosProxy(LazyProxy["Videos"]):
    """Module-level lazy proxy that resolves to the default client's ``videos`` resource."""

    @override
    def __load__(self) -> Videos:
        # Resolve the shared module-level client only when first accessed.
        client = _load_client()
        return client.videos


class BatchesProxy(LazyProxy["Batches"]):
@override
def __load__(self) -> Batches:
Expand Down Expand Up @@ -151,6 +158,7 @@ def __load__(self) -> Conversations:
evals: Evals = EvalsProxy().__as_proxied__()
images: Images = ImagesProxy().__as_proxied__()
models: Models = ModelsProxy().__as_proxied__()
videos: Videos = VideosProxy().__as_proxied__()
batches: Batches = BatchesProxy().__as_proxied__()
uploads: Uploads = UploadsProxy().__as_proxied__()
webhooks: Webhooks = WebhooksProxy().__as_proxied__()
Expand Down
Loading