diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 30556c9..5d9ca45 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d5dbf5a-62be-411a-9c7b-bc7b6dc79e13 management: - docChecksum: a690daf53c97c95bb6c912a2c59f804d + docChecksum: 5162ce13f49e729b6efa20dd5cdf32be docVersion: 0.0.0 - speakeasyVersion: 1.454.0 - generationVersion: 2.477.0 - releaseVersion: 0.8.0 - configChecksum: 404e2a546fe183c10067017df4896733 + speakeasyVersion: 1.460.3 + generationVersion: 2.484.0 + releaseVersion: 0.9.0 + configChecksum: 40760f888f8f869e4f9b83ad88469b3f repoURL: https://github.com/livepeer/livepeer-ai-python.git installationURL: https://github.com/livepeer/livepeer-ai-python.git published: true @@ -14,15 +14,15 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.6.11 + core: 5.7.4 defaultEnabledRetries: 0.2.0 envVarSecurityUsage: 0.3.2 globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.0.0 + globalServerURLs: 3.1.0 multipartFileContentType: 1.0.0 - nameOverrides: 3.0.0 + nameOverrides: 3.0.1 responseFormat: 1.0.1 retries: 3.0.2 sdkHooks: 1.0.0 @@ -43,7 +43,6 @@ generatedFiles: - docs/models/components/bodygenimagetotextimage.md - docs/models/components/bodygenimagetovideo.md - docs/models/components/bodygenimagetovideoimage.md - - docs/models/components/bodygenllm.md - docs/models/components/bodygensegmentanything2.md - docs/models/components/bodygensegmentanything2image.md - docs/models/components/bodygenupscale.md @@ -55,6 +54,8 @@ generatedFiles: - docs/models/components/imagetotextresponse.md - docs/models/components/livevideotovideoparams.md - docs/models/components/livevideotovideoresponse.md + - docs/models/components/llmmessage.md + - docs/models/components/llmrequest.md - docs/models/components/llmresponse.md - docs/models/components/loc.md - docs/models/components/masksresponse.md @@ -103,7 +104,6 @@ generatedFiles: - src/livepeer_ai/models/components/body_genimagetoimage.py - src/livepeer_ai/models/components/body_genimagetotext.py - src/livepeer_ai/models/components/body_genimagetovideo.py - - src/livepeer_ai/models/components/body_genllm.py - src/livepeer_ai/models/components/body_gensegmentanything2.py - src/livepeer_ai/models/components/body_genupscale.py - src/livepeer_ai/models/components/chunk.py @@ -112,6 +112,8 @@ generatedFiles: - src/livepeer_ai/models/components/imagetotextresponse.py - src/livepeer_ai/models/components/livevideotovideoparams.py - src/livepeer_ai/models/components/livevideotovideoresponse.py + - src/livepeer_ai/models/components/llmmessage.py + - src/livepeer_ai/models/components/llmrequest.py - src/livepeer_ai/models/components/llmresponse.py - src/livepeer_ai/models/components/masksresponse.py - src/livepeer_ai/models/components/media.py @@ -167,7 +169,8 @@ examples: application/json: {"images": []} "400": application/json: {"detail": {"msg": ""}} - "422": {} + "422": + application/json: {} genImageToImage: speakeasy-default-gen-image-to-image: requestBody: @@ -177,7 +180,8 @@ examples: application/json: {"images": []} "400": application/json: {"detail": {"msg": ""}} - "422": {} + "422": + application/json: {} genImageToVideo: speakeasy-default-gen-image-to-video: requestBody: @@ -187,7 +191,8 @@ examples: application/json: {"images": []} "400": application/json: {"detail": {"msg": ""}} - "422": {} + "422": + application/json: {} genUpscale: speakeasy-default-gen-upscale: requestBody: @@ -197,7 +202,8 @@ examples: 
application/json: {"images": []} "400": application/json: {"detail": {"msg": ""}} - "422": {} + "422": + application/json: {} genAudioToText: speakeasy-default-gen-audio-to-text: requestBody: @@ -207,7 +213,8 @@ examples: application/json: {"text": "", "chunks": []} "400": application/json: {"detail": {"msg": ""}} - "422": {} + "422": + application/json: {} genSegmentAnything2: speakeasy-default-gen-segment-anything2: requestBody: @@ -217,17 +224,20 @@ examples: application/json: {"masks": "", "scores": "", "logits": ""} "400": application/json: {"detail": {"msg": ""}} - "422": {} + "422": + application/json: {} genLLM: speakeasy-default-gen-LLM: requestBody: application/x-www-form-urlencoded: {"prompt": ""} + application/json: {"messages": []} responses: "200": - application/json: {"response": "", "tokens_used": 60712} + application/json: {"response": "", "tokens_used": 60712, "id": "", "model": "Expedition", "created": 755586} "400": application/json: {"detail": {"msg": ""}} - "422": {} + "422": + application/json: {} genImageToText: speakeasy-default-gen-image-to-text: requestBody: @@ -237,7 +247,8 @@ examples: application/json: {"text": ""} "400": application/json: {"detail": {"msg": ""}} - "422": {} + "422": + application/json: {} genLiveVideoToVideo: speakeasy-default-gen-live-video-to-video: requestBody: @@ -247,7 +258,8 @@ examples: application/json: {"subscribe_url": "https://vain-kiss.name", "publish_url": "https://frail-duffel.com"} "400": application/json: {"detail": {"msg": ""}} - "422": {} + "422": + application/json: {} genTextToSpeech: speakeasy-default-gen-text-to-speech: responses: @@ -255,5 +267,6 @@ examples: application/json: {"audio": {"url": "https://accurate-parsnip.net/"}} "400": application/json: {"detail": {"msg": ""}} - "422": {} + "422": + application/json: {} generatedTests: {} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index d4839c5..2e6dbf3 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 0.8.0 + version: 0.9.0 additionalDependencies: dev: {} main: {} diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 6bc0521..eaf5a9c 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,12 +1,12 @@ -speakeasyVersion: 1.454.0 +speakeasyVersion: 1.460.3 sources: livepeer_ai-OAS: sourceNamespace: livepeer-ai-oas - sourceRevisionDigest: sha256:5576a967f5fc90918cde4ed9f62630a777c3693dcc4b7801ed595dd7d64b6ed8 - sourceBlobDigest: sha256:96965175cd37993dd71c66039301a689b5a65b4068d704a3f31b91666c0df03b + sourceRevisionDigest: sha256:bfa0844cf9b7f6b2be182a8a8e3f5d074965a7216fbed68ab570f6e3b58587d5 + sourceBlobDigest: sha256:3d8f629c40a46ffe8c1284434daf4e7dded2357d015c6f89f0e8ed8179c96d28 tags: - latest - - speakeasy-sdk-regen-1732061831 + - speakeasy-sdk-regen-1735258605 - 0.0.0 targets: livepeer-ai: @@ -17,10 +17,10 @@ targets: livepeer-ai-python: source: livepeer_ai-OAS sourceNamespace: livepeer-ai-oas - sourceRevisionDigest: sha256:5576a967f5fc90918cde4ed9f62630a777c3693dcc4b7801ed595dd7d64b6ed8 - sourceBlobDigest: sha256:96965175cd37993dd71c66039301a689b5a65b4068d704a3f31b91666c0df03b + sourceRevisionDigest: sha256:bfa0844cf9b7f6b2be182a8a8e3f5d074965a7216fbed68ab570f6e3b58587d5 + sourceBlobDigest: sha256:3d8f629c40a46ffe8c1284434daf4e7dded2357d015c6f89f0e8ed8179c96d28 codeSamplesNamespace: code-samples-python-livepeer-python - codeSamplesRevisionDigest: 
sha256:9b2d5f4af79f063e244429955ad927a2efd670b5f9a114608eda2aec86c188ed + codeSamplesRevisionDigest: sha256:40ac599a7c83a23f422adee00f2427e8125723499dc86eb82b70ce7554adb481 workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/README.md b/README.md index c202f37..52c9fcb 100644 --- a/README.md +++ b/README.md @@ -58,9 +58,10 @@ with Livepeer( "prompt": "", }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) ```
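The gen.lock examples above show the /llm operation switching from a form-encoded prompt payload (BodyGenLLM) to a JSON messages payload (LLMRequest), and its response gaining id, model, and created fields; the matching model and SDK changes appear further down in this diff. A minimal migration sketch under those assumptions: the role strings, the sampling values, and the bare Livepeer() constructor are illustrative, not taken from this diff.

```
from livepeer_ai import Livepeer

# Before this regeneration (v0.8.0), /llm took a form-encoded BodyGenLLM:
#   livepeer.generate.llm(request={"prompt": "Say hello.", "system_msg": "Be brief."})
# After (v0.9.0), the request is a JSON LLMRequest with a list of messages.
with Livepeer() as livepeer:  # credentials omitted; configure as in the README examples
    res = livepeer.generate.llm(request={
        "messages": [
            {"role": "system", "content": "Be brief."},  # role values assumed, not constrained in this diff
            {"role": "user", "content": "Say hello."},
        ],
        "max_tokens": 256,
        "temperature": 0.7,
    })

    assert res.llm_response is not None
    # LLMResponse now also carries id, model, and created alongside
    # response and tokens_used (see llmresponse.py below).
    print(res.llm_response.response, res.llm_response.tokens_used)
```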
@@ -80,9 +81,10 @@ async def main(): "prompt": "", }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) asyncio.run(main()) ``` @@ -136,9 +138,10 @@ with Livepeer( }, }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) ``` @@ -162,9 +165,10 @@ with Livepeer( }, RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) ``` @@ -182,9 +186,10 @@ with Livepeer( "prompt": "", }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) ``` @@ -227,9 +232,10 @@ with Livepeer( "prompt": "", }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) except errors.HTTPError as e: # handle e.data: errors.HTTPErrorData @@ -269,9 +275,10 @@ with Livepeer( "prompt": "", }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) ``` @@ -290,9 +297,10 @@ with Livepeer( "prompt": "", }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) ``` @@ -401,9 +409,10 @@ with Livepeer( "prompt": "", }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) ``` diff --git a/RELEASES.md b/RELEASES.md index 216bc4e..6cdb2fe 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -118,4 +118,14 @@ Based on: ### Generated - [python v0.8.0] . ### Releases -- [PyPI v0.8.0] https://pypi.org/project/livepeer-ai/0.8.0 - . \ No newline at end of file +- [PyPI v0.8.0] https://pypi.org/project/livepeer-ai/0.8.0 - . + +## 2024-12-31 00:16:39 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.460.3 (2.484.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v0.9.0] . +### Releases +- [PyPI v0.9.0] https://pypi.org/project/livepeer-ai/0.9.0 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index 3106116..4200877 100644 --- a/USAGE.md +++ b/USAGE.md @@ -11,9 +11,10 @@ with Livepeer( "prompt": "", }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) ```
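The README hunk above keeps the `except errors.HTTPError as e` handler around the regenerated example. A short sketch of a filled-in handler, assuming only the `e.data` attribute named in the generated comment; the fields of `errors.HTTPErrorData` are not shown in this diff, and the bare `Livepeer()` constructor stands in for whatever credentials the README configures.

```
from livepeer_ai import Livepeer
from livepeer_ai.models import errors

with Livepeer() as livepeer:  # credentials omitted; configure as in the README examples
    try:
        res = livepeer.generate.text_to_image(request={"prompt": ""})
        assert res.image_response is not None
        print(res.image_response)
    except errors.HTTPError as e:
        # e.data is an errors.HTTPErrorData per the generated comment;
        # log both the raw exception and the structured payload.
        print(f"request failed: {e}")
        print(e.data)
```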
@@ -33,9 +34,10 @@ async def main(): "prompt": "", }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) asyncio.run(main()) ``` diff --git a/codeSamples.yaml b/codeSamples.yaml index 5fbe599..83d4b55 100644 --- a/codeSamples.yaml +++ b/codeSamples.yaml @@ -22,9 +22,10 @@ actions: }, }) - if res.text_response is not None: - # handle response - pass + assert res.text_response is not None + + # Handle response + print(res.text_response) - target: $["paths"]["/image-to-image"]["post"] update: x-codeSamples: @@ -45,9 +46,10 @@ actions: }, }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) - target: $["paths"]["/image-to-text"]["post"] update: x-codeSamples: @@ -67,9 +69,10 @@ actions: }, }) - if res.image_to_text_response is not None: - # handle response - pass + assert res.image_to_text_response is not None + + # Handle response + print(res.image_to_text_response) - target: $["paths"]["/image-to-video"]["post"] update: x-codeSamples: @@ -89,9 +92,10 @@ actions: }, }) - if res.video_response is not None: - # handle response - pass + assert res.video_response is not None + + # Handle response + print(res.video_response) - target: $["paths"]["/live-video-to-video"]["post"] update: x-codeSamples: @@ -109,9 +113,10 @@ actions: "publish_url": "https://vain-tabletop.biz", }) - if res.live_video_to_video_response is not None: - # handle response - pass + assert res.live_video_to_video_response is not None + + # Handle response + print(res.live_video_to_video_response) - target: $["paths"]["/llm"]["post"] update: x-codeSamples: @@ -125,12 +130,18 @@ actions: ) as livepeer: res = livepeer.generate.llm(request={ - "prompt": "", + "messages": [ + { + "role": "", + "content": "", + }, + ], }) - if res.llm_response is not None: - # handle response - pass + assert res.llm_response is not None + + # Handle response + print(res.llm_response) - target: $["paths"]["/segment-anything-2"]["post"] update: x-codeSamples: @@ -150,9 +161,10 @@ actions: }, }) - if res.masks_response is not None: - # handle response - pass + assert res.masks_response is not None + + # Handle response + print(res.masks_response) - target: $["paths"]["/text-to-image"]["post"] update: x-codeSamples: @@ -169,9 +181,10 @@ actions: "prompt": "", }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) - target: $["paths"]["/text-to-speech"]["post"] update: x-codeSamples: @@ -186,9 +199,10 @@ actions: res = livepeer.generate.text_to_speech(request={}) - if res.audio_response is not None: - # handle response - pass + assert res.audio_response is not None + + # Handle response + print(res.audio_response) - target: $["paths"]["/upscale"]["post"] update: x-codeSamples: @@ -209,6 +223,7 @@ actions: }, }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) diff --git a/docs/models/components/bodygenllm.md b/docs/models/components/bodygenllm.md deleted file mode 100644 index 5bbdda0..0000000 --- a/docs/models/components/bodygenllm.md +++ /dev/null @@ -1,14 +0,0 @@ -# BodyGenLLM - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `prompt` | *str* | 
:heavy_check_mark: | N/A | -| `model_id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `system_msg` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | N/A | -| `max_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `history` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/components/llmmessage.md b/docs/models/components/llmmessage.md new file mode 100644 index 0000000..d3ab16a --- /dev/null +++ b/docs/models/components/llmmessage.md @@ -0,0 +1,9 @@ +# LLMMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `role` | *str* | :heavy_check_mark: | N/A | +| `content` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/components/llmrequest.md b/docs/models/components/llmrequest.md new file mode 100644 index 0000000..dc7ca0c --- /dev/null +++ b/docs/models/components/llmrequest.md @@ -0,0 +1,14 @@ +# LLMRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `messages` | List[[components.LLMMessage](../../models/components/llmmessage.md)] | :heavy_check_mark: | N/A | +| `model` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `max_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `top_k` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/components/llmresponse.md b/docs/models/components/llmresponse.md index d7c47f6..11376bd 100644 --- a/docs/models/components/llmresponse.md +++ b/docs/models/components/llmresponse.md @@ -6,4 +6,7 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `response` | *str* | :heavy_check_mark: | N/A | -| `tokens_used` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file +| `tokens_used` | *int* | :heavy_check_mark: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/sdks/generate/README.md b/docs/sdks/generate/README.md index 8a49268..45edb30 100644 --- a/docs/sdks/generate/README.md +++ b/docs/sdks/generate/README.md @@ -33,9 +33,10 @@ with Livepeer( "prompt": "", }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) ``` @@ -79,9 +80,10 @@ with Livepeer( }, }) - if res.image_response is not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) ``` @@ -124,9 +126,10 @@ with Livepeer( }, }) - if res.video_response is not None: - # handle response - pass + assert res.video_response is not None + + # Handle response + print(res.video_response) ``` @@ -170,9 +173,10 @@ with Livepeer( }, }) - if res.image_response is 
not None: - # handle response - pass + assert res.image_response is not None + + # Handle response + print(res.image_response) ``` @@ -215,9 +219,10 @@ with Livepeer( }, }) - if res.text_response is not None: - # handle response - pass + assert res.text_response is not None + + # Handle response + print(res.text_response) ``` @@ -260,9 +265,10 @@ with Livepeer( }, }) - if res.masks_response is not None: - # handle response - pass + assert res.masks_response is not None + + # Handle response + print(res.masks_response) ``` @@ -299,12 +305,18 @@ with Livepeer( ) as livepeer: res = livepeer.generate.llm(request={ - "prompt": "", + "messages": [ + { + "role": "", + "content": "", + }, + ], }) - if res.llm_response is not None: - # handle response - pass + assert res.llm_response is not None + + # Handle response + print(res.llm_response) ``` @@ -312,7 +324,7 @@ with Livepeer( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `request` | [components.BodyGenLLM](../../models/components/bodygenllm.md) | :heavy_check_mark: | The request object to use for the request. | +| `request` | [components.LLMRequest](../../models/components/llmrequest.md) | :heavy_check_mark: | The request object to use for the request. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -347,9 +359,10 @@ with Livepeer( }, }) - if res.image_to_text_response is not None: - # handle response - pass + assert res.image_to_text_response is not None + + # Handle response + print(res.image_to_text_response) ``` @@ -390,9 +403,10 @@ with Livepeer( "publish_url": "https://vain-tabletop.biz", }) - if res.live_video_to_video_response is not None: - # handle response - pass + assert res.live_video_to_video_response is not None + + # Handle response + print(res.live_video_to_video_response) ``` @@ -430,9 +444,10 @@ with Livepeer( res = livepeer.generate.text_to_speech(request={}) - if res.audio_response is not None: - # handle response - pass + assert res.audio_response is not None + + # Handle response + print(res.audio_response) ``` diff --git a/pylintrc b/pylintrc index 66e1cfa..5080038 100644 --- a/pylintrc +++ b/pylintrc @@ -187,7 +187,8 @@ good-names=i, ex, Run, _, - e + e, + id # Good variable names regexes, separated by a comma. If names match any regex, # they will always be accepted diff --git a/pyproject.toml b/pyproject.toml index 34e62c4..8903fc2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "livepeer-ai" -version = "0.8.0" +version = "0.9.0" description = "Python Client SDK for the Livepeer AI API." 
authors = ["Speakeasy",] readme = "README-PYPI.md" diff --git a/src/livepeer_ai/_version.py b/src/livepeer_ai/_version.py index b53c12d..e2659f7 100644 --- a/src/livepeer_ai/_version.py +++ b/src/livepeer_ai/_version.py @@ -3,7 +3,7 @@ import importlib.metadata __title__: str = "livepeer-ai" -__version__: str = "0.8.0" +__version__: str = "0.9.0" try: if __package__ is not None: diff --git a/src/livepeer_ai/basesdk.py b/src/livepeer_ai/basesdk.py index 4783332..91d4a01 100644 --- a/src/livepeer_ai/basesdk.py +++ b/src/livepeer_ai/basesdk.py @@ -10,7 +10,7 @@ ) from livepeer_ai.models import errors from livepeer_ai.utils import RetryConfig, SerializedRequestBody, get_body_content -from typing import Callable, List, Optional, Tuple +from typing import Callable, List, Mapping, Optional, Tuple from urllib.parse import parse_qs, urlparse @@ -20,7 +20,7 @@ class BaseSDK: def __init__(self, sdk_config: SDKConfiguration) -> None: self.sdk_configuration = sdk_config - def get_url(self, base_url, url_variables): + def _get_url(self, base_url, url_variables): sdk_url, sdk_variables = self.sdk_configuration.get_server_details() if base_url is None: @@ -31,7 +31,7 @@ def get_url(self, base_url, url_variables): return utils.template_url(base_url, url_variables) - def build_request_async( + def _build_request_async( self, method, path, @@ -50,9 +50,10 @@ def build_request_async( Callable[[], Optional[SerializedRequestBody]] ] = None, url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> httpx.Request: client = self.sdk_configuration.async_client - return self.build_request_with_client( + return self._build_request_with_client( client, method, path, @@ -69,9 +70,10 @@ def build_request_async( timeout_ms, get_serialized_body, url_override, + http_headers, ) - def build_request( + def _build_request( self, method, path, @@ -90,9 +92,10 @@ def build_request( Callable[[], Optional[SerializedRequestBody]] ] = None, url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> httpx.Request: client = self.sdk_configuration.client - return self.build_request_with_client( + return self._build_request_with_client( client, method, path, @@ -109,9 +112,10 @@ def build_request( timeout_ms, get_serialized_body, url_override, + http_headers, ) - def build_request_with_client( + def _build_request_with_client( self, client, method, @@ -131,13 +135,14 @@ def build_request_with_client( Callable[[], Optional[SerializedRequestBody]] ] = None, url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> httpx.Request: query_params = {} url = url_override if url is None: url = utils.generate_url( - self.get_url(base_url, url_variables), + self._get_url(base_url, url_variables), path, request if request_has_path_params else None, _globals if request_has_path_params else None, @@ -186,6 +191,10 @@ def build_request_with_client( ): headers["content-type"] = serialized_request_body.media_type + if http_headers is not None: + for header, value in http_headers.items(): + headers[header] = value + timeout = timeout_ms / 1000 if timeout_ms is not None else None return client.build_request( diff --git a/src/livepeer_ai/generate.py b/src/livepeer_ai/generate.py index 770a2be..9d41f4c 100644 --- a/src/livepeer_ai/generate.py +++ b/src/livepeer_ai/generate.py @@ -5,7 +5,7 @@ from livepeer_ai._hooks import HookContext from livepeer_ai.models import components, errors, operations from livepeer_ai.types import BaseModel, OptionalNullable, 
UNSET -from typing import Any, Optional, Union, cast +from typing import Any, Mapping, Optional, Union, cast class Generate(BaseSDK): @@ -18,6 +18,7 @@ def text_to_image( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenTextToImageResponse: r"""Text To Image @@ -27,6 +28,7 @@ def text_to_image( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -40,7 +42,7 @@ def text_to_image( request = utils.unmarshal(request, components.TextToImageParams) request = cast(components.TextToImageParams, request) - req = self.build_request( + req = self._build_request( method="POST", path="/text-to-image", base_url=base_url, @@ -51,6 +53,7 @@ def text_to_image( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", components.TextToImageParams @@ -115,6 +118,7 @@ async def text_to_image_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenTextToImageResponse: r"""Text To Image @@ -124,6 +128,7 @@ async def text_to_image_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -137,7 +142,7 @@ async def text_to_image_async( request = utils.unmarshal(request, components.TextToImageParams) request = cast(components.TextToImageParams, request) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/text-to-image", base_url=base_url, @@ -148,6 +153,7 @@ async def text_to_image_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", components.TextToImageParams @@ -212,6 +218,7 @@ def image_to_image( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenImageToImageResponse: r"""Image To Image @@ -221,6 +228,7 @@ def image_to_image( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -234,7 +242,7 @@ def image_to_image( request = utils.unmarshal(request, components.BodyGenImageToImage) request = cast(components.BodyGenImageToImage, request) - req = self.build_request( + req = self._build_request( method="POST", path="/image-to-image", base_url=base_url, @@ -245,6 +253,7 @@ def image_to_image( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", components.BodyGenImageToImage @@ -309,6 +318,7 @@ async def image_to_image_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenImageToImageResponse: r"""Image To Image @@ -318,6 +328,7 @@ async def image_to_image_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -331,7 +342,7 @@ async def image_to_image_async( request = utils.unmarshal(request, components.BodyGenImageToImage) request = cast(components.BodyGenImageToImage, request) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/image-to-image", base_url=base_url, @@ -342,6 +353,7 @@ async def image_to_image_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", components.BodyGenImageToImage @@ -406,6 +418,7 @@ def image_to_video( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenImageToVideoResponse: r"""Image To Video @@ -415,6 +428,7 @@ def image_to_video( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -428,7 +442,7 @@ def image_to_video( request = utils.unmarshal(request, components.BodyGenImageToVideo) request = cast(components.BodyGenImageToVideo, request) - req = self.build_request( + req = self._build_request( method="POST", path="/image-to-video", base_url=base_url, @@ -439,6 +453,7 @@ def image_to_video( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", components.BodyGenImageToVideo @@ -503,6 +518,7 @@ async def image_to_video_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenImageToVideoResponse: r"""Image To Video @@ -512,6 +528,7 @@ async def image_to_video_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -525,7 +542,7 @@ async def image_to_video_async( request = utils.unmarshal(request, components.BodyGenImageToVideo) request = cast(components.BodyGenImageToVideo, request) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/image-to-video", base_url=base_url, @@ -536,6 +553,7 @@ async def image_to_video_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", components.BodyGenImageToVideo @@ -598,6 +616,7 @@ def upscale( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenUpscaleResponse: r"""Upscale @@ -607,6 +626,7 @@ def upscale( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -620,7 +640,7 @@ def upscale( request = utils.unmarshal(request, components.BodyGenUpscale) request = cast(components.BodyGenUpscale, request) - req = self.build_request( + req = self._build_request( method="POST", path="/upscale", base_url=base_url, @@ -631,6 +651,7 @@ def upscale( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", components.BodyGenUpscale @@ -693,6 +714,7 @@ async def upscale_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenUpscaleResponse: r"""Upscale @@ -702,6 +724,7 @@ async def upscale_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -715,7 +738,7 @@ async def upscale_async( request = utils.unmarshal(request, components.BodyGenUpscale) request = cast(components.BodyGenUpscale, request) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/upscale", base_url=base_url, @@ -726,6 +749,7 @@ async def upscale_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", components.BodyGenUpscale @@ -790,6 +814,7 @@ def audio_to_text( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenAudioToTextResponse: r"""Audio To Text @@ -799,6 +824,7 @@ def audio_to_text( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -812,7 +838,7 @@ def audio_to_text( request = utils.unmarshal(request, components.BodyGenAudioToText) request = cast(components.BodyGenAudioToText, request) - req = self.build_request( + req = self._build_request( method="POST", path="/audio-to-text", base_url=base_url, @@ -823,6 +849,7 @@ def audio_to_text( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", components.BodyGenAudioToText @@ -889,6 +916,7 @@ async def audio_to_text_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenAudioToTextResponse: r"""Audio To Text @@ -898,6 +926,7 @@ async def audio_to_text_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -911,7 +940,7 @@ async def audio_to_text_async( request = utils.unmarshal(request, components.BodyGenAudioToText) request = cast(components.BodyGenAudioToText, request) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/audio-to-text", base_url=base_url, @@ -922,6 +951,7 @@ async def audio_to_text_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", components.BodyGenAudioToText @@ -989,6 +1019,7 @@ def segment_anything2( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenSegmentAnything2Response: r"""Segment Anything 2 @@ -998,6 +1029,7 @@ def segment_anything2( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -1011,7 +1043,7 @@ def segment_anything2( request = utils.unmarshal(request, components.BodyGenSegmentAnything2) request = cast(components.BodyGenSegmentAnything2, request) - req = self.build_request( + req = self._build_request( method="POST", path="/segment-anything-2", base_url=base_url, @@ -1022,6 +1054,7 @@ def segment_anything2( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", components.BodyGenSegmentAnything2 @@ -1087,6 +1120,7 @@ async def segment_anything2_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenSegmentAnything2Response: r"""Segment Anything 2 @@ -1096,6 +1130,7 @@ async def segment_anything2_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -1109,7 +1144,7 @@ async def segment_anything2_async( request = utils.unmarshal(request, components.BodyGenSegmentAnything2) request = cast(components.BodyGenSegmentAnything2, request) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/segment-anything-2", base_url=base_url, @@ -1120,6 +1155,7 @@ async def segment_anything2_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", components.BodyGenSegmentAnything2 @@ -1178,10 +1214,11 @@ async def segment_anything2_async( def llm( self, *, - request: Union[components.BodyGenLLM, components.BodyGenLLMTypedDict], + request: Union[components.LLMRequest, components.LLMRequestTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenLLMResponse: r"""LLM @@ -1191,6 +1228,7 @@ def llm( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -1201,10 +1239,10 @@ def llm( base_url = server_url if not isinstance(request, BaseModel): - request = utils.unmarshal(request, components.BodyGenLLM) - request = cast(components.BodyGenLLM, request) + request = utils.unmarshal(request, components.LLMRequest) + request = cast(components.LLMRequest, request) - req = self.build_request( + req = self._build_request( method="POST", path="/llm", base_url=base_url, @@ -1215,9 +1253,10 @@ def llm( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "form", components.BodyGenLLM + request, False, False, "json", components.LLMRequest ), timeout_ms=timeout_ms, ) @@ -1273,10 +1312,11 @@ def llm( async def llm_async( self, *, - request: Union[components.BodyGenLLM, components.BodyGenLLMTypedDict], + request: Union[components.LLMRequest, components.LLMRequestTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenLLMResponse: r"""LLM @@ -1286,6 +1326,7 @@ async def llm_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -1296,10 +1337,10 @@ async def llm_async( base_url = server_url if not isinstance(request, BaseModel): - request = utils.unmarshal(request, components.BodyGenLLM) - request = cast(components.BodyGenLLM, request) + request = utils.unmarshal(request, components.LLMRequest) + request = cast(components.LLMRequest, request) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/llm", base_url=base_url, @@ -1310,9 +1351,10 @@ async def llm_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "form", components.BodyGenLLM + request, False, False, "json", components.LLMRequest ), timeout_ms=timeout_ms, ) @@ -1374,6 +1416,7 @@ def image_to_text( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenImageToTextResponse: r"""Image To Text @@ -1383,6 +1426,7 @@ def image_to_text( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -1396,7 +1440,7 @@ def image_to_text( request = utils.unmarshal(request, components.BodyGenImageToText) request = cast(components.BodyGenImageToText, request) - req = self.build_request( + req = self._build_request( method="POST", path="/image-to-text", base_url=base_url, @@ -1407,6 +1451,7 @@ def image_to_text( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", components.BodyGenImageToText @@ -1473,6 +1518,7 @@ async def image_to_text_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenImageToTextResponse: r"""Image To Text @@ -1482,6 +1528,7 @@ async def image_to_text_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -1495,7 +1542,7 @@ async def image_to_text_async( request = utils.unmarshal(request, components.BodyGenImageToText) request = cast(components.BodyGenImageToText, request) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/image-to-text", base_url=base_url, @@ -1506,6 +1553,7 @@ async def image_to_text_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", components.BodyGenImageToText @@ -1573,6 +1621,7 @@ def live_video_to_video( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenLiveVideoToVideoResponse: r"""Live Video To Video @@ -1582,6 +1631,7 @@ def live_video_to_video( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -1595,7 +1645,7 @@ def live_video_to_video( request = utils.unmarshal(request, components.LiveVideoToVideoParams) request = cast(components.LiveVideoToVideoParams, request) - req = self.build_request( + req = self._build_request( method="POST", path="/live-video-to-video", base_url=base_url, @@ -1606,6 +1656,7 @@ def live_video_to_video( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", components.LiveVideoToVideoParams @@ -1671,6 +1722,7 @@ async def live_video_to_video_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenLiveVideoToVideoResponse: r"""Live Video To Video @@ -1680,6 +1732,7 @@ async def live_video_to_video_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -1693,7 +1746,7 @@ async def live_video_to_video_async( request = utils.unmarshal(request, components.LiveVideoToVideoParams) request = cast(components.LiveVideoToVideoParams, request) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/live-video-to-video", base_url=base_url, @@ -1704,6 +1757,7 @@ async def live_video_to_video_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", components.LiveVideoToVideoParams @@ -1768,6 +1822,7 @@ def text_to_speech( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenTextToSpeechResponse: r"""Text To Speech @@ -1777,6 +1832,7 @@ def text_to_speech( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -1790,7 +1846,7 @@ def text_to_speech( request = utils.unmarshal(request, components.TextToSpeechParams) request = cast(components.TextToSpeechParams, request) - req = self.build_request( + req = self._build_request( method="POST", path="/text-to-speech", base_url=base_url, @@ -1801,6 +1857,7 @@ def text_to_speech( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", components.TextToSpeechParams @@ -1865,6 +1922,7 @@ async def text_to_speech_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> operations.GenTextToSpeechResponse: r"""Text To Speech @@ -1874,6 +1932,7 @@ async def text_to_speech_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -1887,7 +1946,7 @@ async def text_to_speech_async( request = utils.unmarshal(request, components.TextToSpeechParams) request = cast(components.TextToSpeechParams, request) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/text-to-speech", base_url=base_url, @@ -1898,6 +1957,7 @@ async def text_to_speech_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", components.TextToSpeechParams diff --git a/src/livepeer_ai/models/components/__init__.py b/src/livepeer_ai/models/components/__init__.py index 592ef9f..27bde89 100644 --- a/src/livepeer_ai/models/components/__init__.py +++ b/src/livepeer_ai/models/components/__init__.py @@ -26,7 +26,6 @@ BodyGenImageToVideoImageTypedDict, BodyGenImageToVideoTypedDict, ) -from .body_genllm import BodyGenLLM, BodyGenLLMTypedDict from .body_gensegmentanything2 import ( BodyGenSegmentAnything2, BodyGenSegmentAnything2Image, @@ -53,6 +52,8 @@ LiveVideoToVideoResponse, LiveVideoToVideoResponseTypedDict, ) +from .llmmessage import LLMMessage, LLMMessageTypedDict +from .llmrequest import LLMRequest, LLMRequestTypedDict from .llmresponse import LLMResponse, LLMResponseTypedDict from .masksresponse import MasksResponse, MasksResponseTypedDict from .media import Media, MediaTypedDict @@ -88,8 +89,6 @@ "BodyGenImageToVideoImage", "BodyGenImageToVideoImageTypedDict", "BodyGenImageToVideoTypedDict", - "BodyGenLLM", - "BodyGenLLMTypedDict", "BodyGenSegmentAnything2", "BodyGenSegmentAnything2Image", "BodyGenSegmentAnything2ImageTypedDict", @@ -108,6 +107,10 @@ "ImageToTextResponse", "ImageToTextResponseTypedDict", "ImageTypedDict", + "LLMMessage", + "LLMMessageTypedDict", + "LLMRequest", + "LLMRequestTypedDict", "LLMResponse", "LLMResponseTypedDict", "LiveVideoToVideoParams", diff --git a/src/livepeer_ai/models/components/body_genaudiototext.py b/src/livepeer_ai/models/components/body_genaudiototext.py index 2a439d1..e82dced 100644 --- 
a/src/livepeer_ai/models/components/body_genaudiototext.py +++ b/src/livepeer_ai/models/components/body_genaudiototext.py @@ -17,7 +17,7 @@ class AudioTypedDict(TypedDict): class Audio(BaseModel): file_name: Annotated[ - str, pydantic.Field(alias="audio"), FieldMetadata(multipart=True) + str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) ] content: Annotated[ @@ -43,11 +43,7 @@ class BodyGenAudioToTextTypedDict(TypedDict): class BodyGenAudioToText(BaseModel): - audio: Annotated[ - Audio, - pydantic.Field(alias=""), - FieldMetadata(multipart=MultipartFormMetadata(file=True)), - ] + audio: Annotated[Audio, FieldMetadata(multipart=MultipartFormMetadata(file=True))] r"""Uploaded audio file to be transcribed.""" model_id: Annotated[Optional[str], FieldMetadata(multipart=True)] = "" diff --git a/src/livepeer_ai/models/components/body_genimagetoimage.py b/src/livepeer_ai/models/components/body_genimagetoimage.py index f490bb5..d23387e 100644 --- a/src/livepeer_ai/models/components/body_genimagetoimage.py +++ b/src/livepeer_ai/models/components/body_genimagetoimage.py @@ -17,7 +17,7 @@ class ImageTypedDict(TypedDict): class Image(BaseModel): file_name: Annotated[ - str, pydantic.Field(alias="image"), FieldMetadata(multipart=True) + str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) ] content: Annotated[ @@ -64,11 +64,7 @@ class BodyGenImageToImage(BaseModel): prompt: Annotated[str, FieldMetadata(multipart=True)] r"""Text prompt(s) to guide image generation.""" - image: Annotated[ - Image, - pydantic.Field(alias=""), - FieldMetadata(multipart=MultipartFormMetadata(file=True)), - ] + image: Annotated[Image, FieldMetadata(multipart=MultipartFormMetadata(file=True))] r"""Uploaded image to modify with the pipeline.""" model_id: Annotated[Optional[str], FieldMetadata(multipart=True)] = "" diff --git a/src/livepeer_ai/models/components/body_genimagetotext.py b/src/livepeer_ai/models/components/body_genimagetotext.py index 63ed6bf..1b1da5f 100644 --- a/src/livepeer_ai/models/components/body_genimagetotext.py +++ b/src/livepeer_ai/models/components/body_genimagetotext.py @@ -17,7 +17,7 @@ class BodyGenImageToTextImageTypedDict(TypedDict): class BodyGenImageToTextImage(BaseModel): file_name: Annotated[ - str, pydantic.Field(alias="image"), FieldMetadata(multipart=True) + str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) ] content: Annotated[ @@ -45,7 +45,6 @@ class BodyGenImageToTextTypedDict(TypedDict): class BodyGenImageToText(BaseModel): image: Annotated[ BodyGenImageToTextImage, - pydantic.Field(alias=""), FieldMetadata(multipart=MultipartFormMetadata(file=True)), ] r"""Uploaded image to transform with the pipeline.""" diff --git a/src/livepeer_ai/models/components/body_genimagetovideo.py b/src/livepeer_ai/models/components/body_genimagetovideo.py index b5883bb..8e0f202 100644 --- a/src/livepeer_ai/models/components/body_genimagetovideo.py +++ b/src/livepeer_ai/models/components/body_genimagetovideo.py @@ -17,7 +17,7 @@ class BodyGenImageToVideoImageTypedDict(TypedDict): class BodyGenImageToVideoImage(BaseModel): file_name: Annotated[ - str, pydantic.Field(alias="image"), FieldMetadata(multipart=True) + str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) ] content: Annotated[ @@ -59,7 +59,6 @@ class BodyGenImageToVideoTypedDict(TypedDict): class BodyGenImageToVideo(BaseModel): image: Annotated[ BodyGenImageToVideoImage, - pydantic.Field(alias=""), FieldMetadata(multipart=MultipartFormMetadata(file=True)), ] r"""Uploaded image to 
generate a video from.""" diff --git a/src/livepeer_ai/models/components/body_genllm.py b/src/livepeer_ai/models/components/body_genllm.py deleted file mode 100644 index 14ecff9..0000000 --- a/src/livepeer_ai/models/components/body_genllm.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from livepeer_ai.types import BaseModel -from livepeer_ai.utils import FieldMetadata -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class BodyGenLLMTypedDict(TypedDict): - prompt: str - model_id: NotRequired[str] - system_msg: NotRequired[str] - temperature: NotRequired[float] - max_tokens: NotRequired[int] - history: NotRequired[str] - stream: NotRequired[bool] - - -class BodyGenLLM(BaseModel): - prompt: Annotated[str, FieldMetadata(form=True)] - - model_id: Annotated[Optional[str], FieldMetadata(form=True)] = "" - - system_msg: Annotated[Optional[str], FieldMetadata(form=True)] = "" - - temperature: Annotated[Optional[float], FieldMetadata(form=True)] = 0.7 - - max_tokens: Annotated[Optional[int], FieldMetadata(form=True)] = 256 - - history: Annotated[Optional[str], FieldMetadata(form=True)] = "[]" - - stream: Annotated[Optional[bool], FieldMetadata(form=True)] = False diff --git a/src/livepeer_ai/models/components/body_gensegmentanything2.py b/src/livepeer_ai/models/components/body_gensegmentanything2.py index d9ecadd..9e60f3f 100644 --- a/src/livepeer_ai/models/components/body_gensegmentanything2.py +++ b/src/livepeer_ai/models/components/body_gensegmentanything2.py @@ -17,7 +17,7 @@ class BodyGenSegmentAnything2ImageTypedDict(TypedDict): class BodyGenSegmentAnything2Image(BaseModel): file_name: Annotated[ - str, pydantic.Field(alias="image"), FieldMetadata(multipart=True) + str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) ] content: Annotated[ @@ -57,7 +57,6 @@ class BodyGenSegmentAnything2TypedDict(TypedDict): class BodyGenSegmentAnything2(BaseModel): image: Annotated[ BodyGenSegmentAnything2Image, - pydantic.Field(alias=""), FieldMetadata(multipart=MultipartFormMetadata(file=True)), ] r"""Image to segment.""" diff --git a/src/livepeer_ai/models/components/body_genupscale.py b/src/livepeer_ai/models/components/body_genupscale.py index 1b6b3b5..f43c201 100644 --- a/src/livepeer_ai/models/components/body_genupscale.py +++ b/src/livepeer_ai/models/components/body_genupscale.py @@ -17,7 +17,7 @@ class BodyGenUpscaleImageTypedDict(TypedDict): class BodyGenUpscaleImage(BaseModel): file_name: Annotated[ - str, pydantic.Field(alias="image"), FieldMetadata(multipart=True) + str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) ] content: Annotated[ @@ -53,9 +53,7 @@ class BodyGenUpscale(BaseModel): r"""Text prompt(s) to guide upscaled image generation.""" image: Annotated[ - BodyGenUpscaleImage, - pydantic.Field(alias=""), - FieldMetadata(multipart=MultipartFormMetadata(file=True)), + BodyGenUpscaleImage, FieldMetadata(multipart=MultipartFormMetadata(file=True)) ] r"""Uploaded image to modify with the pipeline.""" diff --git a/src/livepeer_ai/models/components/llmmessage.py b/src/livepeer_ai/models/components/llmmessage.py new file mode 100644 index 0000000..e242a3e --- /dev/null +++ b/src/livepeer_ai/models/components/llmmessage.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from livepeer_ai.types import BaseModel +from typing_extensions import TypedDict + + +class LLMMessageTypedDict(TypedDict): + role: str + content: str + + +class LLMMessage(BaseModel): + role: str + + content: str diff --git a/src/livepeer_ai/models/components/llmrequest.py b/src/livepeer_ai/models/components/llmrequest.py new file mode 100644 index 0000000..86dc0a6 --- /dev/null +++ b/src/livepeer_ai/models/components/llmrequest.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .llmmessage import LLMMessage, LLMMessageTypedDict +from livepeer_ai.types import BaseModel +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class LLMRequestTypedDict(TypedDict): + messages: List[LLMMessageTypedDict] + model: NotRequired[str] + temperature: NotRequired[float] + max_tokens: NotRequired[int] + top_p: NotRequired[float] + top_k: NotRequired[int] + stream: NotRequired[bool] + + +class LLMRequest(BaseModel): + messages: List[LLMMessage] + + model: Optional[str] = "" + + temperature: Optional[float] = 0.7 + + max_tokens: Optional[int] = 256 + + top_p: Optional[float] = 1 + + top_k: Optional[int] = -1 + + stream: Optional[bool] = False diff --git a/src/livepeer_ai/models/components/llmresponse.py b/src/livepeer_ai/models/components/llmresponse.py index 5a53642..b4ee8c1 100644 --- a/src/livepeer_ai/models/components/llmresponse.py +++ b/src/livepeer_ai/models/components/llmresponse.py @@ -8,9 +8,18 @@ class LLMResponseTypedDict(TypedDict): response: str tokens_used: int + id: str + model: str + created: int class LLMResponse(BaseModel): response: str tokens_used: int + + id: str + + model: str + + created: int diff --git a/src/livepeer_ai/sdkconfiguration.py b/src/livepeer_ai/sdkconfiguration.py index e2a4acc..331689f 100644 --- a/src/livepeer_ai/sdkconfiguration.py +++ b/src/livepeer_ai/sdkconfiguration.py @@ -31,9 +31,9 @@ class SDKConfiguration: server_idx: Optional[int] = 0 language: str = "python" openapi_doc_version: str = "0.0.0" - sdk_version: str = "0.8.0" - gen_version: str = "2.477.0" - user_agent: str = "speakeasy-sdk/python 0.8.0 2.477.0 0.0.0 livepeer-ai" + sdk_version: str = "0.9.0" + gen_version: str = "2.484.0" + user_agent: str = "speakeasy-sdk/python 0.9.0 2.484.0 0.0.0 livepeer-ai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/src/livepeer_ai/utils/forms.py b/src/livepeer_ai/utils/forms.py index 9f5a731..0472aba 100644 --- a/src/livepeer_ai/utils/forms.py +++ b/src/livepeer_ai/utils/forms.py @@ -109,13 +109,12 @@ def serialize_multipart_form( if not field_metadata: continue - f_name = field.alias if field.alias is not None else name + f_name = field.alias if field.alias else name if field_metadata.file: file_fields: Dict[str, FieldInfo] = val.__class__.model_fields file_name = "" - field_name = "" content = None content_type = None @@ -131,20 +130,15 @@ def serialize_multipart_form( elif file_field_name == "content_type": content_type = getattr(val, file_field_name, None) else: - field_name = ( - file_field.alias - if file_field.alias is not None - else file_field_name - ) file_name = getattr(val, file_field_name) - if field_name == "" or file_name == "" or content is None: + if file_name == "" or content is None: raise ValueError("invalid multipart/form-data file") if content_type is not None: - files[field_name] = 
(file_name, content, content_type) + files[f_name] = (file_name, content, content_type) else: - files[field_name] = (file_name, content) + files[f_name] = (file_name, content) elif field_metadata.json: files[f_name] = ( None,
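Every `generate` method in this diff gains an `http_headers: Optional[Mapping[str, str]]` parameter, and `_build_request_with_client` now merges those headers into the outgoing request after the serialized body's content-type is decided. A minimal usage sketch; the header name and the bare `Livepeer()` constructor are illustrative assumptions.

```
from livepeer_ai import Livepeer

with Livepeer() as livepeer:  # credentials omitted; configure as in the README examples
    # Per-call headers are set or replaced on the outgoing request
    # (see the http_headers loop added in basesdk.py above).
    res = livepeer.generate.text_to_image(
        request={"prompt": ""},
        http_headers={"X-Request-Id": "example-123"},  # illustrative header name
    )

    assert res.image_response is not None
    print(res.image_response)
```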
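The multipart models in this diff also change shape: the nested file objects' `file_name` field is now aliased to `fileName` rather than the outer field name, the empty `pydantic.Field(alias="")` on the outer fields is dropped, and `serialize_multipart_form` keys file parts by the outer field's alias. A sketch of an upload call under the assumption that the generated request TypedDicts expose `file_name` and `content` keys; that key naming is inferred from the models, not shown in this diff.

```
from livepeer_ai import Livepeer

with Livepeer() as livepeer:  # credentials omitted; configure as in the README examples
    with open("input.png", "rb") as f:
        res = livepeer.generate.image_to_image(request={
            "prompt": "",
            "image": {
                # file_name maps to the new `fileName` alias; the multipart
                # part itself is keyed by the outer `image` field (forms.py).
                "file_name": "input.png",
                "content": f.read(),
            },
        })

    assert res.image_response is not None
    print(res.image_response)
```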