From 6ce19494860b35c28530105bb5e7205acd44153c Mon Sep 17 00:00:00 2001 From: SDKAuto Date: Mon, 11 Sep 2023 12:13:44 +0000 Subject: [PATCH] CodeGen from PR 25513 in Azure/azure-rest-api-specs Merge fb4f558b15075826656a3061d332d1a198352dd4 into c78b5d8bd3aff2d82a5f034d9164b1a9ac030e09 --- .../azure/ai/openai/OpenAIAsyncClient.java | 503 +++++ .../com/azure/ai/openai/OpenAIClient.java | 501 +++++ .../azure/ai/openai/OpenAIServiceVersion.java | 7 +- .../implementation/OpenAIClientImpl.java | 1888 ++++++++++++++++- .../openai/models/ContentFilterResults.java | 20 + sdk/openai/azure-ai-openai/tsp-location.yaml | 6 +- 6 files changed, 2832 insertions(+), 93 deletions(-) diff --git a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIAsyncClient.java b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIAsyncClient.java index 42355fa64518..1289c04376ba 100644 --- a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIAsyncClient.java +++ b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIAsyncClient.java @@ -651,6 +651,18 @@ PollerFlux beginBeginAzureBatchImageGeneration( * violence (Optional): (recursive schema, see violence above) * hate (Optional): (recursive schema, see hate above) * self_harm (Optional): (recursive schema, see self_harm above) + * error (Optional): { + * code: String (Required) + * message: String (Required) + * target: String (Optional) + * details (Optional): [ + * (recursive schema, see above) + * ] + * innererror (Optional): { + * code: String (Optional) + * innererror (Optional): (recursive schema, see innererror above) + * } + * } * } * } * ] @@ -688,4 +700,495 @@ Mono> getChatCompletionsWithAzureExtensionsWithResponse( return this.serviceClient.getChatCompletionsWithAzureExtensionsWithResponseAsync( deploymentOrModelName, chatCompletionsOptions, requestOptions); } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsSimpleJson This format will return an JSON structure containing a single "text" + * with the transcription. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response containing only the transcribed text along with {@link Response} on successful + * completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Mono> getAudioTranscriptionSimpleJsonWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsSimpleJson, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranscriptionSimpleJsonWithResponseAsync( + deploymentOrModelName, contentLength, audioTranscriptionOptionsSimpleJson, requestOptions); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     *     task: String(transcribe/translate) (Required)
+     *     language: String (Required)
+     *     duration: double (Required)
+     *     segments (Required): [
+     *          (Required){
+     *             id: int (Required)
+     *             start: double (Required)
+     *             end: double (Required)
+     *             text: String (Required)
+     *             temperature: double (Required)
+     *             avg_logprob: double (Required)
+     *             compression_ratio: double (Required)
+     *             no_speech_prob: double (Required)
+     *             tokens (Required): [
+     *                 int (Required)
+     *             ]
+     *             seek: int (Required)
+     *         }
+     *     ]
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsVerboseJson This format will return an JSON structure containing an enriched + * structure with the transcription. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response along with {@link Response} on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Mono> getAudioTranscriptionVerboseJsonWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsVerboseJson, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranscriptionVerboseJsonWithResponseAsync( + deploymentOrModelName, contentLength, audioTranscriptionOptionsVerboseJson, requestOptions); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsPlainText This will make the response return the transcription as plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response} on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Mono> getAudioTranscriptionPlainTextWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsPlainText, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranscriptionPlainTextWithResponseAsync( + deploymentOrModelName, contentLength, audioTranscriptionOptionsPlainText, requestOptions); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsSrt The transcription will be provided in SRT format (SubRip Text) in the form of + * plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response} on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Mono> getAudioTranscriptionSrtWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsSrt, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranscriptionSrtWithResponseAsync( + deploymentOrModelName, contentLength, audioTranscriptionOptionsSrt, requestOptions); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsVtt The transcription will be provided in VTT format (Web Video Text Tracks) in + * the form of plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response} on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Mono> getAudioTranscriptionVttWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsVtt, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranscriptionVttWithResponseAsync( + deploymentOrModelName, contentLength, audioTranscriptionOptionsVtt, requestOptions); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsSimpleJson This format will return an JSON structure containing a single "text" + * with the translation. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response containing only the transcribed text along with {@link Response} on successful + * completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Mono> getAudioTranslationSimpleJsonWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsSimpleJson, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranslationSimpleJsonWithResponseAsync( + deploymentOrModelName, contentLength, audioTranslationOptionsSimpleJson, requestOptions); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     *     task: String(transcribe/translate) (Required)
+     *     language: String (Required)
+     *     duration: double (Required)
+     *     segments (Required): [
+     *          (Required){
+     *             id: int (Required)
+     *             start: double (Required)
+     *             end: double (Required)
+     *             text: String (Required)
+     *             temperature: double (Required)
+     *             avg_logprob: double (Required)
+     *             compression_ratio: double (Required)
+     *             no_speech_prob: double (Required)
+     *             tokens (Required): [
+     *                 int (Required)
+     *             ]
+     *             seek: int (Required)
+     *         }
+     *     ]
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsVerboseJson This format will return an JSON structure containing an enriched + * structure with the translation. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response along with {@link Response} on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Mono> getAudioTranslationVerboseJsonWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsVerboseJson, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranslationVerboseJsonWithResponseAsync( + deploymentOrModelName, contentLength, audioTranslationOptionsVerboseJson, requestOptions); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsPlainText This will make the response return the translation as plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response} on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Mono> getAudioTranslationPlainTextWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsPlainText, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranslationPlainTextWithResponseAsync( + deploymentOrModelName, contentLength, audioTranslationOptionsPlainText, requestOptions); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsSrt The translation will be provided in SRT format (SubRip Text) in the form of + * plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response} on successful completion of {@link Mono}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Mono> getAudioTranslationSrtWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsSrt, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranslationSrtWithResponseAsync( + deploymentOrModelName, contentLength, audioTranslationOptionsSrt, requestOptions); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsVtt The translation will be provided in VTT format (Web Video Text Tracks) in the + * form of plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response} on successful completion of {@link Mono}. 
+ */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Mono> getAudioTranslationVttWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsVtt, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranslationVttWithResponseAsync( + deploymentOrModelName, contentLength, audioTranslationOptionsVtt, requestOptions); + } } diff --git a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIClient.java b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIClient.java index 138b05addadc..1a9e53be280a 100644 --- a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIClient.java +++ b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIClient.java @@ -650,6 +650,18 @@ SyncPoller beginBeginAzureBatchImageGeneration( * violence (Optional): (recursive schema, see violence above) * hate (Optional): (recursive schema, see hate above) * self_harm (Optional): (recursive schema, see self_harm above) + * error (Optional): { + * code: String (Required) + * message: String (Required) + * target: String (Optional) + * details (Optional): [ + * (recursive schema, see above) + * ] + * innererror (Optional): { + * code: String (Optional) + * innererror (Optional): (recursive schema, see innererror above) + * } + * } * } * } * ] @@ -687,4 +699,493 @@ Response getChatCompletionsWithAzureExtensionsWithResponse( return this.serviceClient.getChatCompletionsWithAzureExtensionsWithResponse( deploymentOrModelName, chatCompletionsOptions, requestOptions); } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsSimpleJson This format will return an JSON structure containing a single "text" + * with the transcription. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response containing only the transcribed text along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Response getAudioTranscriptionSimpleJsonWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsSimpleJson, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranscriptionSimpleJsonWithResponse( + deploymentOrModelName, contentLength, audioTranscriptionOptionsSimpleJson, requestOptions); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     *     task: String(transcribe/translate) (Required)
+     *     language: String (Required)
+     *     duration: double (Required)
+     *     segments (Required): [
+     *          (Required){
+     *             id: int (Required)
+     *             start: double (Required)
+     *             end: double (Required)
+     *             text: String (Required)
+     *             temperature: double (Required)
+     *             avg_logprob: double (Required)
+     *             compression_ratio: double (Required)
+     *             no_speech_prob: double (Required)
+     *             tokens (Required): [
+     *                 int (Required)
+     *             ]
+     *             seek: int (Required)
+     *         }
+     *     ]
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsVerboseJson This format will return an JSON structure containing an enriched + * structure with the transcription. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Response getAudioTranscriptionVerboseJsonWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsVerboseJson, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranscriptionVerboseJsonWithResponse( + deploymentOrModelName, contentLength, audioTranscriptionOptionsVerboseJson, requestOptions); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsPlainText This will make the response return the transcription as plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Response getAudioTranscriptionPlainTextWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsPlainText, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranscriptionPlainTextWithResponse( + deploymentOrModelName, contentLength, audioTranscriptionOptionsPlainText, requestOptions); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsSrt The transcription will be provided in SRT format (SubRip Text) in the form of + * plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Response getAudioTranscriptionSrtWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsSrt, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranscriptionSrtWithResponse( + deploymentOrModelName, contentLength, audioTranscriptionOptionsSrt, requestOptions); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsVtt The transcription will be provided in VTT format (Web Video Text Tracks) in + * the form of plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Response getAudioTranscriptionVttWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsVtt, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranscriptionVttWithResponse( + deploymentOrModelName, contentLength, audioTranscriptionOptionsVtt, requestOptions); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsSimpleJson This format will return a JSON structure containing a single "text" + * with the translation. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response containing only the transcribed text along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Response getAudioTranslationSimpleJsonWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsSimpleJson, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranslationSimpleJsonWithResponse( + deploymentOrModelName, contentLength, audioTranslationOptionsSimpleJson, requestOptions); + } + + /** + * Transcribes and translates input audio into English text. + + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     *     task: String(transcribe/translate) (Required)
+     *     language: String (Required)
+     *     duration: double (Required)
+     *     segments (Required): [
+     *          (Required){
+     *             id: int (Required)
+     *             start: double (Required)
+     *             end: double (Required)
+     *             text: String (Required)
+     *             temperature: double (Required)
+     *             avg_logprob: double (Required)
+     *             compression_ratio: double (Required)
+     *             no_speech_prob: double (Required)
+     *             tokens (Required): [
+     *                 int (Required)
+     *             ]
+     *             seek: int (Required)
+     *         }
+     *     ]
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsVerboseJson This format will return a JSON structure containing an enriched + * structure with the translation. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Response getAudioTranslationVerboseJsonWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsVerboseJson, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranslationVerboseJsonWithResponse( + deploymentOrModelName, contentLength, audioTranslationOptionsVerboseJson, requestOptions); + } + + /** + * Transcribes and translates input audio into English text. + + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsPlainText This will make the response return the translation as plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Response getAudioTranslationPlainTextWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsPlainText, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranslationPlainTextWithResponse( + deploymentOrModelName, contentLength, audioTranslationOptionsPlainText, requestOptions); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsSrt The translation will be provided in SRT format (SubRip Text) in the form of + * plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Response getAudioTranslationSrtWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsSrt, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranslationSrtWithResponse( + deploymentOrModelName, contentLength, audioTranslationOptionsSrt, requestOptions); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsVtt The translation will be provided in VTT format (Web Video Text Tracks) in the + * form of plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response}. + */ + @Generated + @ServiceMethod(returns = ReturnType.SINGLE) + Response getAudioTranslationVttWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsVtt, + RequestOptions requestOptions) { + return this.serviceClient.getAudioTranslationVttWithResponse( + deploymentOrModelName, contentLength, audioTranslationOptionsVtt, requestOptions); + } } diff --git a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIServiceVersion.java b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIServiceVersion.java index 9844431603fa..3027940ba21f 100644 --- a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIServiceVersion.java +++ b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/OpenAIServiceVersion.java @@ -21,7 +21,10 @@ public enum OpenAIServiceVersion implements ServiceVersion { V2023_07_01_PREVIEW("2023-07-01-preview"), /** Enum value 2023-08-01-preview. 
*/ - V2023_08_01_PREVIEW("2023-08-01-preview"); + V2023_08_01_PREVIEW("2023-08-01-preview"), + + /** Enum value 2023-09-01-preview. */ + V2023_09_01_PREVIEW("2023-09-01-preview"); private final String version; @@ -41,6 +44,6 @@ public String getVersion() { * @return The latest {@link OpenAIServiceVersion}. */ public static OpenAIServiceVersion getLatest() { - return V2023_08_01_PREVIEW; + return V2023_09_01_PREVIEW; } } diff --git a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/implementation/OpenAIClientImpl.java b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/implementation/OpenAIClientImpl.java index d99101686256..4b5d458c5310 100644 --- a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/implementation/OpenAIClientImpl.java +++ b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/implementation/OpenAIClientImpl.java @@ -153,7 +153,7 @@ public OpenAIClientImpl( @Host("{endpoint}/openai") @ServiceInterface(name = "OpenAIClient") public interface OpenAIClientService { - @Post("/deployments/{deploymentId}/embeddings") + @Post("/deployments/{deploymentId}/audio/transcriptions") @ExpectedResponses({200}) @UnexpectedResponseExceptionType( value = ClientAuthenticationException.class, @@ -165,16 +165,18 @@ public interface OpenAIClientService { value = ResourceModifiedException.class, code = {409}) @UnexpectedResponseExceptionType(HttpResponseException.class) - Mono> getEmbeddings( + Mono> getAudioTranscriptionSimpleJson( @HostParam("endpoint") String endpoint, @QueryParam("api-version") String apiVersion, @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, @HeaderParam("accept") String accept, - @BodyParam("application/json") BinaryData embeddingsOptions, + @BodyParam("multipart/form-data;") BinaryData audioTranscriptionOptionsSimpleJson, RequestOptions requestOptions, Context context); - 
@Post("/deployments/{deploymentId}/embeddings") + @Post("/deployments/{deploymentId}/audio/transcriptions") @ExpectedResponses({200}) @UnexpectedResponseExceptionType( value = ClientAuthenticationException.class, @@ -186,16 +188,18 @@ Mono> getEmbeddings( value = ResourceModifiedException.class, code = {409}) @UnexpectedResponseExceptionType(HttpResponseException.class) - Response getEmbeddingsSync( + Response getAudioTranscriptionSimpleJsonSync( @HostParam("endpoint") String endpoint, @QueryParam("api-version") String apiVersion, @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, @HeaderParam("accept") String accept, - @BodyParam("application/json") BinaryData embeddingsOptions, + @BodyParam("multipart/form-data;") BinaryData audioTranscriptionOptionsSimpleJson, RequestOptions requestOptions, Context context); - @Post("/deployments/{deploymentId}/completions") + @Post("/deployments/{deploymentId}/audio/transcriptions") @ExpectedResponses({200}) @UnexpectedResponseExceptionType( value = ClientAuthenticationException.class, @@ -207,16 +211,18 @@ Response getEmbeddingsSync( value = ResourceModifiedException.class, code = {409}) @UnexpectedResponseExceptionType(HttpResponseException.class) - Mono> getCompletions( + Mono> getAudioTranscriptionVerboseJson( @HostParam("endpoint") String endpoint, @QueryParam("api-version") String apiVersion, @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, @HeaderParam("accept") String accept, - @BodyParam("application/json") BinaryData completionsOptions, + @BodyParam("multipart/form-data;") BinaryData audioTranscriptionOptionsVerboseJson, RequestOptions requestOptions, Context context); - @Post("/deployments/{deploymentId}/completions") + @Post("/deployments/{deploymentId}/audio/transcriptions") 
@ExpectedResponses({200}) @UnexpectedResponseExceptionType( value = ClientAuthenticationException.class, @@ -228,16 +234,18 @@ Mono> getCompletions( value = ResourceModifiedException.class, code = {409}) @UnexpectedResponseExceptionType(HttpResponseException.class) - Response getCompletionsSync( + Response getAudioTranscriptionVerboseJsonSync( @HostParam("endpoint") String endpoint, @QueryParam("api-version") String apiVersion, @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, @HeaderParam("accept") String accept, - @BodyParam("application/json") BinaryData completionsOptions, + @BodyParam("multipart/form-data;") BinaryData audioTranscriptionOptionsVerboseJson, RequestOptions requestOptions, Context context); - @Post("/deployments/{deploymentId}/chat/completions") + @Post("/deployments/{deploymentId}/audio/transcriptions") @ExpectedResponses({200}) @UnexpectedResponseExceptionType( value = ClientAuthenticationException.class, @@ -249,16 +257,18 @@ Response getCompletionsSync( value = ResourceModifiedException.class, code = {409}) @UnexpectedResponseExceptionType(HttpResponseException.class) - Mono> getChatCompletions( + Mono> getAudioTranscriptionPlainText( @HostParam("endpoint") String endpoint, @QueryParam("api-version") String apiVersion, @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, @HeaderParam("accept") String accept, - @BodyParam("application/json") BinaryData chatCompletionsOptions, + @BodyParam("multipart/form-data;") BinaryData audioTranscriptionOptionsPlainText, RequestOptions requestOptions, Context context); - @Post("/deployments/{deploymentId}/chat/completions") + @Post("/deployments/{deploymentId}/audio/transcriptions") @ExpectedResponses({200}) @UnexpectedResponseExceptionType( value = ClientAuthenticationException.class, @@ 
-270,16 +280,18 @@ Mono> getChatCompletions( value = ResourceModifiedException.class, code = {409}) @UnexpectedResponseExceptionType(HttpResponseException.class) - Response getChatCompletionsSync( + Response getAudioTranscriptionPlainTextSync( @HostParam("endpoint") String endpoint, @QueryParam("api-version") String apiVersion, @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, @HeaderParam("accept") String accept, - @BodyParam("application/json") BinaryData chatCompletionsOptions, + @BodyParam("multipart/form-data;") BinaryData audioTranscriptionOptionsPlainText, RequestOptions requestOptions, Context context); - @Post("/deployments/{deploymentId}/extensions/chat/completions") + @Post("/deployments/{deploymentId}/audio/transcriptions") @ExpectedResponses({200}) @UnexpectedResponseExceptionType( value = ClientAuthenticationException.class, @@ -291,16 +303,18 @@ Response getChatCompletionsSync( value = ResourceModifiedException.class, code = {409}) @UnexpectedResponseExceptionType(HttpResponseException.class) - Mono> getChatCompletionsWithAzureExtensions( + Mono> getAudioTranscriptionSrt( @HostParam("endpoint") String endpoint, @QueryParam("api-version") String apiVersion, @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, @HeaderParam("accept") String accept, - @BodyParam("application/json") BinaryData chatCompletionsOptions, + @BodyParam("multipart/form-data;") BinaryData audioTranscriptionOptionsSrt, RequestOptions requestOptions, Context context); - @Post("/deployments/{deploymentId}/extensions/chat/completions") + @Post("/deployments/{deploymentId}/audio/transcriptions") @ExpectedResponses({200}) @UnexpectedResponseExceptionType( value = ClientAuthenticationException.class, @@ -312,17 +326,19 @@ Mono> getChatCompletionsWithAzureExtensions( value 
= ResourceModifiedException.class, code = {409}) @UnexpectedResponseExceptionType(HttpResponseException.class) - Response getChatCompletionsWithAzureExtensionsSync( + Response getAudioTranscriptionSrtSync( @HostParam("endpoint") String endpoint, @QueryParam("api-version") String apiVersion, @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, @HeaderParam("accept") String accept, - @BodyParam("application/json") BinaryData chatCompletionsOptions, + @BodyParam("multipart/form-data;") BinaryData audioTranscriptionOptionsSrt, RequestOptions requestOptions, Context context); - @Post("/images/generations:submit") - @ExpectedResponses({202}) + @Post("/deployments/{deploymentId}/audio/transcriptions") + @ExpectedResponses({200}) @UnexpectedResponseExceptionType( value = ClientAuthenticationException.class, code = {401}) @@ -333,16 +349,19 @@ Response getChatCompletionsWithAzureExtensionsSync( value = ResourceModifiedException.class, code = {409}) @UnexpectedResponseExceptionType(HttpResponseException.class) - Mono> beginAzureBatchImageGeneration( + Mono> getAudioTranscriptionVtt( @HostParam("endpoint") String endpoint, @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, @HeaderParam("accept") String accept, - @BodyParam("application/json") BinaryData imageGenerationOptions, + @BodyParam("multipart/form-data;") BinaryData audioTranscriptionOptionsVtt, RequestOptions requestOptions, Context context); - @Post("/images/generations:submit") - @ExpectedResponses({202}) + @Post("/deployments/{deploymentId}/audio/transcriptions") + @ExpectedResponses({200}) @UnexpectedResponseExceptionType( value = ClientAuthenticationException.class, code = {401}) @@ -353,137 +372,1632 @@ Mono> beginAzureBatchImageGeneration( value = 
ResourceModifiedException.class, code = {409}) @UnexpectedResponseExceptionType(HttpResponseException.class) - Response beginAzureBatchImageGenerationSync( + Response getAudioTranscriptionVttSync( @HostParam("endpoint") String endpoint, @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, @HeaderParam("accept") String accept, - @BodyParam("application/json") BinaryData imageGenerationOptions, + @BodyParam("multipart/form-data;") BinaryData audioTranscriptionOptionsVtt, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/audio/translations") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> getAudioTranslationSimpleJson( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, + @HeaderParam("accept") String accept, + @BodyParam("multipart/form-data;") BinaryData audioTranslationOptionsSimpleJson, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/audio/translations") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + 
@UnexpectedResponseExceptionType(HttpResponseException.class) + Response getAudioTranslationSimpleJsonSync( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, + @HeaderParam("accept") String accept, + @BodyParam("multipart/form-data;") BinaryData audioTranslationOptionsSimpleJson, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/audio/translations") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> getAudioTranslationVerboseJson( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, + @HeaderParam("accept") String accept, + @BodyParam("multipart/form-data;") BinaryData audioTranslationOptionsVerboseJson, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/audio/translations") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response getAudioTranslationVerboseJsonSync( + @HostParam("endpoint") String endpoint, + 
@QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, + @HeaderParam("accept") String accept, + @BodyParam("multipart/form-data;") BinaryData audioTranslationOptionsVerboseJson, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/audio/translations") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> getAudioTranslationPlainText( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, + @HeaderParam("accept") String accept, + @BodyParam("multipart/form-data;") BinaryData audioTranslationOptionsPlainText, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/audio/translations") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response getAudioTranslationPlainTextSync( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + 
@HeaderParam("content-length") long contentLength, + @HeaderParam("accept") String accept, + @BodyParam("multipart/form-data;") BinaryData audioTranslationOptionsPlainText, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/audio/translations") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> getAudioTranslationSrt( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, + @HeaderParam("accept") String accept, + @BodyParam("multipart/form-data;") BinaryData audioTranslationOptionsSrt, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/audio/translations") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response getAudioTranslationSrtSync( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, + @HeaderParam("accept") String accept, + @BodyParam("multipart/form-data;") BinaryData audioTranslationOptionsSrt, + RequestOptions 
requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/audio/translations") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> getAudioTranslationVtt( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, + @HeaderParam("accept") String accept, + @BodyParam("multipart/form-data;") BinaryData audioTranslationOptionsVtt, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/audio/translations") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response getAudioTranslationVttSync( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("content-type") String contentType, + @HeaderParam("content-length") long contentLength, + @HeaderParam("accept") String accept, + @BodyParam("multipart/form-data;") BinaryData audioTranslationOptionsVtt, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/completions") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + 
code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> getCompletions( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("accept") String accept, + @BodyParam("application/json") BinaryData completionsOptions, RequestOptions requestOptions, Context context); + + @Post("/deployments/{deploymentId}/completions") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response getCompletionsSync( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("accept") String accept, + @BodyParam("application/json") BinaryData completionsOptions, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/chat/completions") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> getChatCompletions( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + 
@HeaderParam("accept") String accept, + @BodyParam("application/json") BinaryData chatCompletionsOptions, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/chat/completions") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response getChatCompletionsSync( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("accept") String accept, + @BodyParam("application/json") BinaryData chatCompletionsOptions, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/extensions/chat/completions") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> getChatCompletionsWithAzureExtensions( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("accept") String accept, + @BodyParam("application/json") BinaryData chatCompletionsOptions, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/extensions/chat/completions") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = 
ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response getChatCompletionsWithAzureExtensionsSync( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("accept") String accept, + @BodyParam("application/json") BinaryData chatCompletionsOptions, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/embeddings") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> getEmbeddings( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("accept") String accept, + @BodyParam("application/json") BinaryData embeddingsOptions, + RequestOptions requestOptions, + Context context); + + @Post("/deployments/{deploymentId}/embeddings") + @ExpectedResponses({200}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response getEmbeddingsSync( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @PathParam("deploymentId") String deploymentOrModelName, + @HeaderParam("accept") String accept, + 
@BodyParam("application/json") BinaryData embeddingsOptions, + RequestOptions requestOptions, + Context context); + + @Post("/images/generations:submit") + @ExpectedResponses({202}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Mono> beginAzureBatchImageGeneration( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @HeaderParam("accept") String accept, + @BodyParam("application/json") BinaryData imageGenerationOptions, + RequestOptions requestOptions, + Context context); + + @Post("/images/generations:submit") + @ExpectedResponses({202}) + @UnexpectedResponseExceptionType( + value = ClientAuthenticationException.class, + code = {401}) + @UnexpectedResponseExceptionType( + value = ResourceNotFoundException.class, + code = {404}) + @UnexpectedResponseExceptionType( + value = ResourceModifiedException.class, + code = {409}) + @UnexpectedResponseExceptionType(HttpResponseException.class) + Response beginAzureBatchImageGenerationSync( + @HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, + @HeaderParam("accept") String accept, + @BodyParam("application/json") BinaryData imageGenerationOptions, + RequestOptions requestOptions, + Context context); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsSimpleJson This format will return a JSON structure containing a single "text" + * with the transcription. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response containing only the transcribed text along with {@link Response} on successful + * completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono<Response<BinaryData>> getAudioTranscriptionSimpleJsonWithResponseAsync( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsSimpleJson, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return FluxUtil.withContext( + context -> + service.getAudioTranscriptionSimpleJson( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranscriptionOptionsSimpleJson, + requestOptions, + context)); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsSimpleJson This format will return a JSON structure containing a single "text" + * with the transcription. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response containing only the transcribed text along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response<BinaryData> getAudioTranscriptionSimpleJsonWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsSimpleJson, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return service.getAudioTranscriptionSimpleJsonSync( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranscriptionOptionsSimpleJson, + requestOptions, + Context.NONE); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     *     task: String(transcribe/translate) (Required)
+     *     language: String (Required)
+     *     duration: double (Required)
+     *     segments (Required): [
+     *          (Required){
+     *             id: int (Required)
+     *             start: double (Required)
+     *             end: double (Required)
+     *             text: String (Required)
+     *             temperature: double (Required)
+     *             avg_logprob: double (Required)
+     *             compression_ratio: double (Required)
+     *             no_speech_prob: double (Required)
+     *             tokens (Required): [
+     *                 int (Required)
+     *             ]
+     *             seek: int (Required)
+     *         }
+     *     ]
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsVerboseJson This format will return a JSON structure containing an enriched + * structure with the transcription. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response along with {@link Response} on successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono<Response<BinaryData>> getAudioTranscriptionVerboseJsonWithResponseAsync( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsVerboseJson, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return FluxUtil.withContext( + context -> + service.getAudioTranscriptionVerboseJson( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranscriptionOptionsVerboseJson, + requestOptions, + context)); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     *     task: String(transcribe/translate) (Required)
+     *     language: String (Required)
+     *     duration: double (Required)
+     *     segments (Required): [
+     *          (Required){
+     *             id: int (Required)
+     *             start: double (Required)
+     *             end: double (Required)
+     *             text: String (Required)
+     *             temperature: double (Required)
+     *             avg_logprob: double (Required)
+     *             compression_ratio: double (Required)
+     *             no_speech_prob: double (Required)
+     *             tokens (Required): [
+     *                 int (Required)
+     *             ]
+     *             seek: int (Required)
+     *         }
+     *     ]
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsVerboseJson This format will return a JSON structure containing an enriched + * structure with the transcription. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response<BinaryData> getAudioTranscriptionVerboseJsonWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsVerboseJson, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return service.getAudioTranscriptionVerboseJsonSync( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranscriptionOptionsVerboseJson, + requestOptions, + Context.NONE); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsPlainText This will make the response return the transcription as plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response} on successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> getAudioTranscriptionPlainTextWithResponseAsync( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsPlainText, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return FluxUtil.withContext( + context -> + service.getAudioTranscriptionPlainText( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranscriptionOptionsPlainText, + requestOptions, + context)); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsPlainText This will make the response return the transcription as plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response getAudioTranscriptionPlainTextWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsPlainText, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return service.getAudioTranscriptionPlainTextSync( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranscriptionOptionsPlainText, + requestOptions, + Context.NONE); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsSrt The transcription will be provided in SRT format (SubRip Text) in the form of + * plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response} on successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> getAudioTranscriptionSrtWithResponseAsync( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsSrt, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return FluxUtil.withContext( + context -> + service.getAudioTranscriptionSrt( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranscriptionOptionsSrt, + requestOptions, + context)); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsSrt The transcription will be provided in SRT format (SubRip Text) in the form of + * plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response getAudioTranscriptionSrtWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsSrt, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return service.getAudioTranscriptionSrtSync( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranscriptionOptionsSrt, + requestOptions, + Context.NONE); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsVtt The transcription will be provided in VTT format (Web Video Text Tracks) in + * the form of plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response} on successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> getAudioTranscriptionVttWithResponseAsync( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsVtt, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return FluxUtil.withContext( + context -> + service.getAudioTranscriptionVtt( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranscriptionOptionsVtt, + requestOptions, + context)); + } + + /** + * Transcribes audio into the input language. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     language: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranscriptionOptionsVtt The transcription will be provided in VTT format (Web Video Text Tracks) in + * the form of plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response getAudioTranscriptionVttWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranscriptionOptionsVtt, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return service.getAudioTranscriptionVttSync( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranscriptionOptionsVtt, + requestOptions, + Context.NONE); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsSimpleJson This format will return a JSON structure containing a single "text" + * with the translation. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response containing only the transcribed text along with {@link Response} on successful + * completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono<Response<BinaryData>> getAudioTranslationSimpleJsonWithResponseAsync( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsSimpleJson, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return FluxUtil.withContext( + context -> + service.getAudioTranslationSimpleJson( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranslationOptionsSimpleJson, + requestOptions, + context)); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsSimpleJson This format will return a JSON structure containing a single "text" + * with the translation. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response containing only the transcribed text along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response<BinaryData> getAudioTranslationSimpleJsonWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsSimpleJson, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return service.getAudioTranslationSimpleJsonSync( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranslationOptionsSimpleJson, + requestOptions, + Context.NONE); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     *     task: String(transcribe/translate) (Required)
+     *     language: String (Required)
+     *     duration: double (Required)
+     *     segments (Required): [
+     *          (Required){
+     *             id: int (Required)
+     *             start: double (Required)
+     *             end: double (Required)
+     *             text: String (Required)
+     *             temperature: double (Required)
+     *             avg_logprob: double (Required)
+     *             compression_ratio: double (Required)
+     *             no_speech_prob: double (Required)
+     *             tokens (Required): [
+     *                 int (Required)
+     *             ]
+     *             seek: int (Required)
+     *         }
+     *     ]
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsVerboseJson This format will return an JSON structure containing an enriched + * structure with the translation. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response along with {@link Response} on successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> getAudioTranslationVerboseJsonWithResponseAsync( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsVerboseJson, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return FluxUtil.withContext( + context -> + service.getAudioTranslationVerboseJson( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranslationOptionsVerboseJson, + requestOptions, + context)); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     text: String (Required)
+     *     task: String(transcribe/translate) (Required)
+     *     language: String (Required)
+     *     duration: double (Required)
+     *     segments (Required): [
+     *          (Required){
+     *             id: int (Required)
+     *             start: double (Required)
+     *             end: double (Required)
+     *             text: String (Required)
+     *             temperature: double (Required)
+     *             avg_logprob: double (Required)
+     *             compression_ratio: double (Required)
+     *             no_speech_prob: double (Required)
+     *             tokens (Required): [
+     *                 int (Required)
+     *             ]
+     *             seek: int (Required)
+     *         }
+     *     ]
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsVerboseJson This format will return an JSON structure containing an enriched + * structure with the translation. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return transcription response along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response getAudioTranslationVerboseJsonWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsVerboseJson, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return service.getAudioTranslationVerboseJsonSync( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranslationOptionsVerboseJson, + requestOptions, + Context.NONE); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsPlainText This will make the response return the translation as plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response} on successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> getAudioTranslationPlainTextWithResponseAsync( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsPlainText, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return FluxUtil.withContext( + context -> + service.getAudioTranslationPlainText( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranslationOptionsPlainText, + requestOptions, + context)); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsPlainText This will make the response return the translation as plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response getAudioTranslationPlainTextWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsPlainText, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return service.getAudioTranslationPlainTextSync( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranslationOptionsPlainText, + requestOptions, + Context.NONE); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsSrt The translation will be provided in SRT format (SubRip Text) in the form of + * plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response} on successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> getAudioTranslationSrtWithResponseAsync( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsSrt, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return FluxUtil.withContext( + context -> + service.getAudioTranslationSrt( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranslationOptionsSrt, + requestOptions, + context)); } /** - * Return the embeddings for a given prompt. + * Transcribes and translates input audio into English text. * *

Request Body Schema * *

{@code
      * {
-     *     user: String (Optional)
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
      *     model: String (Optional)
-     *     input (Required): [
-     *         String (Required)
-     *     ]
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
      * }
      * }
* *

Response Body Schema * *

{@code
+     * String
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsSrt The translation will be provided in SRT format (SubRip Text) in the form of + * plain/text. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return a sequence of textual characters along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response getAudioTranslationSrtWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsSrt, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; + final String accept = "application/json"; + return service.getAudioTranslationSrtSync( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + contentType, + contentLength, + accept, + audioTranslationOptionsSrt, + requestOptions, + Context.NONE); + } + + /** + * Transcribes and translates input audio into English text. + * + *

Request Body Schema + * + *

{@code
      * {
-     *     data (Required): [
-     *          (Required){
-     *             embedding (Required): [
-     *                 double (Required)
-     *             ]
-     *             index: int (Required)
-     *         }
-     *     ]
-     *     usage (Required): {
-     *         prompt_tokens: int (Required)
-     *         total_tokens: int (Required)
-     *     }
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
+     *     model: String (Optional)
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
      * }
      * }
* + *

Response Body Schema + * + *

{@code
+     * String
+     * }
+ * * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name * (when using non-Azure OpenAI) to use for this request. - * @param embeddingsOptions The configuration information for an embeddings request. Embeddings measure the - * relatedness of text strings and are commonly used for search, clustering, recommendations, and other similar - * scenarios. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsVtt The translation will be provided in VTT format (Web Video Text Tracks) in the + * form of plain/text. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. - * @return representation of the response data from an embeddings request. Embeddings measure the relatedness of - * text strings and are commonly used for search, clustering, recommendations, and other similar scenarios along - * with {@link Response} on successful completion of {@link Mono}. + * @return a sequence of textual characters along with {@link Response} on successful completion of {@link Mono}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) - public Mono> getEmbeddingsWithResponseAsync( - String deploymentOrModelName, BinaryData embeddingsOptions, RequestOptions requestOptions) { + public Mono> getAudioTranslationVttWithResponseAsync( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsVtt, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; final String accept = "application/json"; return FluxUtil.withContext( context -> - service.getEmbeddings( + service.getAudioTranslationVtt( this.getEndpoint(), this.getServiceVersion().getVersion(), deploymentOrModelName, + contentType, + contentLength, accept, - embeddingsOptions, + audioTranslationOptionsVtt, requestOptions, context)); } /** - * Return the embeddings for a given prompt. + * Transcribes and translates input audio into English text. * *

Request Body Schema * *

{@code
      * {
-     *     user: String (Optional)
+     *     file: byte[] (Required)
+     *     prompt: String (Optional)
+     *     temperature: Double (Optional)
      *     model: String (Optional)
-     *     input (Required): [
-     *         String (Required)
-     *     ]
+     *     response_format: String(json/verbose_json/text/srt/vtt) (Optional)
      * }
      * }
* *

Response Body Schema * *

{@code
-     * {
-     *     data (Required): [
-     *          (Required){
-     *             embedding (Required): [
-     *                 double (Required)
-     *             ]
-     *             index: int (Required)
-     *         }
-     *     ]
-     *     usage (Required): {
-     *         prompt_tokens: int (Required)
-     *         total_tokens: int (Required)
-     *     }
-     * }
+     * String
      * }
* * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name * (when using non-Azure OpenAI) to use for this request. - * @param embeddingsOptions The configuration information for an embeddings request. Embeddings measure the - * relatedness of text strings and are commonly used for search, clustering, recommendations, and other similar - * scenarios. + * @param contentLength The content length of the operation. This needs to be provided by the caller. + * @param audioTranslationOptionsVtt The translation will be provided in VTT format (Web Video Text Tracks) in the + * form of plain/text. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. - * @return representation of the response data from an embeddings request. Embeddings measure the relatedness of - * text strings and are commonly used for search, clustering, recommendations, and other similar scenarios along - * with {@link Response}. + * @return a sequence of textual characters along with {@link Response}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) - public Response getEmbeddingsWithResponse( - String deploymentOrModelName, BinaryData embeddingsOptions, RequestOptions requestOptions) { + public Response getAudioTranslationVttWithResponse( + String deploymentOrModelName, + long contentLength, + BinaryData audioTranslationOptionsVtt, + RequestOptions requestOptions) { + final String contentType = "multipart/form-data;"; final String accept = "application/json"; - return service.getEmbeddingsSync( + return service.getAudioTranslationVttSync( this.getEndpoint(), this.getServiceVersion().getVersion(), deploymentOrModelName, + contentType, + contentLength, accept, - embeddingsOptions, + audioTranslationOptionsVtt, requestOptions, Context.NONE); } @@ -537,6 +2051,18 @@ public Response getEmbeddingsWithResponse( * violence (Optional): (recursive schema, see violence above) * hate (Optional): (recursive schema, see hate above) * self_harm (Optional): (recursive schema, see self_harm above) + * error (Optional): { + * code: String (Required) + * message: String (Required) + * target: String (Optional) + * details (Optional): [ + * (recursive schema, see above) + * ] + * innererror (Optional): { + * code: String (Optional) + * innererror (Optional): (recursive schema, see innererror above) + * } + * } * } * } * ] @@ -650,6 +2176,18 @@ public Mono> getCompletionsWithResponseAsync( * violence (Optional): (recursive schema, see violence above) * hate (Optional): (recursive schema, see hate above) * self_harm (Optional): (recursive schema, see self_harm above) + * error (Optional): { + * code: String (Required) + * message: String (Required) + * target: String (Optional) + * details (Optional): [ + * (recursive schema, see above) + * ] + * innererror (Optional): { + * code: String (Optional) + * innererror (Optional): (recursive schema, see innererror above) + * } + * } * } * } * ] @@ -800,6 +2338,18 @@ public Response getCompletionsWithResponse( * violence (Optional): 
(recursive schema, see violence above) * hate (Optional): (recursive schema, see hate above) * self_harm (Optional): (recursive schema, see self_harm above) + * error (Optional): { + * code: String (Required) + * message: String (Required) + * target: String (Optional) + * details (Optional): [ + * (recursive schema, see above) + * ] + * innererror (Optional): { + * code: String (Optional) + * innererror (Optional): (recursive schema, see innererror above) + * } + * } * } * } * ] @@ -935,6 +2485,18 @@ public Mono> getChatCompletionsWithResponseAsync( * violence (Optional): (recursive schema, see violence above) * hate (Optional): (recursive schema, see hate above) * self_harm (Optional): (recursive schema, see self_harm above) + * error (Optional): { + * code: String (Required) + * message: String (Required) + * target: String (Optional) + * details (Optional): [ + * (recursive schema, see above) + * ] + * innererror (Optional): { + * code: String (Optional) + * innererror (Optional): (recursive schema, see innererror above) + * } + * } * } * } * ] @@ -1068,6 +2630,18 @@ public Response getChatCompletionsWithResponse( * violence (Optional): (recursive schema, see violence above) * hate (Optional): (recursive schema, see hate above) * self_harm (Optional): (recursive schema, see self_harm above) + * error (Optional): { + * code: String (Required) + * message: String (Required) + * target: String (Optional) + * details (Optional): [ + * (recursive schema, see above) + * ] + * innererror (Optional): { + * code: String (Optional) + * innererror (Optional): (recursive schema, see innererror above) + * } + * } * } * } * ] @@ -1204,6 +2778,18 @@ public Mono> getChatCompletionsWithAzureExtensionsWithRespo * violence (Optional): (recursive schema, see violence above) * hate (Optional): (recursive schema, see hate above) * self_harm (Optional): (recursive schema, see self_harm above) + * error (Optional): { + * code: String (Required) + * message: String (Required) + * 
target: String (Optional) + * details (Optional): [ + * (recursive schema, see above) + * ] + * innererror (Optional): { + * code: String (Optional) + * innererror (Optional): (recursive schema, see innererror above) + * } + * } * } * } * ] @@ -1248,6 +2834,132 @@ public Response getChatCompletionsWithAzureExtensionsWithResponse( Context.NONE); } + /** + * Return the embeddings for a given prompt. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     user: String (Optional)
+     *     model: String (Optional)
+     *     input (Required): [
+     *         String (Required)
+     *     ]
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     data (Required): [
+     *          (Required){
+     *             embedding (Required): [
+     *                 double (Required)
+     *             ]
+     *             index: int (Required)
+     *         }
+     *     ]
+     *     usage (Required): {
+     *         prompt_tokens: int (Required)
+     *         total_tokens: int (Required)
+     *     }
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param embeddingsOptions The configuration information for an embeddings request. Embeddings measure the + * relatedness of text strings and are commonly used for search, clustering, recommendations, and other similar + * scenarios. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return representation of the response data from an embeddings request. Embeddings measure the relatedness of + * text strings and are commonly used for search, clustering, recommendations, and other similar scenarios along + * with {@link Response} on successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Mono> getEmbeddingsWithResponseAsync( + String deploymentOrModelName, BinaryData embeddingsOptions, RequestOptions requestOptions) { + final String accept = "application/json"; + return FluxUtil.withContext( + context -> + service.getEmbeddings( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + accept, + embeddingsOptions, + requestOptions, + context)); + } + + /** + * Return the embeddings for a given prompt. + * + *

Request Body Schema + * + *

{@code
+     * {
+     *     user: String (Optional)
+     *     model: String (Optional)
+     *     input (Required): [
+     *         String (Required)
+     *     ]
+     * }
+     * }
+ * + *

Response Body Schema + * + *

{@code
+     * {
+     *     data (Required): [
+     *          (Required){
+     *             embedding (Required): [
+     *                 double (Required)
+     *             ]
+     *             index: int (Required)
+     *         }
+     *     ]
+     *     usage (Required): {
+     *         prompt_tokens: int (Required)
+     *         total_tokens: int (Required)
+     *     }
+     * }
+     * }
+ * + * @param deploymentOrModelName Specifies either the model deployment name (when using Azure OpenAI) or model name + * (when using non-Azure OpenAI) to use for this request. + * @param embeddingsOptions The configuration information for an embeddings request. Embeddings measure the + * relatedness of text strings and are commonly used for search, clustering, recommendations, and other similar + * scenarios. + * @param requestOptions The options to configure the HTTP request before HTTP client sends it. + * @throws HttpResponseException thrown if the request is rejected by server. + * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. + * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. + * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. + * @return representation of the response data from an embeddings request. Embeddings measure the relatedness of + * text strings and are commonly used for search, clustering, recommendations, and other similar scenarios along + * with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response getEmbeddingsWithResponse( + String deploymentOrModelName, BinaryData embeddingsOptions, RequestOptions requestOptions) { + final String accept = "application/json"; + return service.getEmbeddingsSync( + this.getEndpoint(), + this.getServiceVersion().getVersion(), + deploymentOrModelName, + accept, + embeddingsOptions, + requestOptions, + Context.NONE); + } + /** * Starts the generation of a batch of images from a text caption. 
* diff --git a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/ContentFilterResults.java b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/ContentFilterResults.java index 65883af4465f..2c1c3c668bd3 100644 --- a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/ContentFilterResults.java +++ b/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/models/ContentFilterResults.java @@ -5,6 +5,7 @@ import com.azure.core.annotation.Generated; import com.azure.core.annotation.Immutable; +import com.azure.core.models.ResponseError; import com.fasterxml.jackson.annotation.JsonProperty; /** Information about the content filtering category, if it has been detected. */ @@ -98,4 +99,23 @@ public ContentFilterResult getSelfHarm() { /** Creates an instance of ContentFilterResults class. */ @Generated private ContentFilterResults() {} + + /* + * Describes an error returned if the content filtering system is + * down or otherwise unable to complete the operation in time. + */ + @Generated + @JsonProperty(value = "error") + private ResponseError error; + + /** + * Get the error property: Describes an error returned if the content filtering system is down or otherwise unable + * to complete the operation in time. + * + * @return the error value. + */ + @Generated + public ResponseError getError() { + return this.error; + } } diff --git a/sdk/openai/azure-ai-openai/tsp-location.yaml b/sdk/openai/azure-ai-openai/tsp-location.yaml index 368074679599..22caa8044520 100644 --- a/sdk/openai/azure-ai-openai/tsp-location.yaml +++ b/sdk/openai/azure-ai-openai/tsp-location.yaml @@ -1,5 +1,5 @@ directory: specification/cognitiveservices/OpenAI.Inference -additionalDirectories: - - specification/cognitiveservices/OpenAI.Authoring -commit: b646a42aa3b7a0ce488d05f1724827ea41d12cf1 +additionalDirectories: [] repo: Azure/azure-rest-api-specs +commit: 843f0beacfc01cdcd102cc52fd39f881d5aa402a +