diff --git a/eng/config.json b/eng/config.json index 614b5a47cf65..c843dfa2b257 100644 --- a/eng/config.json +++ b/eng/config.json @@ -46,7 +46,7 @@ }, { "Name": "azopenai", - "CoverageGoal": 0.34 + "CoverageGoal": 0.24 }, { "Name": "aztemplate", diff --git a/sdk/ai/azopenai/CHANGELOG.md b/sdk/ai/azopenai/CHANGELOG.md index c80d53d931a9..ebb1aeb9d43f 100644 --- a/sdk/ai/azopenai/CHANGELOG.md +++ b/sdk/ai/azopenai/CHANGELOG.md @@ -1,16 +1,26 @@ # Release History -## 0.4.0 (Unreleased) +## 0.4.0 (2023-12-07) + +Support for many of the features announced at OpenAI's November Dev Day and Microsoft's 2023 Ignite conference. ### Features Added +- Chat completions has been extended to accommodate new features: + - Parallel function calling via Tools. See the function `ExampleClient_GetChatCompletions_functions` in `example_client_getchatcompletions_extensions_test.go` for an example of specifying a Tool. + - "JSON mode", via `ChatCompletionsOptions.ResponseFormat`, for guaranteed JSON output. +- ChatCompletions can now be used with both text and images using `gpt-4-vision-preview`. + - Azure enhancements to `gpt-4-vision-preview` results that include grounding and OCR features. +- `-1106` model feature support for `gpt-35-turbo` and `gpt-4-turbo`, including use of a seed via `ChatCompletionsOptions.Seed` and system fingerprints returned in `ChatCompletions.SystemFingerprint`. +- `dall-e-3` image generation capabilities via `GetImageGenerations`, featuring higher model quality, automatic prompt revisions by `gpt-4`, and customizable quality/style settings. + ### Breaking Changes - `azopenai.KeyCredential` has been replaced by [azcore.KeyCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#KeyCredential). - -### Bugs Fixed - -### Other Changes +- `Deployment` has been renamed to `DeploymentName` throughout all APIs. +- `CreateImage` has been replaced with `GetImageGenerations`. +- `ChatMessage` has been split into per-role types. The function `ExampleClient_GetChatCompletions` in `example_client_getcompletions_test.go` shows an example of this. ## 0.3.0 (2023-09-26) diff --git a/sdk/ai/azopenai/assets.json b/sdk/ai/azopenai/assets.json index bc1ac87892ae..047ce368b0df 100644 --- a/sdk/ai/azopenai/assets.json +++ b/sdk/ai/azopenai/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/ai/azopenai", - "Tag": "go/ai/azopenai_5ce13f37c4" + "Tag": "go/ai/azopenai_9ed7d01267" } diff --git a/sdk/ai/azopenai/autorest.md b/sdk/ai/azopenai/autorest.md index 00af5456e2d2..b7bbb9c684f5 100644 --- a/sdk/ai/azopenai/autorest.md +++ b/sdk/ai/azopenai/autorest.md @@ -4,8 +4,7 @@ These settings apply only when `--go` is specified on the command line.
``` yaml input-file: -#- https://raw.githubusercontent.com/Azure/azure-rest-api-specs/13a645b66b741e3cc2ef378cb81974b30e6a7a86/specification/cognitiveservices/AzureOpenAI/inference/2023-06-01-preview/generated.json -- ./testdata/generated/openapi3.json +- https://github.com/Azure/azure-rest-api-specs/blob/d402f685809d6d08be9c0b45065cadd7d78ab870/specification/cognitiveservices/data-plane/AzureOpenAI/inference/preview/2023-12-01-preview/generated.json output-folder: ../azopenai clear-output-folder: false @@ -22,130 +21,232 @@ slice-elements-byval: true ## Transformations +Fix deployment and endpoint parameters so they show up in the right spots + ``` yaml directive: # Add x-ms-parameter-location to parameters in x-ms-parameterized-host - - from: openapi-document - where: $.servers.0.variables.endpoint - debug: true + - from: swagger-document + where: $["x-ms-parameterized-host"].parameters.0 transform: $["x-ms-parameter-location"] = "client"; - # Make deploymentId a client parameter # This must be done in each operation as the parameter is not defined in the components section - - from: openapi-document + - from: swagger-document where: $.paths..parameters..[?(@.name=='deploymentId')] transform: $["x-ms-parameter-location"] = "client"; +``` + +## Model -> DeploymentName + +```yaml +directive: + - from: swagger-document + where: $.definitions + transform: | + $["AudioTranscriptionOptions"].properties["model"]["x-ms-client-name"] = "DeploymentName"; + $["AudioTranslationOptions"].properties["model"]["x-ms-client-name"] = "DeploymentName"; + $["ChatCompletionsOptions"].properties["model"]["x-ms-client-name"] = "DeploymentName"; + $["CompletionsOptions"].properties["model"]["x-ms-client-name"] = "DeploymentName"; + $["EmbeddingsOptions"].properties["model"]["x-ms-client-name"] = "DeploymentName"; + $["ImageGenerationOptions"].properties["model"]["x-ms-client-name"] = "DeploymentName"; +``` + +## Polymorphic adjustments + +The polymorphic _input_ models all expose the discriminator but it's ignored when serializing +(ie, each type already knows the value and fills it in). So we'll just hide it. 
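For reference, a minimal sketch of what this buys callers, based on the tests added elsewhere in this PR (only the printed message is new): the per-role request types are constructed directly, and the hidden role discriminator is written by each concrete type during marshalling, so it never appears on the public surface.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
)

func main() {
	// The role discriminator is hidden: each concrete request type (user, system,
	// assistant, ...) fills in its own role value when it is marshalled, so the
	// caller only picks the concrete type and sets its content.
	messages := []azopenai.ChatRequestMessageClassification{
		&azopenai.ChatRequestUserMessage{
			Content: azopenai.NewChatRequestUserMessageContent("What does PR complete mean?"),
		},
	}

	fmt.Printf("built %d chat message(s)\n", len(messages))
}
```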
+ +`ChatRequestMessageClassification.Role` + +```yaml +directive: + - from: swagger-document + where: $.definitions.ChatRequestMessage + transform: $.properties.role["x-ms-client-name"] = "InternalRoleRename" + - from: + - models.go + - models_serde.go + where: $ + transform: return $.replace(/InternalRoleRename/g, "role") +``` + +`AzureChatExtensionConfigurationClassification.Type` + +```yaml +directive: + - from: swagger-document + where: $.definitions.AzureChatExtensionConfiguration + transform: $.properties.type["x-ms-client-name"] = "InternalChatExtensionTypeRename" + - from: + - models.go + - models_serde.go + where: $ + transform: return $.replace(/InternalChatExtensionTypeRename/g, "configType") +``` + +`OnYourDataAuthenticationOptionsClassification.Type` + +```yaml +directive: + - from: swagger-document + where: $.definitions.OnYourDataAuthenticationOptions + transform: $.properties.type["x-ms-client-name"] = "InternalOYDAuthTypeRename" + - from: + - models.go + - models_serde.go + where: $ + transform: return $.replace(/InternalOYDAuthTypeRename/g, "configType") +``` + +## Cleanup the audio transcription APIs + +We're wrapping the audio translation and transcription APIs, so we can eliminate some of +these autogenerated models and functions. 
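For context, a hedged sketch of how the remaining hand-written wrapper is called, mirroring the usage in `client_audio_test.go`; the file path and deployment name below are placeholders, not values from this repo.

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// transcribe shows the call shape of the public wrapper kept by this PR; the
// generated overloads removed above stay internal.
func transcribe(client *azopenai.Client) error {
	mp3Bytes, err := os.ReadFile("testdata/sample.mp3") // placeholder path
	if err != nil {
		return err
	}

	format := azopenai.AudioTranscriptionFormatVerboseJSON

	resp, err := client.GetAudioTranscription(context.Background(), azopenai.AudioTranscriptionOptions{
		DeploymentName: to.Ptr("whisper-deployment"), // placeholder deployment name
		File:           mp3Bytes,
		ResponseFormat: &format,
		Language:       to.Ptr("en"),
	}, nil)
	if err != nil {
		return err
	}

	fmt.Println(*resp.Text)
	return nil
}
```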
+ +```yaml +directive: + # kill models + - from: + - models.go + - models_serde.go + where: $ + transform: | + return $ + .replace(/type XMSPathsHksgfdDeploymentsDeploymentidAudioTranscriptionsOverloadGetaudiotranscriptionasresponseobjectPostRequestbodyContentMultipartFormDataSchema struct.+?\n}/s, "") + .replace(/\/\/ MarshalJSON implements the json.Marshaller interface for type XMSPathsHksgfdDeploymentsDeploymentidAudioTranscriptionsOverloadGetaudiotranscriptionasresponseobjectPostRequestbodyContentMultipartFormDataSchema.+?\n}/s, "") + .replace(/\/\/ UnmarshalJSON implements the json.Unmarshaller interface for type XMSPathsHksgfdDeploymentsDeploymentidAudioTranscriptionsOverloadGetaudiotranscriptionasresponseobjectPostRequestbodyContentMultipartFormDataSchema.+?\n}/s, "") + .replace(/type XMSPaths1Ak7Ov3DeploymentsDeploymentidAudioTranslationsOverloadGetaudiotranslationasresponseobjectPostRequestbodyContentMultipartFormDataSchema struct.+?\n}/s, "") + .replace(/\/\/ MarshalJSON implements the json.Marshaller interface for type XMSPaths1Ak7Ov3DeploymentsDeploymentidAudioTranslationsOverloadGetaudiotranslationasresponseobjectPostRequestbodyContentMultipartFormDataSchema.+?\n}/s, "") + .replace(/\/\/ UnmarshalJSON implements the json.Unmarshaller interface for type XMSPaths1Ak7Ov3DeploymentsDeploymentidAudioTranslationsOverloadGetaudiotranslationasresponseobjectPostRequestbodyContentMultipartFormDataSchema.+?\n}/s, ""); + # kill API functions + - from: + - client.go + where: $ + transform: | + return $ + .replace(/\/\/ GetAudioTranscriptionAsPlainText -.+?\n}/s, "") + .replace(/\/\/ GetAudioTransclationAsPlainText -.+?\n}/s, ""); +``` - - from: openapi-document +## Move the Azure extensions into their own section of the options + +```yaml +# TODO: rename and move. +``` + + +We've moved these 'extension' data types into their own field. + +```yaml +directive: +- from: swagger-document + where: $.definitions.ChatCompletionsOptions.properties.dataSources + transform: $["x-ms-client-name"] = "AzureExtensionsOptions" +``` + +## Trim the Error object to match our Go conventions + +```yaml +directive: + - from: swagger-document + where: $.definitions["Azure.Core.Foundations.Error"] + transform: | + $.properties = { + code: $.properties["code"], + message: { + ...$.properties["message"], + "x-ms-client-name": "InternalErrorMessageRename" + }, + }; + $["x-ms-client-name"] = "Error"; + + - from: swagger-document + where: $.definitions + transform: delete $["Azure.Core.Foundations.InnerError"]; + + - from: + - models.go + - models_serde.go + where: $ + transform: return $.replace(/InternalErrorMessageRename/g, "message"); +``` + +## Splice in some hooks for custom code + +```yaml +directive: + # Allow interception of formatting the URL path + - from: client.go + where: $ + transform: | + return $ + .replace(/runtime\.JoinPaths\(client.endpoint, urlPath\)/g, "client.formatURL(urlPath, getDeployment(body))"); + + # Allow custom parsing of the returned error, mostly for handling the content filtering errors. 
+ - from: client.go + where: $ + transform: return $.replace(/runtime\.NewResponseError/sg, "client.newError"); +``` + +Other misc fixes + +```yaml +directive: + - from: swagger-document where: $..paths["/deployments/{deploymentId}/completions"].post.requestBody transform: $["required"] = true; - - from: openapi-document + - from: swagger-document where: $.paths["/deployments/{deploymentId}/embeddings"].post.requestBody transform: $["required"] = true; # get rid of these auto-generated LRO status methods that aren't exposed. - - from: openapi-document + - from: swagger-document where: $.paths transform: delete $["/operations/images/{operationId}"] # Remove stream property from CompletionsOptions and ChatCompletionsOptions - - from: openapi-document - where: $.components.schemas["CompletionsOptions"] + - from: swagger-document + where: $.definitions["CompletionsOptions"] transform: delete $.properties.stream; - - from: openapi-document - where: $.components.schemas["ChatCompletionsOptions"] + - from: swagger-document + where: $.definitions["ChatCompletionsOptions"] transform: delete $.properties.stream; +``` - # Replace anyOf schemas with an empty schema (no type) to get an "any" type generated - - from: openapi-document - where: '$.components.schemas["EmbeddingsOptions"].properties["input"]' - transform: delete $.anyOf; - - - from: openapi-document - where: $.paths["/images/generations:submit"].post - transform: $["x-ms-long-running-operation"] = true; - - # Fix autorest bug - - from: openapi-document - where: $.components.schemas["BatchImageGenerationOperationResponse"].properties - transform: | - $.result["$ref"] = "#/components/schemas/ImageGenerations"; delete $.allOf; - $.status["$ref"] = "#/components/schemas/AzureOpenAIOperationState"; delete $.allOf; - $.error["$ref"] = "#/components/schemas/Azure.Core.Foundations.Error"; delete $.allOf; - - from: openapi-document - where: $.components.schemas["ChatMessage"].properties.role - transform: $["$ref"] = "#/components/schemas/ChatRole"; delete $.oneOf; - - from: openapi-document - where: $.components.schemas["Choice"].properties.finish_reason - transform: $["$ref"] = "#/components/schemas/CompletionsFinishReason"; delete $.oneOf; - - from: openapi-document - where: $.components.schemas["ImageOperation"].properties.status - transform: $["$ref"] = $.anyOf[0]["$ref"];delete $.anyOf; - - from: openapi-document - where: $.components.schemas.ImageGenerationOptions.properties - transform: | - $.size["$ref"] = "#/components/schemas/ImageSize"; delete $.allOf; - $.response_format["$ref"] = "#/components/schemas/ImageGenerationResponseFormat"; delete $.allOf; - - from: openapi-document - where: $.components.schemas["ImageOperationResponse"].properties - transform: | - $.status["$ref"] = "#/components/schemas/State"; delete $.status.allOf; - $.result["$ref"] = "#/components/schemas/ImageResponse"; delete $.status.allOf; - - from: openapi-document - where: $.components.schemas["ImageOperationStatus"].properties.status - transform: $["$ref"] = "#/components/schemas/State"; delete $.allOf; - - from: openapi-document - where: $.components.schemas["ContentFilterResult"].properties.severity - transform: $.$ref = $.allOf[0].$ref; delete $.allOf; - - from: openapi-document - where: $.components.schemas["ChatChoice"].properties.finish_reason - transform: $["$ref"] = "#/components/schemas/CompletionsFinishReason"; delete $.oneOf; - - from: openapi-document - where: $.components.schemas["AzureChatExtensionConfiguration"].properties.type - transform: $["$ref"] 
= "#/components/schemas/AzureChatExtensionType"; delete $.allOf; - - from: openapi-document - where: $.components.schemas["AzureChatExtensionConfiguration"].properties.type - transform: $["$ref"] = "#/components/schemas/AzureChatExtensionType"; delete $.allOf; - - from: openapi-document - where: $.components.schemas["AzureCognitiveSearchChatExtensionConfiguration"].properties.queryType - transform: $["$ref"] = "#/components/schemas/AzureCognitiveSearchQueryType"; delete $.allOf; - - from: openapi-document - where: $.components.schemas["ContentFilterResults"].properties.sexual - transform: $.$ref = $.allOf[0].$ref; delete $.allOf; - - from: openapi-document - where: $.components.schemas["ContentFilterResults"].properties.hate - transform: $.$ref = $.allOf[0].$ref; delete $.allOf; - - from: openapi-document - where: $.components.schemas["ContentFilterResults"].properties.self_harm - transform: $.$ref = $.allOf[0].$ref; delete $.allOf; - - from: openapi-document - where: $.components.schemas["ContentFilterResults"].properties.violence - transform: $.$ref = $.allOf[0].$ref; delete $.allOf; - - # - # [BEGIN] Whisper - # +Changes for audio/whisper APIs. +```yaml +directive: # the whisper operations are really long since they are a conglomeration of _all_ the # possible return types. - rename-operation: - from: getAudioTranscriptionAsPlainText_getAudioTranscriptionAsResponseObject + from: GetAudioTranscriptionAsResponseObject to: GetAudioTranscriptionInternal - rename-operation: - from: getAudioTranslationAsPlainText_getAudioTranslationAsResponseObject + from: GetAudioTranslationAsResponseObject to: GetAudioTranslationInternal - # fixup the responses - - from: openapi-document - where: $.paths["/deployments/{deploymentId}/audio/transcriptions"] - transform: | - delete $.post.responses["200"].statusCode; - $.post.responses["200"].content["application/json"].schema["$ref"] = "#/components/schemas/AudioTranscription"; delete $.post.responses["200"].content["application/json"].schema.anyOf; - - from: openapi-document - where: $.paths["/deployments/{deploymentId}/audio/translations"] - transform: | - delete $.post.responses["200"].statusCode; - $.post.responses["200"].content["application/json"].schema["$ref"] = "#/components/schemas/AudioTranscription"; delete $.post.responses["200"].content["application/json"].schema.anyOf; + - from: swagger-document + where: $["x-ms-paths"]["/deployments/{deploymentId}/audio/translations?_overload=getAudioTranslationAsResponseObject"].post + transform: $.operationId = "GetAudioTranslationInternal" + - from: swagger-document + where: $["x-ms-paths"]["/deployments/{deploymentId}/audio/transcriptions?_overload=getAudioTranscriptionAsResponseObject"].post + transform: $.operationId = "GetAudioTranscriptionInternal" # hide the generated functions, in favor of our public wrappers. - from: @@ -173,35 +274,19 @@ directive: where: $ transform: | return $ - .replace(/client\.getAudioTranscriptionInternalHandleResponse/g, "getAudioTranscriptionInternalHandleResponse") + .replace(/client\.getAudioTranscriptionInternalHandleResponse/g, "getAudioTranscriptionInternalHandleResponse") .replace(/client\.getAudioTranslationInternalHandleResponse/g, "getAudioTranslationInternalHandleResponse") - # Whisper openapi3 generation: we have two oneOf that point to the same type. - # and we want to activate our multipart support in the generator. 
- - from: openapi-document - where: $.paths - transform: | - let makeMultipart = (item) => { - if (item["application/json"] == null) { return item; } - item["multipart/form-data"] = { - ...item["application/json"] - }; - delete item["application/json"]; - } - makeMultipart($["/deployments/{deploymentId}/audio/transcriptions"].post.requestBody.content); - makeMultipart($["/deployments/{deploymentId}/audio/translations"].post.requestBody.content); - - - from: openapi-document - where: $.components.schemas - transform: | - let fix = (v) => { if (v.allOf != null) { v.$ref = v.allOf[0].$ref; delete v.allOf; } }; - - fix($.AudioTranscriptionOptions.properties.response_format); - fix($.AudioTranscription.properties.task); + # fix the file parameter to be a []byte. + - from: client.go + where: $ + transform: return $.replace(/^(func \(client \*Client\) getAudioTrans.+?)file string,(.+)$/mg, "$1file []byte,$2") +``` - fix($.AudioTranslationOptions.properties.response_format); - fix($.AudioTranslation.properties.task); +## Logprob casing fixes +```yaml +directive: - from: - options.go - models_serde.go @@ -211,23 +296,18 @@ directive: return $ .replace(/AvgLogprob \*float32/g, "AvgLogProb *float32") .replace(/(a|c)\.AvgLogprob/g, "$1.AvgLogProb") + - from: + - client.go + - models.go + - models_serde.go + - options.go + - response_types.go + where: $ + transform: return $.replace(/Logprobs/g, "LogProbs") +``` - # - # [END] Whisper - # - - # Fix "AutoGenerated" models - - from: openapi-document - where: $.components.schemas["ChatCompletions"].properties.usage - transform: > - delete $.allOf; - $["$ref"] = "#/components/schemas/CompletionsUsage"; - - from: openapi-document - where: $.components.schemas["Completions"].properties.usage - transform: > - delete $.allOf; - $["$ref"] = "#/components/schemas/CompletionsUsage"; - +```yaml +directive: # # strip out the deploymentID validation code - we absorbed this into the endpoint. # @@ -243,13 +323,6 @@ directive: /(\s+)urlPath\s*:=\s*"\/deployments\/\{deploymentId\}\/([^"]+)".+?url\.PathEscape.+?\n/gs, "$1urlPath := \"$2\"\n") - # Unexport the the poller state enum. - - from: - - constants.go - - models.go - where: $ - transform: return $.replace(/AzureOpenAIOperationState/g, "azureOpenAIOperationState"); - # splice out the auto-generated `deploymentID` field from the client - from: client.go where: $ @@ -304,156 +377,197 @@ directive: where: $ transform: return $.replace(/Client(\w+)((?:Options|Response))/g, "$1$2"); - # allow interception of formatting the URL path + # Make the Azure extensions internal - we expose these through the GetChatCompletions*() functions + # and just treat which endpoint we use as an implementation detail. - from: client.go where: $ transform: | return $ - .replace(/runtime\.JoinPaths\(client.endpoint, urlPath\)/g, "client.formatURL(urlPath, getDeployment(body))"); + .replace(/GetChatCompletionsWithAzureExtensions([ (])/g, "getChatCompletionsWithAzureExtensions$1") + .replace(/GetChatCompletions([ (])/g, "getChatCompletions$1"); +``` - - from: models.go +## Workarounds + +This handles a case where (depending on mixture of older and newer resources) we can potentially see +_either_ of these fields that represents the same data (prompt filter results). 
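As an illustration of the technique (a self-contained stand-in, not the generated `models_serde.go` code), accepting either JSON key for the same field looks roughly like this:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// chatCompletions is a stand-in type used only to show how both key spellings
// land in one field; the real generated model is richer.
type chatCompletions struct {
	PromptFilterResults []json.RawMessage
}

func (c *chatCompletions) UnmarshalJSON(data []byte) error {
	var rawMsg map[string]json.RawMessage
	if err := json.Unmarshal(data, &rawMsg); err != nil {
		return err
	}
	for key, val := range rawMsg {
		switch key {
		case "prompt_annotations": // sent by some older service resources
			fallthrough
		case "prompt_filter_results": // sent by newer service resources
			if err := json.Unmarshal(val, &c.PromptFilterResults); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	var older, newer chatCompletions
	_ = json.Unmarshal([]byte(`{"prompt_annotations":[{}]}`), &older)
	_ = json.Unmarshal([]byte(`{"prompt_filter_results":[{}]}`), &newer)
	fmt.Println(len(older.PromptFilterResults), len(newer.PromptFilterResults)) // 1 1
}
```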
+ +```yaml +directive: + - from: models_serde.go where: $ - transform: | - return $.replace(/(type ImageGenerations struct.+?)Data any/sg, "$1Data []ImageGenerationsDataItem") + transform: return $.replace(/case "prompt_filter_results":/g, 'case "prompt_annotations":\nfallthrough\ncase "prompt_filter_results":') +``` + +Update the ChatRequestUserMessage to allow for []ChatCompletionRequestMessageContentPartText _or_ +a string. - # delete the auto-generated ImageGenerationsDataItem, we handle that custom +```yaml +directive: - from: models.go where: $ - transform: return $.replace(/\/\/ ImageGenerationsDataItem represents[^}]+}/s, ""); + transform: return $.replace(/Content any/g, 'Content ChatRequestUserMessageContent') +``` - # rename the image constants - - from: constants.go - where: $ - transform: | - return $.replace(/ImageSizeFiveHundredTwelveX512/g, "ImageSize512x512") - .replace(/ImageSizeOneThousandTwentyFourX1024/g, "ImageSize1024x1024") - .replace(/ImageSizeTwoHundredFiftySixX256/g, "ImageSize256x256"); +Add in some types that are incorrectly not being exported in the generation - # scrub the Image(Payload|Location) deserializers. - - from: models_serde.go - where: $ +```yaml +directive: + - from: swagger-document + where: $.definitions transform: | - return $.replace(/\/\/ UnmarshalJSON implements the json.Unmarshaller interface for type ImagePayload.+?\n}/s, "") - .replace(/\/\/ MarshalJSON implements the json.Marshaller interface for type ImagePayload.+?\n}/s, "") - .replace(/\/\/ UnmarshalJSON implements the json.Unmarshaller interface for type ImageLocation.+?\n}/s, "") - .replace(/\/\/ MarshalJSON implements the json.Marshaller interface for type ImageLocation.+?\n}/s, ""); + $["ChatCompletionRequestMessageContentPartType"] = { + "type": "string", + "enum": [ + "image_url", + "text" + ], + "description": "The type of the content part.", + "x-ms-enum": { + "name": "ChatCompletionRequestMessageContentPartType", + "modelAsString": true, + "values": [ + { + "name": "image_url", + "value": "image_url", + "description": "Chat content contains an image URL" + }, + { + "name": "text", + "value": "text", + "description": "Chat content contains text" + }, + ] + } + }; + $["ChatCompletionRequestMessageContentPart"] = { + "title": "represents either an image URL or text content for a prompt", + "type": "object", + "discriminator": "type", + "properties": { + "type": { + "$ref": "#/definitions/ChatCompletionRequestMessageContentPartType" + } + }, + "required": [ + "type" + ], + }; + $["ChatCompletionRequestMessageContentPartImage"] = { + "type": "object", + "title": "represents an image URL, to be used as part of a prompt", + "properties": { + "image_url": { + "type": "object", + "title": "contains the URL and level of detail for an image prompt", + "properties": { + "url": { + "type": "string", + "description": "Either a URL of the image or the base64 encoded image data.", + "format": "uri" + }, + "detail": { + "type": "string", + "description": "Specifies the detail level of the image. 
Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).", + "enum": [ + "auto", + "low", + "high" + ], + "default": "auto" + } + }, + "required": [ + "url" + ] + } + }, + "allOf": [ + { + "$ref": "#/definitions/ChatCompletionRequestMessageContentPart" + } + ], + "required": [ + "image_url" + ], + "x-ms-discriminator-value": "image_url" + }; + $["ChatCompletionRequestMessageContentPartText"] = { + "type": "object", + "title": "represents text content, to be used as part of a prompt", + "properties": { + "text": { + "type": "string", + "description": "The text content." + } + }, + "allOf": [ + { + "$ref": "#/definitions/ChatCompletionRequestMessageContentPart" + } + ], + "required": [ + "text" + ], + "x-ms-discriminator-value": "text" + }; +``` - # hide the image generation pollers. - - rename-operation: - from: beginAzureBatchImageGeneration - to: azureBatchImageGenerationInternal - - from: - - client.go - - models.go - - models_serde.go - - options.go - - response_types.go - where: $ - transform: | - return $.replace(/GetAzureBatchImageGenerationOperationStatusResponse/g, "getAzureBatchImageGenerationOperationStatusResponse") - .replace(/AzureBatchImageGenerationInternalResponse/g, "azureBatchImageGenerationInternalResponse") - .replace(/GetAzureBatchImageGenerationOperationStatusOptions/g, "getAzureBatchImageGenerationOperationStatusOptions") - .replace(/GetAzureBatchImageGenerationOperationStatus/g, "getAzureBatchImageGenerationOperationStatus") - .replace(/BeginAzureBatchImageGenerationInternal/g, "beginAzureBatchImageGeneration") - .replace(/BatchImageGenerationOperationResponse/g, "batchImageGenerationOperationResponse"); - - # BUG: ChatCompletionsOptionsFunctionCall is another one of those "here's mutually exclusive values" options... - - from: - - models.go - - models_serde.go - where: $ - transform: | - return $ - .replace(/populateAny\(objectMap, "function_call", c.FunctionCall\)/, 'populate(objectMap, "function_call", c.FunctionCall)') - .replace(/\/\/ ChatCompletionsOptionsFunctionCall.+?\n}/, "") - .replace(/FunctionCall any/, "FunctionCall *ChatCompletionsOptionsFunctionCall"); - - # fix some casing - - from: - - client.go +Polymorphic removal of the Type field: `ChatCompletionRequestMessageContentPartClassification.Type` + +```yaml +directive: + - from: swagger-document + where: $.definitions.ChatCompletionRequestMessageContentPart + transform: $.properties.type["x-ms-client-name"] = "ChatCompletionRequestMessageContentPartTypeRename" + - from: - models.go - models_serde.go - - options.go - - response_types.go where: $ - transform: return $.replace(/Logprobs/g, "LogProbs") + transform: return $.replace(/ChatCompletionRequestMessageContentPartTypeRename/g, "partType") +``` - - from: constants.go - where: $ - transform: return $.replace(/\/\/ PossibleazureOpenAIOperationStateValues returns.+?\n}/s, ""); +Another workaround - streaming results don't contain the discriminator field so we'll +inject it when we can infer it properly ('function' property exists). - # fix incorrect property name for content filtering - # TODO: I imagine we should able to fix this in the tsp? 
- - from: models_serde.go +```yaml +directive: + - from: polymorphic_helpers.go where: $ transform: | - return $ - .replace(/ case "selfHarm":/g, ' case "self_harm":') - .replace(/populate\(objectMap, "selfHarm", c.SelfHarm\)/g, 'populate(objectMap, "self_harm", c.SelfHarm)'); + return $.replace(/(func unmarshalChatCompletionsToolCallClassification.+?var b ChatCompletionsToolCallClassification\n)/s, + `$1\n` + + `if m["type"] == nil && m["function"] != nil {\n` + + ` // WORKAROUND: the streaming results don't contain the proper role for functions, so we need to add these in.\n` + + ` m["type"] = string(ChatRoleFunction)\n` + + `}\n`); +``` - - from: client.go - where: $ - transform: return $.replace(/runtime\.NewResponseError/sg, "client.newError"); +Add in the older style of function call as that's still the only way to talk to older models. - # - # rename `Model` to `Deployment` - # +```yaml +directive: - from: models.go where: $ transform: | - return $ - .replace(/\/\/ The model.*?Model \*string/sg, "// REQUIRED: Deployment specifies the name of the deployment (for Azure OpenAI) or model (for OpenAI) to use for this request.\nDeployment string"); - + const text = + `// Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the\n` + + `// end-user. "auto" means the model can pick between an end-user or calling a\n` + + `// function. Specifying a particular function via {"name": "my_function"} forces the model to call that function. "none" is\n` + + `// the default when no functions are present. "auto" is the default if functions\n` + + `// are present.\n` + + `FunctionCall *ChatCompletionsOptionsFunctionCall\n` + + `\n` + + `// A list of functions the model may generate JSON inputs for.\n` + + `Functions []FunctionDefinition\n`; + + return $.replace(/(type ChatCompletionsOptions struct \{.+?FrequencyPenalty \*float32)/s, "$1\n\n" + text); - from: models_serde.go where: $ transform: | - return $ - .replace(/populate\(objectMap, "model", (c|e|a).Model\)/g, 'populate(objectMap, "model", &$1.Deployment)') - .replace(/err = unpopulate\(val, "Model", &(c|e|a).Model\)/g, 'err = unpopulate(val, "Model", &$1.Deployment)'); + const populateLines = + `populate(objectMap, "function_call", c.FunctionCall)\n` + + `populate(objectMap, "functions", c.Functions)`; - # Make the Azure extensions internal - we expose these through the GetChatCompletions*() functions - # and just treat which endpoint we use as an implementation detail. - - from: client.go - where: $ - transform: | - return $ - .replace(/GetChatCompletionsWithAzureExtensions([ (])/g, "getChatCompletionsWithAzureExtensions$1") - .replace(/GetChatCompletions([ (])/g, "getChatCompletions$1"); - - # move the Azure extensions options into place - - from: models.go - where: $ - transform: return $.replace(/(\/\/ The configuration entries for Azure OpenAI.+?)DataSources \[\]AzureChatExtensionConfiguration/s, "$1AzureExtensionsOptions *AzureChatExtensionOptions"); - - from: models_serde.go - where: $ - transform: | - return $ - .replace(/populate\(objectMap, "dataSources", c.DataSources\)/, 'if c.AzureExtensionsOptions != nil { populate(objectMap, "dataSources", c.AzureExtensionsOptions.Extensions) }') - // technically not used, but let's be completionists... 
- .replace(/err = unpopulate\(val, "DataSources", &c.DataSources\)/, 'c.AzureExtensionsOptions = &AzureChatExtensionOptions{}; err = unpopulate(val, "DataSources", &c.AzureExtensionsOptions.Extensions)') - - # try to fix some of the generated types. - - # swap the `Parameters` and `Type` fields (Type really drives what's in Parameters) - - from: models.go - where: $ - transform: | - let typeRE = /(\/\/ REQUIRED; The label for the type of an Azure chat extension.*?Type \*AzureChatExtensionType)/s; - let paramsRE = /(\/\/ REQUIRED; The configuration payload used for the Azure chat extension.*?Parameters any)/s; - - return $ - .replace(paramsRE, "") - .replace(typeRE, $.match(typeRE)[1] + "\n\n" + $.match(paramsRE)[1]); - - - from: constants.go - where: $ - transform: | - return $.replace( - /(AzureChatExtensionTypeAzureCognitiveSearch AzureChatExtensionType)/, - "// AzureChatExtensionTypeAzureCognitiveSearch enables the use of an Azure Cognitive Search index with chat completions.\n// [AzureChatExtensionConfiguration.Parameter] should be of type [AzureCognitiveSearchChatExtensionConfiguration].\n$1"); - - # HACK: prompt_filter_results <-> prompt_annotations change - - from: models_serde.go - where: $ - transform: return $.replace(/case "prompt_filter_results":/g, 'case "prompt_annotations":\nfallthrough\ncase "prompt_filter_results":') + return $.replace(/(func \(c ChatCompletionsOptions\) MarshalJSON\(\).+?populate\(objectMap, "frequency_penalty", c.FrequencyPenalty\))/s, "$1\n" + populateLines) ``` diff --git a/sdk/ai/azopenai/build.go b/sdk/ai/azopenai/build.go index 88088a8e55c1..a3e6fe79680f 100644 --- a/sdk/ai/azopenai/build.go +++ b/sdk/ai/azopenai/build.go @@ -3,7 +3,6 @@ //go:build go1.18 // +build go1.18 -//go:generate pwsh ./genopenapi3.ps1 //go:generate autorest ./autorest.md //go:generate go mod tidy //go:generate goimports -w . diff --git a/sdk/ai/azopenai/client.go b/sdk/ai/azopenai/client.go index 59c064e9bbf2..cc3e175e010b 100644 --- a/sdk/ai/azopenai/client.go +++ b/sdk/ai/azopenai/client.go @@ -10,6 +10,7 @@ package azopenai import ( "context" + "io" "net/http" "github.com/Azure/azure-sdk-for-go/sdk/azcore" @@ -24,55 +25,15 @@ type Client struct { clientData } -// beginAzureBatchImageGeneration - Starts the generation of a batch of images from a text caption -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-09-01-preview -// - options - beginAzureBatchImageGenerationOptions contains the optional parameters for the Client.beginAzureBatchImageGeneration -// method. -func (client *Client) beginAzureBatchImageGeneration(ctx context.Context, body ImageGenerationOptions, options *beginAzureBatchImageGenerationOptions) (*runtime.Poller[azureBatchImageGenerationInternalResponse], error) { - if options == nil || options.ResumeToken == "" { - resp, err := client.azureBatchImageGenerationInternal(ctx, body, options) - if err != nil { - return nil, err - } - poller, err := runtime.NewPoller[azureBatchImageGenerationInternalResponse](resp, client.internal.Pipeline(), nil) - return poller, err - } else { - return runtime.NewPollerFromResumeToken[azureBatchImageGenerationInternalResponse](options.ResumeToken, client.internal.Pipeline(), nil) - } -} - -// AzureBatchImageGenerationInternal - Starts the generation of a batch of images from a text caption -// If the operation fails it returns an *azcore.ResponseError type. 
-// -// Generated from API version 2023-09-01-preview -func (client *Client) azureBatchImageGenerationInternal(ctx context.Context, body ImageGenerationOptions, options *beginAzureBatchImageGenerationOptions) (*http.Response, error) { - var err error - req, err := client.azureBatchImageGenerationInternalCreateRequest(ctx, body, options) - if err != nil { - return nil, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return nil, err - } - if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { - err = client.newError(httpResp) - return nil, err - } - return httpResp, nil -} - -// azureBatchImageGenerationInternalCreateRequest creates the AzureBatchImageGenerationInternal request. -func (client *Client) azureBatchImageGenerationInternalCreateRequest(ctx context.Context, body ImageGenerationOptions, options *beginAzureBatchImageGenerationOptions) (*policy.Request, error) { - urlPath := "/images/generations:submit" +// getAudioTranscriptionAsPlainTextCreateRequest creates the GetAudioTranscriptionAsPlainText request. +func (client *Client) getAudioTranscriptionAsPlainTextCreateRequest(ctx context.Context, body AudioTranscriptionOptions, options *GetAudioTranscriptionAsPlainTextOptions) (*policy.Request, error) { + urlPath := "audio/transcriptions" req, err := runtime.NewRequest(ctx, http.MethodPost, client.formatURL(urlPath, getDeployment(body))) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01-preview") + reqQP.Set("api-version", "2023-12-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, body); err != nil { @@ -81,20 +42,29 @@ func (client *Client) azureBatchImageGenerationInternalCreateRequest(ctx context return req, nil } +// getAudioTranscriptionAsPlainTextHandleResponse handles the GetAudioTranscriptionAsPlainText response. +func (client *Client) getAudioTranscriptionAsPlainTextHandleResponse(resp *http.Response) (GetAudioTranscriptionAsPlainTextResponse, error) { + result := GetAudioTranscriptionAsPlainTextResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Value); err != nil { + return GetAudioTranscriptionAsPlainTextResponse{}, err + } + return result, nil +} + // getAudioTranscriptionInternal - Gets transcribed text and associated metadata from provided spoken audio data. Audio will -// be transcribed in the written language corresponding to the language it was spoken in. Gets transcribed text -// and associated metadata from provided spoken audio data. Audio will be transcribed in the written language corresponding -// to the language it was spoken in. +// be transcribed in the written language corresponding to the language it was spoken in. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01-preview +// Generated from API version 2023-12-01-preview +// - deploymentID - Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure +// OpenAI) to use for this request. // - file - The audio data to transcribe. This must be the binary content of a file in one of the supported media formats: flac, // mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm. // - options - getAudioTranscriptionInternalOptions contains the optional parameters for the Client.getAudioTranscriptionInternal // method. 
-func (client *Client) getAudioTranscriptionInternal(ctx context.Context, file []byte, options *getAudioTranscriptionInternalOptions) (getAudioTranscriptionInternalResponse, error) { +func (client *Client) getAudioTranscriptionInternal(ctx context.Context, deploymentID string, file io.ReadSeekCloser, options *getAudioTranscriptionInternalOptions) (getAudioTranscriptionInternalResponse, error) { var err error - req, err := client.getAudioTranscriptionInternalCreateRequest(ctx, file, options) + req, err := client.getAudioTranscriptionInternalCreateRequest(ctx, deploymentID, file, options) if err != nil { return getAudioTranscriptionInternalResponse{}, err } @@ -111,14 +81,14 @@ func (client *Client) getAudioTranscriptionInternal(ctx context.Context, file [] } // getAudioTranscriptionInternalCreateRequest creates the getAudioTranscriptionInternal request. -func (client *Client) getAudioTranscriptionInternalCreateRequest(ctx context.Context, file []byte, body *getAudioTranscriptionInternalOptions) (*policy.Request, error) { +func (client *Client) getAudioTranscriptionInternalCreateRequest(ctx context.Context, deploymentID string, file io.ReadSeekCloser, body *getAudioTranscriptionInternalOptions) (*policy.Request, error) { urlPath := "audio/transcriptions" req, err := runtime.NewRequest(ctx, http.MethodPost, client.formatURL(urlPath, getDeployment(body))) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01-preview") + reqQP.Set("api-version", "2023-12-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := setMultipartFormData(req, file, *body); err != nil { @@ -136,18 +106,71 @@ func (client *Client) getAudioTranscriptionInternalHandleResponse(resp *http.Res return result, nil } +// GetAudioTranslationAsPlainText - Gets English language transcribed text and associated metadata from provided spoken audio +// data. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-12-01-preview +// - options - GetAudioTranslationAsPlainTextOptions contains the optional parameters for the Client.GetAudioTranslationAsPlainText +// method. +func (client *Client) GetAudioTranslationAsPlainText(ctx context.Context, body AudioTranslationOptions, options *GetAudioTranslationAsPlainTextOptions) (GetAudioTranslationAsPlainTextResponse, error) { + var err error + req, err := client.getAudioTranslationAsPlainTextCreateRequest(ctx, body, options) + if err != nil { + return GetAudioTranslationAsPlainTextResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetAudioTranslationAsPlainTextResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = client.newError(httpResp) + return GetAudioTranslationAsPlainTextResponse{}, err + } + resp, err := client.getAudioTranslationAsPlainTextHandleResponse(httpResp) + return resp, err +} + +// getAudioTranslationAsPlainTextCreateRequest creates the GetAudioTranslationAsPlainText request. 
+func (client *Client) getAudioTranslationAsPlainTextCreateRequest(ctx context.Context, body AudioTranslationOptions, options *GetAudioTranslationAsPlainTextOptions) (*policy.Request, error) { + urlPath := "audio/translations" + req, err := runtime.NewRequest(ctx, http.MethodPost, client.formatURL(urlPath, getDeployment(body))) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-12-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, body); err != nil { + return nil, err + } + return req, nil +} + +// getAudioTranslationAsPlainTextHandleResponse handles the GetAudioTranslationAsPlainText response. +func (client *Client) getAudioTranslationAsPlainTextHandleResponse(resp *http.Response) (GetAudioTranslationAsPlainTextResponse, error) { + result := GetAudioTranslationAsPlainTextResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Value); err != nil { + return GetAudioTranslationAsPlainTextResponse{}, err + } + return result, nil +} + // getAudioTranslationInternal - Gets English language transcribed text and associated metadata from provided spoken audio -// data. Gets English language transcribed text and associated metadata from provided spoken audio data. +// data. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01-preview +// Generated from API version 2023-12-01-preview +// - deploymentID - Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure +// OpenAI) to use for this request. // - file - The audio data to translate. This must be the binary content of a file in one of the supported media formats: flac, // mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm. // - options - getAudioTranslationInternalOptions contains the optional parameters for the Client.getAudioTranslationInternal // method. -func (client *Client) getAudioTranslationInternal(ctx context.Context, file []byte, options *getAudioTranslationInternalOptions) (getAudioTranslationInternalResponse, error) { +func (client *Client) getAudioTranslationInternal(ctx context.Context, deploymentID string, file io.ReadSeekCloser, options *getAudioTranslationInternalOptions) (getAudioTranslationInternalResponse, error) { var err error - req, err := client.getAudioTranslationInternalCreateRequest(ctx, file, options) + req, err := client.getAudioTranslationInternalCreateRequest(ctx, deploymentID, file, options) if err != nil { return getAudioTranslationInternalResponse{}, err } @@ -164,14 +187,14 @@ func (client *Client) getAudioTranslationInternal(ctx context.Context, file []by } // getAudioTranslationInternalCreateRequest creates the getAudioTranslationInternal request. 
-func (client *Client) getAudioTranslationInternalCreateRequest(ctx context.Context, file []byte, body *getAudioTranslationInternalOptions) (*policy.Request, error) { +func (client *Client) getAudioTranslationInternalCreateRequest(ctx context.Context, deploymentID string, file io.ReadSeekCloser, body *getAudioTranslationInternalOptions) (*policy.Request, error) { urlPath := "audio/translations" req, err := runtime.NewRequest(ctx, http.MethodPost, client.formatURL(urlPath, getDeployment(body))) if err != nil { return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01-preview") + reqQP.Set("api-version", "2023-12-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := setMultipartFormData(req, file, *body); err != nil { @@ -183,7 +206,7 @@ func (client *Client) getAudioTranslationInternalCreateRequest(ctx context.Conte // getAudioTranslationInternalHandleResponse handles the getAudioTranslationInternal response. func (client *Client) getAudioTranslationInternalHandleResponse(resp *http.Response) (getAudioTranslationInternalResponse, error) { result := getAudioTranslationInternalResponse{} - if err := runtime.UnmarshalAsJSON(resp, &result.AudioTranscription); err != nil { + if err := runtime.UnmarshalAsJSON(resp, &result.AudioTranslation); err != nil { return getAudioTranslationInternalResponse{}, err } return result, nil @@ -193,7 +216,7 @@ func (client *Client) getAudioTranslationInternalHandleResponse(resp *http.Respo // and generate text that continues from or "completes" provided prompt data. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01-preview +// Generated from API version 2023-12-01-preview // - options - GetChatCompletionsOptions contains the optional parameters for the Client.getChatCompletions method. func (client *Client) getChatCompletions(ctx context.Context, body ChatCompletionsOptions, options *GetChatCompletionsOptions) (GetChatCompletionsResponse, error) { var err error @@ -221,7 +244,7 @@ func (client *Client) getChatCompletionsCreateRequest(ctx context.Context, body return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01-preview") + reqQP.Set("api-version", "2023-12-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, body); err != nil { @@ -244,7 +267,7 @@ func (client *Client) getChatCompletionsHandleResponse(resp *http.Response) (Get // chat completions capabilities. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01-preview +// Generated from API version 2023-12-01-preview // - options - GetChatCompletionsWithAzureExtensionsOptions contains the optional parameters for the Client.GetChatCompletionsWithAzureExtensions // method. 
func (client *Client) getChatCompletionsWithAzureExtensions(ctx context.Context, body ChatCompletionsOptions, options *GetChatCompletionsWithAzureExtensionsOptions) (GetChatCompletionsWithAzureExtensionsResponse, error) { @@ -273,7 +296,7 @@ func (client *Client) getChatCompletionsWithAzureExtensionsCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01-preview") + reqQP.Set("api-version", "2023-12-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, body); err != nil { @@ -295,7 +318,7 @@ func (client *Client) getChatCompletionsWithAzureExtensionsHandleResponse(resp * // text that continues from or "completes" provided prompt data. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01-preview +// Generated from API version 2023-12-01-preview // - options - GetCompletionsOptions contains the optional parameters for the Client.GetCompletions method. func (client *Client) GetCompletions(ctx context.Context, body CompletionsOptions, options *GetCompletionsOptions) (GetCompletionsResponse, error) { var err error @@ -323,7 +346,7 @@ func (client *Client) getCompletionsCreateRequest(ctx context.Context, body Comp return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01-preview") + reqQP.Set("api-version", "2023-12-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, body); err != nil { @@ -344,7 +367,7 @@ func (client *Client) getCompletionsHandleResponse(resp *http.Response) (GetComp // GetEmbeddings - Return the embeddings for a given prompt. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-09-01-preview +// Generated from API version 2023-12-01-preview // - options - GetEmbeddingsOptions contains the optional parameters for the Client.GetEmbeddings method. func (client *Client) GetEmbeddings(ctx context.Context, body EmbeddingsOptions, options *GetEmbeddingsOptions) (GetEmbeddingsResponse, error) { var err error @@ -372,7 +395,7 @@ func (client *Client) getEmbeddingsCreateRequest(ctx context.Context, body Embed return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-09-01-preview") + reqQP.Set("api-version", "2023-12-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, body); err != nil { @@ -389,3 +412,52 @@ func (client *Client) getEmbeddingsHandleResponse(resp *http.Response) (GetEmbed } return result, nil } + +// GetImageGenerations - Creates an image given a prompt. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-12-01-preview +// - options - GetImageGenerationsOptions contains the optional parameters for the Client.GetImageGenerations method. 
+func (client *Client) GetImageGenerations(ctx context.Context, body ImageGenerationOptions, options *GetImageGenerationsOptions) (GetImageGenerationsResponse, error) { + var err error + req, err := client.getImageGenerationsCreateRequest(ctx, body, options) + if err != nil { + return GetImageGenerationsResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetImageGenerationsResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = client.newError(httpResp) + return GetImageGenerationsResponse{}, err + } + resp, err := client.getImageGenerationsHandleResponse(httpResp) + return resp, err +} + +// getImageGenerationsCreateRequest creates the GetImageGenerations request. +func (client *Client) getImageGenerationsCreateRequest(ctx context.Context, body ImageGenerationOptions, options *GetImageGenerationsOptions) (*policy.Request, error) { + urlPath := "images/generations" + req, err := runtime.NewRequest(ctx, http.MethodPost, client.formatURL(urlPath, getDeployment(body))) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-12-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, body); err != nil { + return nil, err + } + return req, nil +} + +// getImageGenerationsHandleResponse handles the GetImageGenerations response. +func (client *Client) getImageGenerationsHandleResponse(resp *http.Response) (GetImageGenerationsResponse, error) { + result := GetImageGenerationsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ImageGenerations); err != nil { + return GetImageGenerationsResponse{}, err + } + return result, nil +} diff --git a/sdk/ai/azopenai/client_audio_internal_test.go b/sdk/ai/azopenai/client_audio_internal_test.go index 37eccb68334b..e5d6385231cd 100644 --- a/sdk/ai/azopenai/client_audio_internal_test.go +++ b/sdk/ai/azopenai/client_audio_internal_test.go @@ -16,16 +16,21 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/stretchr/testify/require" ) +func newSeekableBytes(data []byte) io.ReadSeekCloser { + return streaming.NopCloser(bytes.NewReader(data)) +} + func TestSetMultipartFormData(t *testing.T) { t.Run("getAudioTranscriptionInternalOptions", func(t *testing.T) { req, err := runtime.NewRequest(context.Background(), "POST", "http://localhost") require.NoError(t, err) - err = setMultipartFormData(req, []byte{1, 2, 3}, getAudioTranscriptionInternalOptions{ + err = setMultipartFormData(req, newSeekableBytes([]byte{1, 2, 3}), getAudioTranscriptionInternalOptions{ Language: to.Ptr("en"), Model: to.Ptr("hello"), Prompt: to.Ptr("my prompt"), @@ -53,7 +58,7 @@ func TestSetMultipartFormData(t *testing.T) { req, err := runtime.NewRequest(context.Background(), "POST", "http://localhost") require.NoError(t, err) - err = setMultipartFormData(req, []byte{1, 2, 3}, getAudioTranslationInternalOptions{ + err = setMultipartFormData(req, newSeekableBytes([]byte{1, 2, 3}), getAudioTranslationInternalOptions{ Model: to.Ptr("hello"), Prompt: to.Ptr("my prompt"), ResponseFormat: to.Ptr(AudioTranslationFormatJSON), diff --git a/sdk/ai/azopenai/client_audio_test.go b/sdk/ai/azopenai/client_audio_test.go index d81f181791b4..6a59aa2c6242 100644 --- a/sdk/ai/azopenai/client_audio_test.go +++ 
b/sdk/ai/azopenai/client_audio_test.go @@ -22,8 +22,8 @@ func TestClient_GetAudioTranscription_AzureOpenAI(t *testing.T) { t.Skipf("Recording needs to be revisited for multipart: https://github.com/Azure/azure-sdk-for-go/issues/21598") } - client := newTestClient(t, azureWhisper, withForgivingRetryOption()) - runTranscriptionTests(t, client, azureWhisperModel) + client := newTestClient(t, azureOpenAI.Whisper.Endpoint, withForgivingRetryOption()) + runTranscriptionTests(t, client, azureOpenAI.Whisper.Model) } func TestClient_GetAudioTranscription_OpenAI(t *testing.T) { @@ -36,7 +36,7 @@ func TestClient_GetAudioTranscription_OpenAI(t *testing.T) { mp3Bytes, err := os.ReadFile(`testdata/sampledata_audiofiles_myVoiceIsMyPassportVerifyMe01.mp3`) require.NoError(t, err) - args := newTranscriptionOptions(azopenai.AudioTranscriptionFormatVerboseJSON, openAIWhisperModel, mp3Bytes) + args := newTranscriptionOptions(azopenai.AudioTranscriptionFormatVerboseJSON, openAI.Whisper.Model, mp3Bytes) transcriptResp, err := client.GetAudioTranscription(context.Background(), args, nil) require.NoError(t, err) require.NotEmpty(t, transcriptResp) @@ -54,8 +54,8 @@ func TestClient_GetAudioTranslation_AzureOpenAI(t *testing.T) { t.Skipf("Recording needs to be revisited for multipart: https://github.com/Azure/azure-sdk-for-go/issues/21598") } - client := newTestClient(t, azureWhisper, withForgivingRetryOption()) - runTranslationTests(t, client, azureWhisperModel) + client := newTestClient(t, azureOpenAI.Whisper.Endpoint, withForgivingRetryOption()) + runTranslationTests(t, client, azureOpenAI.Whisper.Model) } func TestClient_GetAudioTranslation_OpenAI(t *testing.T) { @@ -68,7 +68,7 @@ func TestClient_GetAudioTranslation_OpenAI(t *testing.T) { mp3Bytes, err := os.ReadFile(`testdata/sampledata_audiofiles_myVoiceIsMyPassportVerifyMe01.mp3`) require.NoError(t, err) - args := newTranslationOptions(azopenai.AudioTranslationFormatVerboseJSON, openAIWhisperModel, mp3Bytes) + args := newTranslationOptions(azopenai.AudioTranslationFormatVerboseJSON, openAI.Whisper.Model, mp3Bytes) transcriptResp, err := client.GetAudioTranslation(context.Background(), args, nil) require.NoError(t, err) require.NotEmpty(t, transcriptResp) @@ -151,7 +151,7 @@ func runTranslationTests(t *testing.T, client *azopenai.Client, model string) { require.NotEmpty(t, transcriptResp) require.NotEmpty(t, *transcriptResp.Text) - requireEmptyAudioTranscription(t, transcriptResp.AudioTranscription) + requireEmptyAudioTranslation(t, transcriptResp.AudioTranslation) }) t.Run(string(azopenai.AudioTranscriptionFormatSrt), func(t *testing.T) { @@ -161,7 +161,7 @@ func runTranslationTests(t *testing.T, client *azopenai.Client, model string) { require.NotEmpty(t, transcriptResp) require.NotEmpty(t, *transcriptResp.Text) - requireEmptyAudioTranscription(t, transcriptResp.AudioTranscription) + requireEmptyAudioTranslation(t, transcriptResp.AudioTranslation) }) t.Run(string(azopenai.AudioTranscriptionFormatVtt), func(t *testing.T) { @@ -171,7 +171,7 @@ func runTranslationTests(t *testing.T, client *azopenai.Client, model string) { require.NotEmpty(t, transcriptResp) require.NotEmpty(t, *transcriptResp.Text) - requireEmptyAudioTranscription(t, transcriptResp.AudioTranscription) + requireEmptyAudioTranslation(t, transcriptResp.AudioTranslation) }) t.Run(string(azopenai.AudioTranscriptionFormatVerboseJSON), func(t *testing.T) { @@ -195,13 +195,13 @@ func runTranslationTests(t *testing.T, client *azopenai.Client, model string) { require.NotEmpty(t, transcriptResp) 
require.NotEmpty(t, *transcriptResp.Text) - requireEmptyAudioTranscription(t, transcriptResp.AudioTranscription) + requireEmptyAudioTranslation(t, transcriptResp.AudioTranslation) }) } func newTranscriptionOptions(format azopenai.AudioTranscriptionFormat, model string, mp3Bytes []byte) azopenai.AudioTranscriptionOptions { return azopenai.AudioTranscriptionOptions{ - Deployment: model, + DeploymentName: to.Ptr(model), File: mp3Bytes, ResponseFormat: &format, Language: to.Ptr("en"), @@ -211,7 +211,7 @@ func newTranscriptionOptions(format azopenai.AudioTranscriptionFormat, model str func newTranslationOptions(format azopenai.AudioTranslationFormat, model string, mp3Bytes []byte) azopenai.AudioTranslationOptions { return azopenai.AudioTranslationOptions{ - Deployment: model, + DeploymentName: to.Ptr(model), File: mp3Bytes, ResponseFormat: &format, Temperature: to.Ptr[float32](0.0), @@ -228,3 +228,12 @@ func requireEmptyAudioTranscription(t *testing.T, at azopenai.AudioTranscription require.Empty(t, at.Segments) require.Empty(t, at.Task) } + +func requireEmptyAudioTranslation(t *testing.T, at azopenai.AudioTranslation) { + // Text is always filled out for + + require.Empty(t, at.Duration) + require.Empty(t, at.Language) + require.Empty(t, at.Segments) + require.Empty(t, at.Task) +} diff --git a/sdk/ai/azopenai/client_chat_completions_extensions_test.go b/sdk/ai/azopenai/client_chat_completions_extensions_test.go index 5a79870dc8aa..4647b343cbec 100644 --- a/sdk/ai/azopenai/client_chat_completions_extensions_test.go +++ b/sdk/ai/azopenai/client_chat_completions_extensions_test.go @@ -18,22 +18,17 @@ import ( ) func TestChatCompletions_extensions_bringYourOwnData(t *testing.T) { - client := newAzureOpenAIClientForTest(t, azureOpenAI) + client := newTestClient(t, azureOpenAI.ChatCompletionsOYD.Endpoint) resp, err := client.GetChatCompletions(context.Background(), azopenai.ChatCompletionsOptions{ - Messages: []azopenai.ChatMessage{ - {Content: to.Ptr("What does PR complete mean?"), Role: to.Ptr(azopenai.ChatRoleUser)}, + Messages: []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("What does PR complete mean?")}, }, MaxTokens: to.Ptr[int32](512), - AzureExtensionsOptions: &azopenai.AzureChatExtensionOptions{ - Extensions: []azopenai.AzureChatExtensionConfiguration{ - { - Type: to.Ptr(azopenai.AzureChatExtensionTypeAzureCognitiveSearch), - Parameters: azureOpenAI.Cognitive, - }, - }, + AzureExtensionsOptions: []azopenai.AzureChatExtensionConfigurationClassification{ + &azureOpenAI.Cognitive, }, - Deployment: "gpt-4", + DeploymentName: &azureOpenAI.ChatCompletionsOYD.Model, }, nil) require.NoError(t, err) @@ -44,26 +39,21 @@ func TestChatCompletions_extensions_bringYourOwnData(t *testing.T) { require.Equal(t, azopenai.ChatRoleTool, *msgContext.Messages[0].Role) require.NotEmpty(t, *resp.Choices[0].Message.Content) - require.Equal(t, azopenai.CompletionsFinishReasonStop, *resp.Choices[0].FinishReason) + require.Equal(t, azopenai.CompletionsFinishReasonStopped, *resp.Choices[0].FinishReason) } func TestChatExtensionsStreaming_extensions_bringYourOwnData(t *testing.T) { - client := newAzureOpenAIClientForTest(t, azureOpenAICanary) + client := newTestClient(t, azureOpenAI.ChatCompletionsOYD.Endpoint) streamResp, err := client.GetChatCompletionsStream(context.Background(), azopenai.ChatCompletionsOptions{ - Messages: []azopenai.ChatMessage{ - {Content: to.Ptr("What does PR complete mean?"), Role: 
to.Ptr(azopenai.ChatRoleUser)}, + Messages: []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("What does PR complete mean?")}, }, MaxTokens: to.Ptr[int32](512), - AzureExtensionsOptions: &azopenai.AzureChatExtensionOptions{ - Extensions: []azopenai.AzureChatExtensionConfiguration{ - { - Type: to.Ptr(azopenai.AzureChatExtensionTypeAzureCognitiveSearch), - Parameters: azureOpenAICanary.Cognitive, - }, - }, + AzureExtensionsOptions: []azopenai.AzureChatExtensionConfigurationClassification{ + &azureOpenAI.Cognitive, }, - Deployment: "gpt-4", + DeploymentName: &azureOpenAI.ChatCompletionsOYD.Model, }, nil) require.NoError(t, err) diff --git a/sdk/ai/azopenai/client_chat_completions_test.go b/sdk/ai/azopenai/client_chat_completions_test.go index b580cb5108af..a86cd11ffb4e 100644 --- a/sdk/ai/azopenai/client_chat_completions_test.go +++ b/sdk/ai/azopenai/client_chat_completions_test.go @@ -23,17 +23,16 @@ import ( "github.com/stretchr/testify/require" ) -func newTestChatCompletionOptions(tv testVars) azopenai.ChatCompletionsOptions { +func newTestChatCompletionOptions(deployment string) azopenai.ChatCompletionsOptions { return azopenai.ChatCompletionsOptions{ - Messages: []azopenai.ChatMessage{ - { - Role: to.Ptr(azopenai.ChatRole("user")), - Content: to.Ptr("Count to 10, with a comma between each number, no newlines and a period at the end. E.g., 1, 2, 3, ..."), + Messages: []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestUserMessage{ + Content: azopenai.NewChatRequestUserMessageContent("Count to 10, with a comma between each number, no newlines and a period at the end. E.g., 1, 2, 3, ..."), }, }, - MaxTokens: to.Ptr(int32(1024)), - Temperature: to.Ptr(float32(0.0)), - Deployment: tv.ChatCompletions, + MaxTokens: to.Ptr(int32(1024)), + Temperature: to.Ptr(float32(0.0)), + DeploymentName: &deployment, } } @@ -42,12 +41,12 @@ var expectedRole = azopenai.ChatRoleAssistant func TestClient_GetChatCompletions(t *testing.T) { client := newTestClient(t, azureOpenAI.Endpoint) - testGetChatCompletions(t, client, azureOpenAI) + testGetChatCompletions(t, client, azureOpenAI.ChatCompletionsRAI.Model, true) } func TestClient_GetChatCompletionsStream(t *testing.T) { - chatClient := newAzureOpenAIClientForTest(t, azureOpenAICanary) - testGetChatCompletionsStream(t, chatClient, azureOpenAICanary) + chatClient := newTestClient(t, azureOpenAI.ChatCompletionsRAI.Endpoint) + testGetChatCompletionsStream(t, chatClient, azureOpenAI.ChatCompletionsRAI.Model) } func TestClient_OpenAI_GetChatCompletions(t *testing.T) { @@ -56,7 +55,7 @@ func TestClient_OpenAI_GetChatCompletions(t *testing.T) { } chatClient := newOpenAIClientForTest(t) - testGetChatCompletions(t, chatClient, openAI) + testGetChatCompletions(t, chatClient, openAI.ChatCompletions, false) } func TestClient_OpenAI_GetChatCompletionsStream(t *testing.T) { @@ -65,14 +64,14 @@ func TestClient_OpenAI_GetChatCompletionsStream(t *testing.T) { } chatClient := newOpenAIClientForTest(t) - testGetChatCompletionsStream(t, chatClient, openAI) + testGetChatCompletionsStream(t, chatClient, openAI.ChatCompletions) } -func testGetChatCompletions(t *testing.T, client *azopenai.Client, tv testVars) { +func testGetChatCompletions(t *testing.T, client *azopenai.Client, deployment string, checkRAI bool) { expected := azopenai.ChatCompletions{ Choices: []azopenai.ChatChoice{ { - Message: &azopenai.ChatChoiceMessage{ + Message: &azopenai.ChatResponseMessage{ Role: &expectedRole, 
Content: &expectedContent, }, @@ -89,15 +88,15 @@ func testGetChatCompletions(t *testing.T, client *azopenai.Client, tv testVars) }, } - resp, err := client.GetChatCompletions(context.Background(), newTestChatCompletionOptions(tv), nil) + resp, err := client.GetChatCompletions(context.Background(), newTestChatCompletionOptions(deployment), nil) skipNowIfThrottled(t, err) require.NoError(t, err) - if tv.Endpoint.Azure { + if checkRAI { // Azure also provides content-filtering. This particular prompt and responses // will be considered safe. - expected.PromptFilterResults = []azopenai.PromptFilterResult{ - {PromptIndex: to.Ptr[int32](0), ContentFilterResults: (*azopenai.PromptFilterResultContentFilterResults)(safeContentFilter)}, + expected.PromptFilterResults = []azopenai.ContentFilterResultsForPrompt{ + {PromptIndex: to.Ptr[int32](0), ContentFilterResults: safeContentFilterResultDetailsForPrompt}, } expected.Choices[0].ContentFilterResults = safeContentFilter } @@ -111,8 +110,8 @@ func testGetChatCompletions(t *testing.T, client *azopenai.Client, tv testVars) require.Equal(t, expected, resp.ChatCompletions) } -func testGetChatCompletionsStream(t *testing.T, client *azopenai.Client, tv testVars) { - streamResp, err := client.GetChatCompletionsStream(context.Background(), newTestChatCompletionOptions(tv), nil) +func testGetChatCompletionsStream(t *testing.T, client *azopenai.Client, deployment string) { + streamResp, err := client.GetChatCompletionsStream(context.Background(), newTestChatCompletionOptions(deployment), nil) if respErr := (*azcore.ResponseError)(nil); errors.As(err, &respErr) && respErr.StatusCode == http.StatusTooManyRequests { t.Skipf("OpenAI resource overloaded, skipping this test") @@ -136,8 +135,8 @@ func testGetChatCompletionsStream(t *testing.T, client *azopenai.Client, tv test require.NoError(t, err) if completion.PromptFilterResults != nil { - require.Equal(t, []azopenai.PromptFilterResult{ - {PromptIndex: to.Ptr[int32](0), ContentFilterResults: (*azopenai.PromptFilterResultContentFilterResults)(safeContentFilter)}, + require.Equal(t, []azopenai.ContentFilterResultsForPrompt{ + {PromptIndex: to.Ptr[int32](0), ContentFilterResults: safeContentFilterResultDetailsForPrompt}, }, completion.PromptFilterResults) } @@ -187,22 +186,21 @@ func TestClient_GetChatCompletions_DefaultAzureCredential(t *testing.T) { }) require.NoError(t, err) - testGetChatCompletions(t, chatClient, azureOpenAI) + testGetChatCompletions(t, chatClient, azureOpenAI.ChatCompletions, true) } func TestClient_GetChatCompletions_InvalidModel(t *testing.T) { client := newTestClient(t, azureOpenAI.Endpoint) _, err := client.GetChatCompletions(context.Background(), azopenai.ChatCompletionsOptions{ - Messages: []azopenai.ChatMessage{ - { - Role: to.Ptr(azopenai.ChatRole("user")), - Content: to.Ptr("Count to 100, with a comma between each number and no newlines. E.g., 1, 2, 3, ..."), + Messages: []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestUserMessage{ + Content: azopenai.NewChatRequestUserMessageContent("Count to 100, with a comma between each number and no newlines. 
E.g., 1, 2, 3, ..."), }, }, - MaxTokens: to.Ptr(int32(1024)), - Temperature: to.Ptr(float32(0.0)), - Deployment: "invalid model name", + MaxTokens: to.Ptr(int32(1024)), + Temperature: to.Ptr(float32(0.0)), + DeploymentName: to.Ptr("invalid model name"), }, nil) var respErr *azcore.ResponseError @@ -217,15 +215,50 @@ func TestClient_GetChatCompletionsStream_Error(t *testing.T) { t.Run("AzureOpenAI", func(t *testing.T) { client := newBogusAzureOpenAIClient(t) - streamResp, err := client.GetChatCompletionsStream(context.Background(), newTestChatCompletionOptions(azureOpenAI), nil) + streamResp, err := client.GetChatCompletionsStream(context.Background(), newTestChatCompletionOptions(azureOpenAI.ChatCompletions), nil) require.Empty(t, streamResp) assertResponseIsError(t, err) }) t.Run("OpenAI", func(t *testing.T) { client := newBogusOpenAIClient(t) - streamResp, err := client.GetChatCompletionsStream(context.Background(), newTestChatCompletionOptions(openAI), nil) + streamResp, err := client.GetChatCompletionsStream(context.Background(), newTestChatCompletionOptions(openAI.ChatCompletions), nil) require.Empty(t, streamResp) assertResponseIsError(t, err) }) } + +func TestClient_OpenAI_GetChatCompletions_Vision(t *testing.T) { + if recording.GetRecordMode() == recording.LiveMode { + // we're having an issue right now with this preview feature. I luckily got + // a recording of it but it won't run in live mode at this moment. + t.Skipf("Skipping %s because of a temp live outage with vision preview", t.Name()) + } + + imageURL := "https://www.bing.com/th?id=OHR.BradgateFallow_EN-US3932725763_1920x1080.jpg" + + chatClient := newOpenAIClientForTest(t) + content := azopenai.NewChatRequestUserMessageContent([]azopenai.ChatCompletionRequestMessageContentPartClassification{ + &azopenai.ChatCompletionRequestMessageContentPartText{ + Text: to.Ptr("Describe this image"), + }, + &azopenai.ChatCompletionRequestMessageContentPartImage{ + ImageURL: &azopenai.ChatCompletionRequestMessageContentPartImageURL{ + URL: &imageURL, + }, + }, + }) + + resp, err := chatClient.GetChatCompletions(context.Background(), azopenai.ChatCompletionsOptions{ + Messages: []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestUserMessage{ + Content: content, + }, + }, + DeploymentName: to.Ptr("gpt-4-vision-preview"), + }, nil) + require.NoError(t, err) + require.NotEmpty(t, resp.Choices[0].Message.Content) + + t.Logf(*resp.Choices[0].Message.Content) +} diff --git a/sdk/ai/azopenai/client_completions_test.go b/sdk/ai/azopenai/client_completions_test.go index db7695638416..13242d371f2c 100644 --- a/sdk/ai/azopenai/client_completions_test.go +++ b/sdk/ai/azopenai/client_completions_test.go @@ -37,10 +37,10 @@ func testGetCompletions(t *testing.T, client *azopenai.Client, isAzure bool) { } resp, err := client.GetCompletions(context.Background(), azopenai.CompletionsOptions{ - Prompt: []string{"What is Azure OpenAI?"}, - MaxTokens: to.Ptr(int32(2048 - 127)), - Temperature: to.Ptr(float32(0.0)), - Deployment: deploymentID, + Prompt: []string{"What is Azure OpenAI?"}, + MaxTokens: to.Ptr(int32(2048 - 127)), + Temperature: to.Ptr(float32(0.0)), + DeploymentName: &deploymentID, }, nil) skipNowIfThrottled(t, err) require.NoError(t, err) @@ -64,9 +64,9 @@ func testGetCompletions(t *testing.T, client *azopenai.Client, isAzure bool) { } if isAzure { - want.Choices[0].ContentFilterResults = (*azopenai.ChoiceContentFilterResults)(safeContentFilter) - want.PromptFilterResults = []azopenai.PromptFilterResult{ - {PromptIndex: 
to.Ptr[int32](0), ContentFilterResults: (*azopenai.PromptFilterResultContentFilterResults)(safeContentFilter)}, + want.Choices[0].ContentFilterResults = (*azopenai.ContentFilterResultsForChoice)(safeContentFilter) + want.PromptFilterResults = []azopenai.ContentFilterResultsForPrompt{ + {PromptIndex: to.Ptr[int32](0), ContentFilterResults: safeContentFilterResultDetailsForPrompt}, } } diff --git a/sdk/ai/azopenai/client_embeddings_test.go b/sdk/ai/azopenai/client_embeddings_test.go index b407d510afa4..31fdd9b46088 100644 --- a/sdk/ai/azopenai/client_embeddings_test.go +++ b/sdk/ai/azopenai/client_embeddings_test.go @@ -9,6 +9,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/stretchr/testify/require" ) @@ -16,7 +17,7 @@ func TestClient_GetEmbeddings_InvalidModel(t *testing.T) { client := newTestClient(t, azureOpenAI.Endpoint) _, err := client.GetEmbeddings(context.Background(), azopenai.EmbeddingsOptions{ - Deployment: "thisdoesntexist", + DeploymentName: to.Ptr("thisdoesntexist"), }, nil) var respErr *azcore.ResponseError @@ -60,8 +61,8 @@ func testGetEmbeddings(t *testing.T, client *azopenai.Client, modelOrDeploymentI ctx: context.TODO(), deploymentID: modelOrDeploymentID, body: azopenai.EmbeddingsOptions{ - Input: []string{"\"Your text string goes here\""}, - Deployment: modelOrDeploymentID, + Input: []string{"\"Your text string goes here\""}, + DeploymentName: &modelOrDeploymentID, }, options: nil, }, diff --git a/sdk/ai/azopenai/client_functions_test.go b/sdk/ai/azopenai/client_functions_test.go index 32096d01e31a..f18e07e9b422 100644 --- a/sdk/ai/azopenai/client_functions_test.go +++ b/sdk/ai/azopenai/client_functions_test.go @@ -30,21 +30,48 @@ func TestGetChatCompletions_usingFunctions(t *testing.T) { t.Run("OpenAI", func(t *testing.T) { chatClient := newOpenAIClientForTest(t) - testChatCompletionsFunctions(t, chatClient, openAI) + testChatCompletionsFunctions(t, chatClient, openAI.ChatCompletions) + testChatCompletionsFunctions(t, chatClient, openAI.ChatCompletionsLegacyFunctions) }) t.Run("AzureOpenAI", func(t *testing.T) { chatClient := newAzureOpenAIClientForTest(t, azureOpenAI) - testChatCompletionsFunctions(t, chatClient, azureOpenAI) + testChatCompletionsFunctions(t, chatClient, azureOpenAI.ChatCompletions) }) } -func testChatCompletionsFunctions(t *testing.T, chatClient *azopenai.Client, tv testVars) { +func TestGetChatCompletions_usingFunctions_legacy(t *testing.T) { + t.Run("OpenAI", func(t *testing.T) { + chatClient := newOpenAIClientForTest(t) + testChatCompletionsFunctionsOlderStyle(t, chatClient, openAI.ChatCompletionsLegacyFunctions) + testChatCompletionsFunctionsOlderStyle(t, chatClient, openAI.ChatCompletions) + }) + + t.Run("AzureOpenAI", func(t *testing.T) { + chatClient := newAzureOpenAIClientForTest(t, azureOpenAI) + testChatCompletionsFunctionsOlderStyle(t, chatClient, azureOpenAI.ChatCompletionsLegacyFunctions) + }) +} + +func TestGetChatCompletions_usingFunctions_streaming(t *testing.T) { + // https://platform.openai.com/docs/guides/gpt/function-calling + + t.Run("OpenAI", func(t *testing.T) { + chatClient := newOpenAIClientForTest(t) + testChatCompletionsFunctionsStreaming(t, chatClient, openAI) + }) + + t.Run("AzureOpenAI", func(t *testing.T) { + chatClient := newAzureOpenAIClientForTest(t, azureOpenAI) + testChatCompletionsFunctionsStreaming(t, chatClient, azureOpenAI) + }) +} + +func testChatCompletionsFunctionsOlderStyle(t 
*testing.T, client *azopenai.Client, deploymentName string) { body := azopenai.ChatCompletionsOptions{ - Deployment: tv.ChatCompletions, - Messages: []azopenai.ChatMessage{ - { - Role: to.Ptr(azopenai.ChatRoleUser), + DeploymentName: &deploymentName, + Messages: []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestAssistantMessage{ Content: to.Ptr("What's the weather like in Boston, MA, in celsius?"), }, }, @@ -74,7 +101,7 @@ func testChatCompletionsFunctions(t *testing.T, chatClient *azopenai.Client, tv Temperature: to.Ptr[float32](0.0), } - resp, err := chatClient.GetChatCompletions(context.Background(), body, nil) + resp, err := client.GetChatCompletions(context.Background(), body, nil) require.NoError(t, err) funcCall := resp.ChatCompletions.Choices[0].Message.FunctionCall @@ -92,3 +119,145 @@ func testChatCompletionsFunctions(t *testing.T, chatClient *azopenai.Client, tv require.Equal(t, location{Location: "Boston, MA", Unit: "celsius"}, *funcParams) } + +func testChatCompletionsFunctions(t *testing.T, chatClient *azopenai.Client, deploymentName string) { + body := azopenai.ChatCompletionsOptions{ + DeploymentName: &deploymentName, + Messages: []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestAssistantMessage{ + Content: to.Ptr("What's the weather like in Boston, MA, in celsius?"), + }, + }, + Tools: []azopenai.ChatCompletionsToolDefinitionClassification{ + &azopenai.ChatCompletionsFunctionToolDefinition{ + Function: &azopenai.FunctionDefinition{ + Name: to.Ptr("get_current_weather"), + Description: to.Ptr("Get the current weather in a given location"), + Parameters: Params{ + Required: []string{"location"}, + Type: "object", + Properties: map[string]ParamProperty{ + "location": { + Type: "string", + Description: "The city and state, e.g. San Francisco, CA", + }, + "unit": { + Type: "string", + Enum: []string{"celsius", "fahrenheit"}, + }, + }, + }, + }, + }, + }, + Temperature: to.Ptr[float32](0.0), + } + + resp, err := chatClient.GetChatCompletions(context.Background(), body, nil) + require.NoError(t, err) + + funcCall := resp.Choices[0].Message.ToolCalls[0].(*azopenai.ChatCompletionsFunctionToolCall).Function + + require.Equal(t, "get_current_weather", *funcCall.Name) + + type location struct { + Location string `json:"location"` + Unit string `json:"unit"` + } + + var funcParams *location + err = json.Unmarshal([]byte(*funcCall.Arguments), &funcParams) + require.NoError(t, err) + + require.Equal(t, location{Location: "Boston, MA", Unit: "celsius"}, *funcParams) +} + +func testChatCompletionsFunctionsStreaming(t *testing.T, chatClient *azopenai.Client, tv testVars) { + body := azopenai.ChatCompletionsOptions{ + DeploymentName: &tv.ChatCompletions, + Messages: []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestAssistantMessage{ + Content: to.Ptr("What's the weather like in Boston, MA, in celsius?"), + }, + }, + Tools: []azopenai.ChatCompletionsToolDefinitionClassification{ + &azopenai.ChatCompletionsFunctionToolDefinition{ + Function: &azopenai.FunctionDefinition{ + Name: to.Ptr("get_current_weather"), + Description: to.Ptr("Get the current weather in a given location"), + Parameters: Params{ + Required: []string{"location"}, + Type: "object", + Properties: map[string]ParamProperty{ + "location": { + Type: "string", + Description: "The city and state, e.g. 
San Francisco, CA", + }, + "unit": { + Type: "string", + Enum: []string{"celsius", "fahrenheit"}, + }, + }, + }, + }, + }, + }, + Temperature: to.Ptr[float32](0.0), + } + + resp, err := chatClient.GetChatCompletionsStream(context.Background(), body, nil) + require.NoError(t, err) + require.NotEmpty(t, resp) + + defer func() { + err := resp.ChatCompletionsStream.Close() + require.NoError(t, err) + }() + + // these results are way trickier than they should be, but we have to accumulate across + // multiple fields to get a full result. + + funcCall := &azopenai.FunctionCall{ + Arguments: to.Ptr(""), + Name: to.Ptr(""), + } + + for { + streamResp, err := resp.ChatCompletionsStream.Read() + require.NoError(t, err) + + if len(streamResp.Choices) == 0 { + // there are prompt filter results. + require.NotEmpty(t, streamResp.PromptFilterResults) + continue + } + + if streamResp.Choices[0].FinishReason != nil { + break + } + + var functionToolCall *azopenai.ChatCompletionsFunctionToolCall = streamResp.Choices[0].Delta.ToolCalls[0].(*azopenai.ChatCompletionsFunctionToolCall) + require.NotEmpty(t, functionToolCall.Function) + + if functionToolCall.Function.Arguments != nil { + *funcCall.Arguments += *functionToolCall.Function.Arguments + } + + if functionToolCall.Function.Name != nil { + *funcCall.Name += *functionToolCall.Function.Name + } + } + + require.Equal(t, "get_current_weather", *funcCall.Name) + + type location struct { + Location string `json:"location"` + Unit string `json:"unit"` + } + + var funcParams *location + err = json.Unmarshal([]byte(*funcCall.Arguments), &funcParams) + require.NoError(t, err) + + require.Equal(t, location{Location: "Boston, MA", Unit: "celsius"}, *funcParams) +} diff --git a/sdk/ai/azopenai/client_rai_test.go b/sdk/ai/azopenai/client_rai_test.go index fa6f239376fe..2b3fcb95a3be 100644 --- a/sdk/ai/azopenai/client_rai_test.go +++ b/sdk/ai/azopenai/client_rai_test.go @@ -16,16 +16,19 @@ import ( "github.com/stretchr/testify/require" ) +// RAI == "responsible AI". This part of the API provides content filtering and +// classification of the failures into categories like Hate, Violence, etc... 
+ func TestClient_GetCompletions_AzureOpenAI_ContentFilter_Response(t *testing.T) { // Scenario: Your API call asks for multiple responses (N>1) and at least 1 of the responses is filtered // https://github.com/MicrosoftDocs/azure-docs/blob/main/articles/cognitive-services/openai/concepts/content-filter.md#scenario-your-api-call-asks-for-multiple-responses-n1-and-at-least-1-of-the-responses-is-filtered client := newAzureOpenAIClientForTest(t, azureOpenAI) resp, err := client.GetCompletions(context.Background(), azopenai.CompletionsOptions{ - Prompt: []string{"How do I rob a bank with violence?"}, - MaxTokens: to.Ptr(int32(2048 - 127)), - Temperature: to.Ptr(float32(0.0)), - Deployment: azureOpenAI.Completions, + Prompt: []string{"How do I rob a bank with violence?"}, + MaxTokens: to.Ptr(int32(2048 - 127)), + Temperature: to.Ptr(float32(0.0)), + DeploymentName: &azureOpenAI.Completions, }, nil) require.Empty(t, resp) @@ -33,31 +36,31 @@ func TestClient_GetCompletions_AzureOpenAI_ContentFilter_Response(t *testing.T) } func TestClient_GetChatCompletions_AzureOpenAI_ContentFilterWithError(t *testing.T) { - client := newAzureOpenAIClientForTest(t, azureOpenAICanary) + client := newTestClient(t, azureOpenAI.ChatCompletionsRAI.Endpoint) resp, err := client.GetChatCompletions(context.Background(), azopenai.ChatCompletionsOptions{ - Messages: []azopenai.ChatMessage{ - {Role: to.Ptr(azopenai.ChatRoleSystem), Content: to.Ptr("You are a helpful assistant.")}, - {Role: to.Ptr(azopenai.ChatRoleUser), Content: to.Ptr("How do I rob a bank with violence?")}, + Messages: []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestSystemMessage{Content: to.Ptr("You are a helpful assistant.")}, + &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("How do I rob a bank with violence?")}, }, - MaxTokens: to.Ptr(int32(2048 - 127)), - Temperature: to.Ptr(float32(0.0)), - Deployment: azureOpenAICanary.ChatCompletions, + MaxTokens: to.Ptr(int32(2048 - 127)), + Temperature: to.Ptr(float32(0.0)), + DeploymentName: &azureOpenAI.ChatCompletionsRAI.Model, }, nil) require.Empty(t, resp) assertContentFilterError(t, err, true) } func TestClient_GetChatCompletions_AzureOpenAI_ContentFilter_WithResponse(t *testing.T) { - client := newAzureOpenAIClientForTest(t, azureOpenAICanary) + client := newTestClient(t, azureOpenAI.ChatCompletionsRAI.Endpoint) resp, err := client.GetChatCompletions(context.Background(), azopenai.ChatCompletionsOptions{ - Messages: []azopenai.ChatMessage{ - {Role: to.Ptr(azopenai.ChatRoleUser), Content: to.Ptr("How do I cook a bell pepper?")}, + Messages: []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("How do I cook a bell pepper?")}, }, - MaxTokens: to.Ptr(int32(2048 - 127)), - Temperature: to.Ptr(float32(0.0)), - Deployment: azureOpenAICanary.ChatCompletions, + MaxTokens: to.Ptr(int32(2048 - 127)), + Temperature: to.Ptr(float32(0.0)), + DeploymentName: &azureOpenAI.ChatCompletionsRAI.Model, }, nil) require.NoError(t, err) @@ -78,6 +81,8 @@ func assertContentFilterError(t *testing.T, err error, requireAnnotations bool) require.ErrorAs(t, err, &contentFilterErr) if requireAnnotations { + require.NotNil(t, contentFilterErr.ContentFilterResults) + require.Equal(t, &azopenai.ContentFilterResult{Filtered: to.Ptr(false), Severity: to.Ptr(azopenai.ContentFilterSeveritySafe)}, contentFilterErr.ContentFilterResults.Hate) require.Equal(t, &azopenai.ContentFilterResult{Filtered: 
to.Ptr(false), Severity: to.Ptr(azopenai.ContentFilterSeveritySafe)}, contentFilterErr.ContentFilterResults.SelfHarm) require.Equal(t, &azopenai.ContentFilterResult{Filtered: to.Ptr(false), Severity: to.Ptr(azopenai.ContentFilterSeveritySafe)}, contentFilterErr.ContentFilterResults.Sexual) @@ -85,7 +90,14 @@ func assertContentFilterError(t *testing.T, err error, requireAnnotations bool) } } -var safeContentFilter = &azopenai.ChatChoiceContentFilterResults{ +var safeContentFilter = &azopenai.ContentFilterResultsForChoice{ + Hate: &azopenai.ContentFilterResult{Filtered: to.Ptr(false), Severity: to.Ptr(azopenai.ContentFilterSeveritySafe)}, + SelfHarm: &azopenai.ContentFilterResult{Filtered: to.Ptr(false), Severity: to.Ptr(azopenai.ContentFilterSeveritySafe)}, + Sexual: &azopenai.ContentFilterResult{Filtered: to.Ptr(false), Severity: to.Ptr(azopenai.ContentFilterSeveritySafe)}, + Violence: &azopenai.ContentFilterResult{Filtered: to.Ptr(false), Severity: to.Ptr(azopenai.ContentFilterSeveritySafe)}, +} + +var safeContentFilterResultDetailsForPrompt = &azopenai.ContentFilterResultDetailsForPrompt{ Hate: &azopenai.ContentFilterResult{Filtered: to.Ptr(false), Severity: to.Ptr(azopenai.ContentFilterSeveritySafe)}, SelfHarm: &azopenai.ContentFilterResult{Filtered: to.Ptr(false), Severity: to.Ptr(azopenai.ContentFilterSeveritySafe)}, Sexual: &azopenai.ContentFilterResult{Filtered: to.Ptr(false), Severity: to.Ptr(azopenai.ContentFilterSeveritySafe)}, diff --git a/sdk/ai/azopenai/client_shared_test.go b/sdk/ai/azopenai/client_shared_test.go index 6ff39b5aaa8c..86528fa8f02d 100644 --- a/sdk/ai/azopenai/client_shared_test.go +++ b/sdk/ai/azopenai/client_shared_test.go @@ -23,13 +23,8 @@ import ( ) var ( - azureOpenAI testVars - azureOpenAICanary testVars - openAI testVars - azureWhisper endpoint - azureWhisperModel string - openAIWhisper endpoint - openAIWhisperModel string + azureOpenAI testVars + openAI testVars ) type endpoint struct { @@ -38,6 +33,28 @@ type endpoint struct { Azure bool } +type testVars struct { + Endpoint endpoint + Completions string + ChatCompletions string + ChatCompletionsLegacyFunctions string + Embeddings string + Cognitive azopenai.AzureCognitiveSearchChatExtensionConfiguration + Whisper endpointWithModel + DallE endpointWithModel + + ChatCompletionsRAI endpointWithModel // at the moment this is Azure only + + // "own your data" - bringing in Azure resources as part of a chat completions + // request. + ChatCompletionsOYD endpointWithModel +} + +type endpointWithModel struct { + Endpoint endpoint + Model string +} + type testClientOption func(opt *azopenai.ClientOptions) func withForgivingRetryOption() testClientOption { @@ -78,41 +95,81 @@ func newTestClient(t *testing.T, ep endpoint, options ...testClientOption) *azop } } -type testVars struct { - Endpoint endpoint - Completions string - ChatCompletions string - Embeddings string - Cognitive azopenai.AzureCognitiveSearchChatExtensionConfiguration +// getEndpointWithModel retrieves details for an endpoint and a model. +// - res - the resource type for a particular endpoint. Ex: "DALLE". 
+// +// For example, if azure is true we'll load these environment values based on res: +// - AOAI_DALLE_ENDPOINT +// - AOAI_DALLE_API_KEY +// - AOAI_DALLE_MODEL +// +// if azure is false we'll load these environment values based on res: +// - OPENAI_ENDPOINT +// - OPENAI_API_KEY +// - OPENAI_DALLE_MODEL +func getEndpointWithModel(res string, isAzure bool) endpointWithModel { + var ep endpointWithModel + if isAzure { + // during development resources are often shifted between different + // internal Azure OpenAI resources. + ep = endpointWithModel{ + Endpoint: endpoint{ + URL: getRequired("AOAI_" + res + "_ENDPOINT"), + APIKey: getRequired("AOAI_" + res + "_API_KEY"), + Azure: true, + }, + Model: getRequired("AOAI_" + res + "_MODEL"), + } + } else { + ep = endpointWithModel{ + Endpoint: endpoint{ + URL: getRequired("OPENAI_ENDPOINT"), + APIKey: getRequired("OPENAI_API_KEY"), + Azure: false, + }, + Model: getRequired("OPENAI_" + res + "_MODEL"), + } + } + + if !strings.HasSuffix(ep.Endpoint.URL, "/") { + // (this just makes recording replacement easier) + ep.Endpoint.URL += "/" + } + + return ep } -func newTestVars(prefix string, isCanary bool) testVars { +func newTestVars(prefix string) testVars { azure := prefix == "AOAI" - suffix := "" - - if isCanary { - suffix += "_CANARY" - } tv := testVars{ Endpoint: endpoint{ - URL: getRequired(prefix + "_ENDPOINT" + suffix), - APIKey: getRequired(prefix + "_API_KEY" + suffix), + URL: getRequired(prefix + "_ENDPOINT"), + APIKey: getRequired(prefix + "_API_KEY"), Azure: azure, }, - Completions: getRequired(prefix + "_COMPLETIONS_MODEL" + suffix), - - // ex: gpt-4-0613 - ChatCompletions: getRequired(prefix + "_CHAT_COMPLETIONS_MODEL" + suffix), - - // ex: embedding - Embeddings: getRequired(prefix + "_EMBEDDINGS_MODEL" + suffix), + Completions: getRequired(prefix + "_COMPLETIONS_MODEL"), + ChatCompletions: getRequired(prefix + "_CHAT_COMPLETIONS_MODEL"), + ChatCompletionsLegacyFunctions: getRequired(prefix + "_CHAT_COMPLETIONS_MODEL_LEGACY_FUNCTIONS"), + Embeddings: getRequired(prefix + "_EMBEDDINGS_MODEL"), Cognitive: azopenai.AzureCognitiveSearchChatExtensionConfiguration{ - Endpoint: to.Ptr(getRequired("COGNITIVE_SEARCH_API_ENDPOINT")), - IndexName: to.Ptr(getRequired("COGNITIVE_SEARCH_API_INDEX")), - Key: to.Ptr(getRequired("COGNITIVE_SEARCH_API_KEY")), + Parameters: &azopenai.AzureCognitiveSearchChatExtensionParameters{ + Endpoint: to.Ptr(getRequired("COGNITIVE_SEARCH_API_ENDPOINT")), + IndexName: to.Ptr(getRequired("COGNITIVE_SEARCH_API_INDEX")), + Authentication: &azopenai.OnYourDataAPIKeyAuthenticationOptions{ + Key: to.Ptr(getRequired("COGNITIVE_SEARCH_API_KEY")), + }, + }, }, + + DallE: getEndpointWithModel("DALLE", azure), + Whisper: getEndpointWithModel("WHISPER", azure), + } + + if azure { + tv.ChatCompletionsRAI = getEndpointWithModel("CHAT_COMPLETIONS_RAI", azure) + tv.ChatCompletionsOYD = getEndpointWithModel("OYD", azure) } if tv.Endpoint.URL != "" && !strings.HasSuffix(tv.Endpoint.URL, "/") { @@ -138,67 +195,71 @@ func initEnvVars() { Azure: true, } - azureOpenAICanary.Endpoint = endpoint{ - URL: fakeEndpoint, - APIKey: fakeAPIKey, - Azure: true, + azureOpenAI.Whisper = endpointWithModel{ + Endpoint: azureOpenAI.Endpoint, + Model: "whisper-deployment", } - azureWhisper = endpoint{ - URL: fakeEndpoint, - APIKey: fakeAPIKey, - Azure: true, + azureOpenAI.ChatCompletionsRAI = endpointWithModel{ + Endpoint: azureOpenAI.Endpoint, + Model: "gpt-4", } - azureWhisperModel = "whisper-deployment" + azureOpenAI.ChatCompletionsOYD = 
endpointWithModel{ + Endpoint: azureOpenAI.Endpoint, + Model: "gpt-4", + } + + azureOpenAI.DallE = endpointWithModel{ + Endpoint: azureOpenAI.Endpoint, + Model: "dall-e-3", + } openAI.Endpoint = endpoint{ APIKey: fakeAPIKey, URL: fakeEndpoint, } - openAIWhisperModel = "whisper-1" + openAI.Whisper = endpointWithModel{ + Endpoint: endpoint{ + APIKey: fakeAPIKey, + URL: fakeEndpoint, + }, + Model: "whisper-1", + } - azureOpenAICanary.Completions = "" - azureOpenAICanary.ChatCompletions = "gpt-4" + openAI.DallE = endpointWithModel{ + Endpoint: openAI.Endpoint, + Model: "dall-e-3", + } azureOpenAI.Completions = "text-davinci-003" openAI.Completions = "text-davinci-003" - azureOpenAI.ChatCompletions = "gpt-4-0613" + azureOpenAI.ChatCompletions = "gpt-35-turbo-0613" + azureOpenAI.ChatCompletionsLegacyFunctions = "gpt-4-0613" openAI.ChatCompletions = "gpt-4-0613" + openAI.ChatCompletionsLegacyFunctions = "gpt-4-0613" openAI.Embeddings = "text-embedding-ada-002" azureOpenAI.Embeddings = "text-embedding-ada-002" azureOpenAI.Cognitive = azopenai.AzureCognitiveSearchChatExtensionConfiguration{ - Endpoint: to.Ptr(fakeCognitiveEndpoint), - IndexName: to.Ptr(fakeCognitiveIndexName), - Key: to.Ptr(fakeAPIKey), + Parameters: &azopenai.AzureCognitiveSearchChatExtensionParameters{ + Endpoint: to.Ptr(fakeCognitiveEndpoint), + IndexName: to.Ptr(fakeCognitiveIndexName), + Authentication: &azopenai.OnYourDataAPIKeyAuthenticationOptions{ + Key: to.Ptr(fakeAPIKey), + }, + }, } - azureOpenAICanary.Cognitive = azureOpenAI.Cognitive } else { if err := godotenv.Load(); err != nil { fmt.Printf("Failed to load .env file: %s\n", err) } - azureOpenAI = newTestVars("AOAI", false) - azureOpenAICanary = newTestVars("AOAI", true) - openAI = newTestVars("OPENAI", false) - - azureWhisper = endpoint{ - URL: getRequired("AOAI_ENDPOINT_WHISPER"), - APIKey: getRequired("AOAI_API_KEY_WHISPER"), - Azure: true, - } - azureWhisperModel = getRequired("AOAI_MODEL_WHISPER") - - openAIWhisper = endpoint{ - URL: getRequired("OPENAI_ENDPOINT"), - APIKey: getRequired("OPENAI_API_KEY"), - Azure: true, - } - openAIWhisperModel = "whisper-1" + azureOpenAI = newTestVars("AOAI") + openAI = newTestVars("OPENAI") } } @@ -218,15 +279,19 @@ func newRecordingTransporter(t *testing.T) policy.Transporter { err = recording.AddHeaderRegexSanitizer("User-Agent", "fake-user-agent", "", nil) require.NoError(t, err) - // "RequestUri": "https://openai-shared.openai.azure.com/openai/deployments/text-davinci-003/completions?api-version=2023-03-15-preview", - err = recording.AddURISanitizer(fakeEndpoint, regexp.QuoteMeta(azureOpenAI.Endpoint.URL), nil) - require.NoError(t, err) - - err = recording.AddURISanitizer(fakeEndpoint, regexp.QuoteMeta(azureOpenAICanary.Endpoint.URL), nil) - require.NoError(t, err) + endpoints := []string{ + azureOpenAI.Endpoint.URL, + azureOpenAI.ChatCompletionsRAI.Endpoint.URL, + azureOpenAI.Whisper.Endpoint.URL, + azureOpenAI.DallE.Endpoint.URL, + azureOpenAI.ChatCompletionsOYD.Endpoint.URL, + azureOpenAI.ChatCompletionsRAI.Endpoint.URL, + } - err = recording.AddURISanitizer(fakeEndpoint, regexp.QuoteMeta(azureWhisper.URL), nil) - require.NoError(t, err) + for _, ep := range endpoints { + err = recording.AddURISanitizer(fakeEndpoint, regexp.QuoteMeta(ep), nil) + require.NoError(t, err) + } err = recording.AddURISanitizer("/openai/operations/images/00000000-AAAA-BBBB-CCCC-DDDDDDDDDDDD", "/openai/operations/images/[A-Za-z-0-9]+", nil) require.NoError(t, err) @@ -238,17 +303,17 @@ func newRecordingTransporter(t *testing.T) 
policy.Transporter { err = recording.AddGeneralRegexSanitizer( fmt.Sprintf(`"endpoint": "%s"`, fakeCognitiveEndpoint), - fmt.Sprintf(`"endpoint":\s*"%s"`, *azureOpenAI.Cognitive.Endpoint), nil) + fmt.Sprintf(`"endpoint":\s*"%s"`, *azureOpenAI.Cognitive.Parameters.Endpoint), nil) require.NoError(t, err) err = recording.AddGeneralRegexSanitizer( fmt.Sprintf(`"indexName": "%s"`, fakeCognitiveIndexName), - fmt.Sprintf(`"indexName":\s*"%s"`, *azureOpenAI.Cognitive.IndexName), nil) + fmt.Sprintf(`"indexName":\s*"%s"`, *azureOpenAI.Cognitive.Parameters.IndexName), nil) require.NoError(t, err) err = recording.AddGeneralRegexSanitizer( fmt.Sprintf(`"key": "%s"`, fakeAPIKey), - fmt.Sprintf(`"key":\s*"%s"`, *azureOpenAI.Cognitive.Key), nil) + fmt.Sprintf(`"key":\s*"%s"`, *azureOpenAI.Cognitive.Parameters.Authentication.(*azopenai.OnYourDataAPIKeyAuthenticationOptions).Key), nil) require.NoError(t, err) } @@ -268,11 +333,14 @@ func newRecordingTransporter(t *testing.T) policy.Transporter { func newClientOptionsForTest(t *testing.T) *azopenai.ClientOptions { co := &azopenai.ClientOptions{} + // Useful when debugging responses. + co.Logging.IncludeBody = true + if recording.GetRecordMode() == recording.LiveMode { keyLogPath := os.Getenv("SSLKEYLOGFILE") if keyLogPath == "" { - return &azopenai.ClientOptions{} + return co } keyLogWriter, err := os.OpenFile(keyLogPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0777) @@ -292,13 +360,9 @@ func newClientOptionsForTest(t *testing.T) *azopenai.ClientOptions { co.Transport = newRecordingTransporter(t) } - // Useful when debugging responses. - //co.Logging.IncludeBody = true return co } -// newAzureOpenAIClientForTest can create a client pointing to the "canary" endpoint (basically - leading fixes or features) -// or the current deployed endpoint. func newAzureOpenAIClientForTest(t *testing.T, tv testVars) *azopenai.Client { return newTestClient(t, tv.Endpoint) } diff --git a/sdk/ai/azopenai/client_test.go b/sdk/ai/azopenai/client_test.go index 64d3122ca3f3..6c514bcc5c33 100644 --- a/sdk/ai/azopenai/client_test.go +++ b/sdk/ai/azopenai/client_test.go @@ -26,13 +26,12 @@ func TestClient_OpenAI_InvalidModel(t *testing.T) { chatClient := newOpenAIClientForTest(t) _, err := chatClient.GetChatCompletions(context.Background(), azopenai.ChatCompletionsOptions{ - Messages: []azopenai.ChatMessage{ - { - Role: to.Ptr(azopenai.ChatRoleSystem), + Messages: []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestSystemMessage{ Content: to.Ptr("hello"), }, }, - Deployment: "non-existent-model", + DeploymentName: to.Ptr("non-existent-model"), }, nil) var respErr *azcore.ResponseError diff --git a/sdk/ai/azopenai/constants.go b/sdk/ai/azopenai/constants.go index 1d30207f5c26..d582f6e468b2 100644 --- a/sdk/ai/azopenai/constants.go +++ b/sdk/ai/azopenai/constants.go @@ -12,8 +12,10 @@ package azopenai type AudioTaskLabel string const ( + // AudioTaskLabelTranscribe - Accompanying response data resulted from an audio transcription task. AudioTaskLabelTranscribe AudioTaskLabel = "transcribe" - AudioTaskLabelTranslate AudioTaskLabel = "translate" + // AudioTaskLabelTranslate - Accompanying response data resulted from an audio translation task. + AudioTaskLabelTranslate AudioTaskLabel = "translate" ) // PossibleAudioTaskLabelValues returns the possible values for the AudioTaskLabel const type. 
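The transcription formats enumerated in the next hunk are what the audio tests pass via ResponseFormat. A minimal sketch of a transcription call using the new pointer-based DeploymentName field; the helper name, deployment, and file path are illustrative placeholders supplied by the caller:

```go
package example

import (
	"context"
	"fmt"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// transcribeMP3 is an illustrative helper (not part of the SDK). It sends an
// MP3 file for transcription and prints the returned text.
func transcribeMP3(ctx context.Context, client *azopenai.Client, deployment, path string) error {
	mp3Bytes, err := os.ReadFile(path)
	if err != nil {
		return err
	}

	format := azopenai.AudioTranscriptionFormatVerboseJSON

	resp, err := client.GetAudioTranscription(ctx, azopenai.AudioTranscriptionOptions{
		DeploymentName: to.Ptr(deployment),
		File:           mp3Bytes,
		ResponseFormat: &format,
		Language:       to.Ptr("en"),
	}, nil)
	if err != nil {
		return err
	}

	// Text is populated for every response format; verbose_json also includes
	// duration, language, and segment metadata.
	fmt.Println(*resp.Text)
	return nil
}
```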
@@ -28,11 +30,19 @@ func PossibleAudioTaskLabelValues() []AudioTaskLabel { type AudioTranscriptionFormat string const ( - AudioTranscriptionFormatJSON AudioTranscriptionFormat = "json" - AudioTranscriptionFormatSrt AudioTranscriptionFormat = "srt" - AudioTranscriptionFormatText AudioTranscriptionFormat = "text" + // AudioTranscriptionFormatJSON - Use a response body that is a JSON object containing a single 'text' field for the transcription. + AudioTranscriptionFormatJSON AudioTranscriptionFormat = "json" + // AudioTranscriptionFormatSrt - Use a response body that is plain text in SubRip (SRT) format that also includes timing information. + AudioTranscriptionFormatSrt AudioTranscriptionFormat = "srt" + // AudioTranscriptionFormatText - Use a response body that is plain text containing the raw, unannotated transcription. + AudioTranscriptionFormatText AudioTranscriptionFormat = "text" + // AudioTranscriptionFormatVerboseJSON - Use a response body that is a JSON object containing transcription text along with + // timing, segments, and other + // metadata. AudioTranscriptionFormatVerboseJSON AudioTranscriptionFormat = "verbose_json" - AudioTranscriptionFormatVtt AudioTranscriptionFormat = "vtt" + // AudioTranscriptionFormatVtt - Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes + // timing information. + AudioTranscriptionFormatVtt AudioTranscriptionFormat = "vtt" ) // PossibleAudioTranscriptionFormatValues returns the possible values for the AudioTranscriptionFormat const type. @@ -50,11 +60,19 @@ func PossibleAudioTranscriptionFormatValues() []AudioTranscriptionFormat { type AudioTranslationFormat string const ( - AudioTranslationFormatJSON AudioTranslationFormat = "json" - AudioTranslationFormatSrt AudioTranslationFormat = "srt" - AudioTranslationFormatText AudioTranslationFormat = "text" + // AudioTranslationFormatJSON - Use a response body that is a JSON object containing a single 'text' field for the translation. + AudioTranslationFormatJSON AudioTranslationFormat = "json" + // AudioTranslationFormatSrt - Use a response body that is plain text in SubRip (SRT) format that also includes timing information. + AudioTranslationFormatSrt AudioTranslationFormat = "srt" + // AudioTranslationFormatText - Use a response body that is plain text containing the raw, unannotated translation. + AudioTranslationFormatText AudioTranslationFormat = "text" + // AudioTranslationFormatVerboseJSON - Use a response body that is a JSON object containing translation text along with timing, + // segments, and other + // metadata. AudioTranslationFormatVerboseJSON AudioTranslationFormat = "verbose_json" - AudioTranslationFormatVtt AudioTranslationFormat = "vtt" + // AudioTranslationFormatVtt - Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes + // timing information. + AudioTranslationFormatVtt AudioTranslationFormat = "vtt" ) // PossibleAudioTranslationFormatValues returns the possible values for the AudioTranslationFormat const type. @@ -74,30 +92,27 @@ func PossibleAudioTranslationFormatValues() []AudioTranslationFormat { type AzureChatExtensionType string const ( - // AzureChatExtensionTypeAzureCognitiveSearch enables the use of an Azure Cognitive Search index with chat completions. - // [AzureChatExtensionConfiguration.Parameter] should be of type [AzureCognitiveSearchChatExtensionConfiguration]. 
+ // AzureChatExtensionTypeAzureCognitiveSearch - Represents the use of Azure Cognitive Search as an Azure OpenAI chat extension. AzureChatExtensionTypeAzureCognitiveSearch AzureChatExtensionType = "AzureCognitiveSearch" + // AzureChatExtensionTypeAzureCosmosDB - Represents the use of Azure Cosmos DB as an Azure OpenAI chat extension. + AzureChatExtensionTypeAzureCosmosDB AzureChatExtensionType = "AzureCosmosDB" + // AzureChatExtensionTypeAzureMachineLearningIndex - Represents the use of Azure Machine Learning index as an Azure OpenAI + // chat extension. + AzureChatExtensionTypeAzureMachineLearningIndex AzureChatExtensionType = "AzureMLIndex" + // AzureChatExtensionTypeElasticsearch - Represents the use of Elasticsearch® index as an Azure OpenAI chat extension. + AzureChatExtensionTypeElasticsearch AzureChatExtensionType = "Elasticsearch" + // AzureChatExtensionTypePinecone - Represents the use of Pinecone index as an Azure OpenAI chat extension. + AzureChatExtensionTypePinecone AzureChatExtensionType = "Pinecone" ) // PossibleAzureChatExtensionTypeValues returns the possible values for the AzureChatExtensionType const type. func PossibleAzureChatExtensionTypeValues() []AzureChatExtensionType { return []AzureChatExtensionType{ AzureChatExtensionTypeAzureCognitiveSearch, - } -} - -// AzureCognitiveSearchChatExtensionConfigurationType - The type label to use when configuring Azure OpenAI chat extensions. -// This should typically not be changed from its default value for Azure Cognitive Search. -type AzureCognitiveSearchChatExtensionConfigurationType string - -const ( - AzureCognitiveSearchChatExtensionConfigurationTypeAzureCognitiveSearch AzureCognitiveSearchChatExtensionConfigurationType = "AzureCognitiveSearch" -) - -// PossibleAzureCognitiveSearchChatExtensionConfigurationTypeValues returns the possible values for the AzureCognitiveSearchChatExtensionConfigurationType const type. -func PossibleAzureCognitiveSearchChatExtensionConfigurationTypeValues() []AzureCognitiveSearchChatExtensionConfigurationType { - return []AzureCognitiveSearchChatExtensionConfigurationType{ - AzureCognitiveSearchChatExtensionConfigurationTypeAzureCognitiveSearch, + AzureChatExtensionTypeAzureCosmosDB, + AzureChatExtensionTypeAzureMachineLearningIndex, + AzureChatExtensionTypeElasticsearch, + AzureChatExtensionTypePinecone, } } @@ -106,11 +121,16 @@ func PossibleAzureCognitiveSearchChatExtensionConfigurationTypeValues() []AzureC type AzureCognitiveSearchQueryType string const ( - AzureCognitiveSearchQueryTypeSemantic AzureCognitiveSearchQueryType = "semantic" - AzureCognitiveSearchQueryTypeSimple AzureCognitiveSearchQueryType = "simple" - AzureCognitiveSearchQueryTypeVector AzureCognitiveSearchQueryType = "vector" + // AzureCognitiveSearchQueryTypeSemantic - Represents the semantic query parser for advanced semantic modeling. + AzureCognitiveSearchQueryTypeSemantic AzureCognitiveSearchQueryType = "semantic" + // AzureCognitiveSearchQueryTypeSimple - Represents the default, simple query parser. + AzureCognitiveSearchQueryTypeSimple AzureCognitiveSearchQueryType = "simple" + // AzureCognitiveSearchQueryTypeVector - Represents vector search over computed data. + AzureCognitiveSearchQueryTypeVector AzureCognitiveSearchQueryType = "vector" + // AzureCognitiveSearchQueryTypeVectorSemanticHybrid - Represents a combination of semantic search and vector data querying. 
AzureCognitiveSearchQueryTypeVectorSemanticHybrid AzureCognitiveSearchQueryType = "vectorSemanticHybrid" - AzureCognitiveSearchQueryTypeVectorSimpleHybrid AzureCognitiveSearchQueryType = "vectorSimpleHybrid" + // AzureCognitiveSearchQueryTypeVectorSimpleHybrid - Represents a combination of the simple query strategy with vector data. + AzureCognitiveSearchQueryTypeVectorSimpleHybrid AzureCognitiveSearchQueryType = "vectorSimpleHybrid" ) // PossibleAzureCognitiveSearchQueryTypeValues returns the possible values for the AzureCognitiveSearchQueryType const type. @@ -124,26 +144,77 @@ func PossibleAzureCognitiveSearchQueryTypeValues() []AzureCognitiveSearchQueryTy } } -// azureOpenAIOperationState - The state of a job or item. -type azureOpenAIOperationState string +// ChatCompletionRequestMessageContentPartImageURLDetail - Specifies the detail level of the image. Learn more in the Vision +// guide [/docs/guides/vision/low-or-high-fidelity-image-understanding]. +type ChatCompletionRequestMessageContentPartImageURLDetail string + +const ( + ChatCompletionRequestMessageContentPartImageURLDetailAuto ChatCompletionRequestMessageContentPartImageURLDetail = "auto" + ChatCompletionRequestMessageContentPartImageURLDetailHigh ChatCompletionRequestMessageContentPartImageURLDetail = "high" + ChatCompletionRequestMessageContentPartImageURLDetailLow ChatCompletionRequestMessageContentPartImageURLDetail = "low" +) + +// PossibleChatCompletionRequestMessageContentPartImageURLDetailValues returns the possible values for the ChatCompletionRequestMessageContentPartImageURLDetail const type. +func PossibleChatCompletionRequestMessageContentPartImageURLDetailValues() []ChatCompletionRequestMessageContentPartImageURLDetail { + return []ChatCompletionRequestMessageContentPartImageURLDetail{ + ChatCompletionRequestMessageContentPartImageURLDetailAuto, + ChatCompletionRequestMessageContentPartImageURLDetailHigh, + ChatCompletionRequestMessageContentPartImageURLDetailLow, + } +} + +// ChatCompletionRequestMessageContentPartType - The type of the content part. +type ChatCompletionRequestMessageContentPartType string const ( - azureOpenAIOperationStateCanceled azureOpenAIOperationState = "canceled" - azureOpenAIOperationStateFailed azureOpenAIOperationState = "failed" - azureOpenAIOperationStateNotRunning azureOpenAIOperationState = "notRunning" - azureOpenAIOperationStateRunning azureOpenAIOperationState = "running" - azureOpenAIOperationStateSucceeded azureOpenAIOperationState = "succeeded" + // ChatCompletionRequestMessageContentPartTypeImageURL - Chat content contains an image URL + ChatCompletionRequestMessageContentPartTypeImageURL ChatCompletionRequestMessageContentPartType = "image_url" + // ChatCompletionRequestMessageContentPartTypeText - Chat content contains text + ChatCompletionRequestMessageContentPartTypeText ChatCompletionRequestMessageContentPartType = "text" ) +// PossibleChatCompletionRequestMessageContentPartTypeValues returns the possible values for the ChatCompletionRequestMessageContentPartType const type. +func PossibleChatCompletionRequestMessageContentPartTypeValues() []ChatCompletionRequestMessageContentPartType { + return []ChatCompletionRequestMessageContentPartType{ + ChatCompletionRequestMessageContentPartTypeImageURL, + ChatCompletionRequestMessageContentPartTypeText, + } +} + +// ChatCompletionsResponseFormat - The valid response formats Chat Completions can provide. Used to enable JSON mode. 
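The json_object value is what enables JSON mode. A hedged sketch of requesting it: this assumes ChatCompletionsOptions.ResponseFormat accepts a pointer to this enum, the helper name is illustrative, and the caller-supplied deployment is assumed to be one of the -1106 models that support JSON mode:

```go
package example

import (
	"context"
	"encoding/json"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// getJSONAnswer is an illustrative helper (not part of the SDK). It asks the
// model for a JSON object and unmarshals the reply. Only the structure is
// guaranteed; the contents must still be validated.
func getJSONAnswer(ctx context.Context, client *azopenai.Client, deployment string) (map[string]any, error) {
	resp, err := client.GetChatCompletions(ctx, azopenai.ChatCompletionsOptions{
		Messages: []azopenai.ChatRequestMessageClassification{
			// The prompt itself should also ask for JSON output.
			&azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("List three colors as a JSON object.")},
		},
		// Assumed field/type for enabling JSON mode in this release.
		ResponseFormat: to.Ptr(azopenai.ChatCompletionsResponseFormatJSONObject),
		DeploymentName: &deployment,
	}, nil)
	if err != nil {
		return nil, err
	}

	var out map[string]any
	if err := json.Unmarshal([]byte(*resp.Choices[0].Message.Content), &out); err != nil {
		return nil, err
	}
	return out, nil
}
```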
+type ChatCompletionsResponseFormat string + +const ( + // ChatCompletionsResponseFormatJSONObject - Use a response format that guarantees emission of a valid JSON object. Only structure + // is guaranteed and contents must + // still be validated. + ChatCompletionsResponseFormatJSONObject ChatCompletionsResponseFormat = "json_object" + // ChatCompletionsResponseFormatText - Use the default, plain text response format. + ChatCompletionsResponseFormatText ChatCompletionsResponseFormat = "text" +) + +// PossibleChatCompletionsResponseFormatValues returns the possible values for the ChatCompletionsResponseFormat const type. +func PossibleChatCompletionsResponseFormatValues() []ChatCompletionsResponseFormat { + return []ChatCompletionsResponseFormat{ + ChatCompletionsResponseFormatJSONObject, + ChatCompletionsResponseFormatText, + } +} + // ChatRole - A description of the intended purpose of a message within a chat completions interaction. type ChatRole string const ( + // ChatRoleAssistant - The role that provides responses to system-instructed, user-prompted input. ChatRoleAssistant ChatRole = "assistant" - ChatRoleFunction ChatRole = "function" - ChatRoleSystem ChatRole = "system" - ChatRoleTool ChatRole = "tool" - ChatRoleUser ChatRole = "user" + // ChatRoleFunction - The role that provides function results for chat completions. + ChatRoleFunction ChatRole = "function" + // ChatRoleSystem - The role that instructs or sets the behavior of the assistant. + ChatRoleSystem ChatRole = "system" + // ChatRoleTool - The role that represents extension tool activity within a chat completions operation. + ChatRoleTool ChatRole = "tool" + // ChatRoleUser - The role that provides input for chat completions. + ChatRoleUser ChatRole = "user" ) // PossibleChatRoleValues returns the possible values for the ChatRole const type. @@ -161,19 +232,28 @@ func PossibleChatRoleValues() []ChatRole { type CompletionsFinishReason string const ( - CompletionsFinishReasonContentFilter CompletionsFinishReason = "content_filter" - CompletionsFinishReasonFunctionCall CompletionsFinishReason = "function_call" - CompletionsFinishReasonLength CompletionsFinishReason = "length" - CompletionsFinishReasonStop CompletionsFinishReason = "stop" + // CompletionsFinishReasonContentFiltered - Completions generated a response that was identified as potentially sensitive + // per content + // moderation policies. + CompletionsFinishReasonContentFiltered CompletionsFinishReason = "content_filter" + // CompletionsFinishReasonFunctionCall - Completion ended normally, with the model requesting a function to be called. + CompletionsFinishReasonFunctionCall CompletionsFinishReason = "function_call" + // CompletionsFinishReasonStopped - Completions ended normally and reached its end of token generation. + CompletionsFinishReasonStopped CompletionsFinishReason = "stop" + // CompletionsFinishReasonTokenLimitReached - Completions exhausted available token limits before generation could complete. + CompletionsFinishReasonTokenLimitReached CompletionsFinishReason = "length" + // CompletionsFinishReasonToolCalls - Completion ended with the model calling a provided tool for output. + CompletionsFinishReasonToolCalls CompletionsFinishReason = "tool_calls" ) // PossibleCompletionsFinishReasonValues returns the possible values for the CompletionsFinishReason const type. 
func PossibleCompletionsFinishReasonValues() []CompletionsFinishReason { return []CompletionsFinishReason{ - CompletionsFinishReasonContentFilter, + CompletionsFinishReasonContentFiltered, CompletionsFinishReasonFunctionCall, - CompletionsFinishReasonLength, - CompletionsFinishReasonStop, + CompletionsFinishReasonStopped, + CompletionsFinishReasonTokenLimitReached, + CompletionsFinishReasonToolCalls, } } @@ -181,10 +261,23 @@ func PossibleCompletionsFinishReasonValues() []CompletionsFinishReason { type ContentFilterSeverity string const ( - ContentFilterSeverityHigh ContentFilterSeverity = "high" - ContentFilterSeverityLow ContentFilterSeverity = "low" + // ContentFilterSeverityHigh - Content that displays explicit and severe harmful instructions, actions, + // damage, or abuse; includes endorsement, glorification, or promotion of severe + // harmful acts, extreme or illegal forms of harm, radicalization, or non-consensual + // power exchange or abuse. + ContentFilterSeverityHigh ContentFilterSeverity = "high" + // ContentFilterSeverityLow - Content that expresses prejudiced, judgmental, or opinionated views, includes offensive + // use of language, stereotyping, use cases exploring a fictional world (for example, gaming, + // literature) and depictions at low intensity. + ContentFilterSeverityLow ContentFilterSeverity = "low" + // ContentFilterSeverityMedium - Content that uses offensive, insulting, mocking, intimidating, or demeaning language + // towards specific identity groups, includes depictions of seeking and executing harmful + // instructions, fantasies, glorification, promotion of harm at medium intensity. ContentFilterSeverityMedium ContentFilterSeverity = "medium" - ContentFilterSeveritySafe ContentFilterSeverity = "safe" + // ContentFilterSeveritySafe - Content may be related to violence, self-harm, sexual, or hate categories but the terms + // are used in general, journalistic, scientific, medical, and similar professional contexts, + // which are appropriate for most audiences. + ContentFilterSeveritySafe ContentFilterSeverity = "safe" ) // PossibleContentFilterSeverityValues returns the possible values for the ContentFilterSeverity const type. @@ -197,20 +290,42 @@ func PossibleContentFilterSeverityValues() []ContentFilterSeverity { } } -// FunctionCallPreset - The collection of predefined behaviors for handling request-provided function information in a chat -// completions operation. -type FunctionCallPreset string +// ElasticsearchQueryType - The type of Elasticsearch® retrieval query that should be executed when using it as an Azure OpenAI +// chat extension. +type ElasticsearchQueryType string + +const ( + // ElasticsearchQueryTypeSimple - Represents the default, simple query parser. + ElasticsearchQueryTypeSimple ElasticsearchQueryType = "simple" + // ElasticsearchQueryTypeVector - Represents vector search over computed data. + ElasticsearchQueryTypeVector ElasticsearchQueryType = "vector" +) + +// PossibleElasticsearchQueryTypeValues returns the possible values for the ElasticsearchQueryType const type. +func PossibleElasticsearchQueryTypeValues() []ElasticsearchQueryType { + return []ElasticsearchQueryType{ + ElasticsearchQueryTypeSimple, + ElasticsearchQueryTypeVector, + } +} + +// ImageGenerationQuality - An image generation configuration that specifies how the model should prioritize quality, cost, +// and speed. Only configurable with dall-e-3 models. 
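Quality, style, size, and response format all apply to dall-e-3 image generation through GetImageGenerations. The sketch below is hedged: the option and response field names (Prompt, Quality, Style, Size, ResponseFormat, Data, URL) are assumed rather than shown in this diff, the helper name is illustrative, and the deployment is a caller-supplied placeholder:

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// generateImage is an illustrative helper (not part of the SDK). It requests a
// single hd/vivid wide image and prints its URL.
func generateImage(ctx context.Context, client *azopenai.Client, deployment string) error {
	resp, err := client.GetImageGenerations(ctx, azopenai.ImageGenerationOptions{
		Prompt:         to.Ptr("a watercolor painting of a lighthouse at dusk"),
		DeploymentName: &deployment,
		Quality:        to.Ptr(azopenai.ImageGenerationQualityHd),
		Style:          to.Ptr(azopenai.ImageGenerationStyleVivid),
		Size:           to.Ptr(azopenai.ImageSizeSize1792X1024),
		ResponseFormat: to.Ptr(azopenai.ImageGenerationResponseFormatURL),
	}, nil)
	if err != nil {
		return err
	}

	// Data holds the generated images (URL or base64, depending on ResponseFormat).
	for _, img := range resp.Data {
		if img.URL != nil {
			fmt.Println(*img.URL)
		}
	}
	return nil
}
```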
+type ImageGenerationQuality string const ( - FunctionCallPresetAuto FunctionCallPreset = "auto" - FunctionCallPresetNone FunctionCallPreset = "none" + // ImageGenerationQualityHd - Requests image generation with higher quality, higher cost and lower speed relative to standard. + ImageGenerationQualityHd ImageGenerationQuality = "hd" + // ImageGenerationQualityStandard - Requests image generation with standard, balanced characteristics of quality, cost, and + // speed. + ImageGenerationQualityStandard ImageGenerationQuality = "standard" ) -// PossibleFunctionCallPresetValues returns the possible values for the FunctionCallPreset const type. -func PossibleFunctionCallPresetValues() []FunctionCallPreset { - return []FunctionCallPreset{ - FunctionCallPresetAuto, - FunctionCallPresetNone, +// PossibleImageGenerationQualityValues returns the possible values for the ImageGenerationQuality const type. +func PossibleImageGenerationQualityValues() []ImageGenerationQuality { + return []ImageGenerationQuality{ + ImageGenerationQualityHd, + ImageGenerationQualityStandard, } } @@ -218,32 +333,123 @@ func PossibleFunctionCallPresetValues() []FunctionCallPreset { type ImageGenerationResponseFormat string const ( - ImageGenerationResponseFormatB64JSON ImageGenerationResponseFormat = "b64_json" - ImageGenerationResponseFormatURL ImageGenerationResponseFormat = "url" + // ImageGenerationResponseFormatBase64 - Image generation response items should provide image data as a base64-encoded string. + ImageGenerationResponseFormatBase64 ImageGenerationResponseFormat = "b64_json" + // ImageGenerationResponseFormatURL - Image generation response items should provide a URL from which the image may be retrieved. + ImageGenerationResponseFormatURL ImageGenerationResponseFormat = "url" ) // PossibleImageGenerationResponseFormatValues returns the possible values for the ImageGenerationResponseFormat const type. func PossibleImageGenerationResponseFormatValues() []ImageGenerationResponseFormat { return []ImageGenerationResponseFormat{ - ImageGenerationResponseFormatB64JSON, + ImageGenerationResponseFormatBase64, ImageGenerationResponseFormatURL, } } -// ImageSize - The desired size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. +// ImageGenerationStyle - An image generation configuration that specifies how the model should incorporate realism and other +// visual characteristics. Only configurable with dall-e-3 models. +type ImageGenerationStyle string + +const ( + // ImageGenerationStyleNatural - Requests image generation in a natural style with less preference for dramatic and hyper-realistic + // characteristics. + ImageGenerationStyleNatural ImageGenerationStyle = "natural" + // ImageGenerationStyleVivid - Requests image generation in a vivid style with a higher preference for dramatic and hyper-realistic + // characteristics. + ImageGenerationStyleVivid ImageGenerationStyle = "vivid" +) + +// PossibleImageGenerationStyleValues returns the possible values for the ImageGenerationStyle const type. +func PossibleImageGenerationStyleValues() []ImageGenerationStyle { + return []ImageGenerationStyle{ + ImageGenerationStyleNatural, + ImageGenerationStyleVivid, + } +} + +// ImageSize - The desired size of generated images. type ImageSize string const ( - ImageSize512x512 ImageSize = "512x512" - ImageSize1024x1024 ImageSize = "1024x1024" - ImageSize256x256 ImageSize = "256x256" + // ImageSizeSize1024X1024 - A standard, square image size of 1024x1024 pixels. 
+ // Supported by both dall-e-2 and dall-e-3 models. + ImageSizeSize1024X1024 ImageSize = "1024x1024" + // ImageSizeSize1024X1792 - A taller image size of 1024x1792 pixels. + // Only supported with dall-e-3 models. + ImageSizeSize1024X1792 ImageSize = "1024x1792" + // ImageSizeSize1792X1024 - A wider image size of 1792x1024 pixels. + // Only supported with dall-e-3 models. + ImageSizeSize1792X1024 ImageSize = "1792x1024" + // ImageSizeSize256X256 - A very small image size of 256x256 pixels. + // Only supported with dall-e-2 models. + ImageSizeSize256X256 ImageSize = "256x256" + // ImageSizeSize512X512 - A smaller image size of 512x512 pixels. + // Only supported with dall-e-2 models. + ImageSizeSize512X512 ImageSize = "512x512" ) // PossibleImageSizeValues returns the possible values for the ImageSize const type. func PossibleImageSizeValues() []ImageSize { return []ImageSize{ - ImageSize512x512, - ImageSize1024x1024, - ImageSize256x256, + ImageSizeSize1024X1024, + ImageSizeSize1024X1792, + ImageSizeSize1792X1024, + ImageSizeSize256X256, + ImageSizeSize512X512, + } +} + +// OnYourDataAuthenticationType - The authentication types supported with Azure OpenAI On Your Data. +type OnYourDataAuthenticationType string + +const ( + // OnYourDataAuthenticationTypeAPIKey - Authentication via API key. + OnYourDataAuthenticationTypeAPIKey OnYourDataAuthenticationType = "APIKey" + // OnYourDataAuthenticationTypeConnectionString - Authentication via connection string. + OnYourDataAuthenticationTypeConnectionString OnYourDataAuthenticationType = "ConnectionString" + // OnYourDataAuthenticationTypeKeyAndKeyID - Authentication via key and key ID pair. + OnYourDataAuthenticationTypeKeyAndKeyID OnYourDataAuthenticationType = "KeyAndKeyId" + // OnYourDataAuthenticationTypeSystemAssignedManagedIdentity - Authentication via system-assigned managed identity. + OnYourDataAuthenticationTypeSystemAssignedManagedIdentity OnYourDataAuthenticationType = "SystemAssignedManagedIdentity" + // OnYourDataAuthenticationTypeUserAssignedManagedIdentity - Authentication via user-assigned managed identity. + OnYourDataAuthenticationTypeUserAssignedManagedIdentity OnYourDataAuthenticationType = "UserAssignedManagedIdentity" +) + +// PossibleOnYourDataAuthenticationTypeValues returns the possible values for the OnYourDataAuthenticationType const type. +func PossibleOnYourDataAuthenticationTypeValues() []OnYourDataAuthenticationType { + return []OnYourDataAuthenticationType{ + OnYourDataAuthenticationTypeAPIKey, + OnYourDataAuthenticationTypeConnectionString, + OnYourDataAuthenticationTypeKeyAndKeyID, + OnYourDataAuthenticationTypeSystemAssignedManagedIdentity, + OnYourDataAuthenticationTypeUserAssignedManagedIdentity, + } +} + +// OnYourDataVectorizationSourceType - Represents the available sources Azure OpenAI On Your Data can use to configure vectorization +// of data for use with vector search. +type OnYourDataVectorizationSourceType string + +const ( + // OnYourDataVectorizationSourceTypeDeploymentName - Represents an Ada model deployment name to use. This model deployment + // must be in the same Azure OpenAI resource, but + // On Your Data will use this model deployment via an internal call rather than a public one, which enables vector + // search even in private networks. + OnYourDataVectorizationSourceTypeDeploymentName OnYourDataVectorizationSourceType = "DeploymentName" + // OnYourDataVectorizationSourceTypeEndpoint - Represents vectorization performed by public service calls to an Azure OpenAI + // embedding model.
+ OnYourDataVectorizationSourceTypeEndpoint OnYourDataVectorizationSourceType = "Endpoint" + // OnYourDataVectorizationSourceTypeModelID - Represents a specific embedding model ID as defined in the search service. + // Currently only supported by Elasticsearch®. + OnYourDataVectorizationSourceTypeModelID OnYourDataVectorizationSourceType = "ModelId" +) + +// PossibleOnYourDataVectorizationSourceTypeValues returns the possible values for the OnYourDataVectorizationSourceType const type. +func PossibleOnYourDataVectorizationSourceTypeValues() []OnYourDataVectorizationSourceType { + return []OnYourDataVectorizationSourceType{ + OnYourDataVectorizationSourceTypeDeploymentName, + OnYourDataVectorizationSourceTypeEndpoint, + OnYourDataVectorizationSourceTypeModelID, } } diff --git a/sdk/ai/azopenai/custom_client.go b/sdk/ai/azopenai/custom_client.go index 02a5b3084bf8..3a6a9ba70306 100644 --- a/sdk/ai/azopenai/custom_client.go +++ b/sdk/ai/azopenai/custom_client.go @@ -197,12 +197,6 @@ func (client *Client) GetChatCompletionsStream(ctx context.Context, body ChatCom if hasAzureExtensions(body) { req, err = client.getChatCompletionsWithAzureExtensionsCreateRequest(ctx, body, &GetChatCompletionsWithAzureExtensionsOptions{}) - - if err == nil { - reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-08-01-preview") - req.Raw().URL.RawQuery = reqQP.Encode() - } } else { req, err = client.getChatCompletionsCreateRequest(ctx, body, &GetChatCompletionsOptions{}) } @@ -277,25 +271,59 @@ type clientData struct { azure bool } -func getDeployment[T ChatCompletionsOptions | CompletionsOptions | EmbeddingsOptions | ImageGenerationOptions | *getAudioTranscriptionInternalOptions | *getAudioTranslationInternalOptions](v T) string { +func getDeployment[T AudioTranscriptionOptions | AudioTranslationOptions | ChatCompletionsOptions | CompletionsOptions | EmbeddingsOptions | *getAudioTranscriptionInternalOptions | *getAudioTranslationInternalOptions | ImageGenerationOptions](v T) string { + var p *string + switch a := any(v).(type) { + case AudioTranscriptionOptions: + p = a.DeploymentName + case AudioTranslationOptions: + p = a.DeploymentName case ChatCompletionsOptions: - return a.Deployment + p = a.DeploymentName case CompletionsOptions: - return a.Deployment + p = a.DeploymentName case EmbeddingsOptions: - return a.Deployment - case ImageGenerationOptions: - return "" + p = a.DeploymentName case *getAudioTranscriptionInternalOptions: - return *a.Model + p = a.Model case *getAudioTranslationInternalOptions: - return *a.Model - default: - return "" + p = a.Model + case ImageGenerationOptions: + p = a.DeploymentName + } + + if p != nil { + return *p } + + return "" } func hasAzureExtensions(body ChatCompletionsOptions) bool { - return body.AzureExtensionsOptions != nil && len(body.AzureExtensionsOptions.Extensions) > 0 + return body.AzureExtensionsOptions != nil && len(body.AzureExtensionsOptions) > 0 +} + +// ChatRequestUserMessageContent contains the user prompt - either as a single string +// or as a []ChatCompletionRequestMessageContentPart, enabling images and text as input. +// +// NOTE: This should be created using [azopenai.NewChatRequestUserMessageContent] +type ChatRequestUserMessageContent struct { + value any +} + +// NewChatRequestUserMessageContent creates a [azopenai.ChatRequestUserMessageContent]. 
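`ChatRequestUserMessageContent` is the piece that lets a user message carry either a plain string or a list of content parts (for `gpt-4-vision-preview`). A rough sketch of both forms follows; the part field names (`Text`, `ImageURL`, `URL`) and the `ChatCompletionRequestMessageContentPartImageURL` type are assumptions for illustration, since only the part type names appear in this diff.

```go
package examples

import (
	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// userMessages builds one text-only user message and one text+image user message
// using the generic NewChatRequestUserMessageContent constructor.
func userMessages() []azopenai.ChatRequestMessageClassification {
	textOnly := &azopenai.ChatRequestUserMessage{
		Content: azopenai.NewChatRequestUserMessageContent("Describe this image for me."),
	}

	textAndImage := &azopenai.ChatRequestUserMessage{
		Content: azopenai.NewChatRequestUserMessageContent([]azopenai.ChatCompletionRequestMessageContentPartClassification{
			&azopenai.ChatCompletionRequestMessageContentPartText{
				// Text is assumed to be the string payload of a text part.
				Text: to.Ptr("What is shown in this picture?"),
			},
			&azopenai.ChatCompletionRequestMessageContentPartImage{
				// ImageURL/URL are assumed names; the image is referenced by URL.
				ImageURL: &azopenai.ChatCompletionRequestMessageContentPartImageURL{
					URL: to.Ptr("https://example.com/sunflower.png"),
				},
			},
		}),
	}

	return []azopenai.ChatRequestMessageClassification{textOnly, textAndImage}
}
```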
+func NewChatRequestUserMessageContent[T string | []ChatCompletionRequestMessageContentPartClassification](v T) ChatRequestUserMessageContent { + switch actualV := any(v).(type) { + case string: + return ChatRequestUserMessageContent{value: &actualV} + case []ChatCompletionRequestMessageContentPartClassification: + return ChatRequestUserMessageContent{value: actualV} + } + return ChatRequestUserMessageContent{} +} + +// MarshalJSON implements the json.Marshaller interface for type ChatRequestUserMessageContent. +func (c ChatRequestUserMessageContent) MarshalJSON() ([]byte, error) { + return json.Marshal(c.value) } diff --git a/sdk/ai/azopenai/custom_client_audio.go b/sdk/ai/azopenai/custom_client_audio.go index c521c27188a1..caf80ef65768 100644 --- a/sdk/ai/azopenai/custom_client_audio.go +++ b/sdk/ai/azopenai/custom_client_audio.go @@ -41,9 +41,11 @@ type GetAudioTranscriptionResponse struct { // - body - contains parameters to specify audio data to transcribe and control the transcription. // - options - optional parameters for this method. func (client *Client) GetAudioTranscription(ctx context.Context, body AudioTranscriptionOptions, options *GetAudioTranscriptionOptions) (GetAudioTranscriptionResponse, error) { - resp, err := client.getAudioTranscriptionInternal(ctx, body.File, &getAudioTranscriptionInternalOptions{ + audioStream := streaming.NopCloser(bytes.NewReader(body.File)) + + resp, err := client.getAudioTranscriptionInternal(ctx, getDeployment(body), audioStream, &getAudioTranscriptionInternalOptions{ Language: body.Language, - Model: &body.Deployment, + Model: body.DeploymentName, Prompt: body.Prompt, ResponseFormat: body.ResponseFormat, Temperature: body.Temperature, @@ -63,7 +65,7 @@ type GetAudioTranslationOptions struct { // GetAudioTranslationResponse contains the response from method [Client.GetAudioTranslation]. type GetAudioTranslationResponse struct { - AudioTranscription + AudioTranslation } // GetAudioTranslation gets English language transcribed text and associated metadata from provided spoken audio @@ -74,8 +76,10 @@ type GetAudioTranslationResponse struct { // - body - contains parameters to specify audio data to translate and control the translation. // - options - optional parameters for this method.
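The audio surface keeps the same shape for callers: raw bytes go in `File` and the deployment now travels in `DeploymentName`. A minimal caller sketch, assuming the transcription result exposes a `Text` field the way the translation path in this diff does, and using a placeholder file name and deployment:

```go
package examples

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// transcribe sends a local audio file to a Whisper deployment and prints the plain-text result.
func transcribe(client *azopenai.Client, whisperDeployment string) {
	audio, err := os.ReadFile("audio.mp3") // placeholder path
	if err != nil {
		log.Fatalf("ERROR: %s", err)
	}

	resp, err := client.GetAudioTranscription(context.TODO(), azopenai.AudioTranscriptionOptions{
		File:           audio,
		DeploymentName: &whisperDeployment,
		ResponseFormat: to.Ptr(azopenai.AudioTranscriptionFormatText),
	}, nil)
	if err != nil {
		log.Fatalf("ERROR: %s", err)
	}

	fmt.Println(*resp.Text)
}
```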
func (client *Client) GetAudioTranslation(ctx context.Context, body AudioTranslationOptions, options *GetAudioTranslationOptions) (GetAudioTranslationResponse, error) { - resp, err := client.getAudioTranslationInternal(ctx, body.File, &getAudioTranslationInternalOptions{ - Model: &body.Deployment, + audioStream := streaming.NopCloser(bytes.NewReader(body.File)) + + resp, err := client.getAudioTranslationInternal(ctx, getDeployment(body), audioStream, &getAudioTranslationInternalOptions{ + Model: body.DeploymentName, Prompt: body.Prompt, ResponseFormat: body.ResponseFormat, Temperature: body.Temperature, @@ -88,23 +92,21 @@ func (client *Client) GetAudioTranslation(ctx context.Context, body AudioTransla return GetAudioTranslationResponse(resp), nil } -func setMultipartFormData[T getAudioTranscriptionInternalOptions | getAudioTranslationInternalOptions](req *policy.Request, file []byte, options T) error { +func setMultipartFormData[T getAudioTranscriptionInternalOptions | getAudioTranslationInternalOptions](req *policy.Request, file io.ReadSeekCloser, options T) error { body := bytes.Buffer{} writer := multipart.NewWriter(&body) - writeContent := func(fieldname, filename string, src io.Reader) error { + writeContent := func(fieldname, filename string, content io.ReadSeekCloser) error { fd, err := writer.CreateFormFile(fieldname, filename) if err != nil { return err } - // copy the data to the form file - if _, err = io.Copy(fd, src); err != nil { - return err - } - return nil + + _, err = io.Copy(fd, file) + return err } - if err := writeContent("file", "audio.mp3", bytes.NewReader(file)); err != nil { + if err := writeContent("file", "audio.mp3", file); err != nil { return err } @@ -160,13 +162,13 @@ func getAudioTranscriptionInternalHandleResponse(resp *http.Response) (getAudioT } func getAudioTranslationInternalHandleResponse(resp *http.Response) (getAudioTranslationInternalResponse, error) { - at, err := deserializeAudioTranscription(resp) + at, err := deserializeAudioTranslation(resp) if err != nil { return getAudioTranslationInternalResponse{}, err } - return getAudioTranslationInternalResponse{AudioTranscription: at}, nil + return getAudioTranslationInternalResponse{AudioTranslation: at}, nil } // deserializeAudioTranscription handles deserializing the content if it's text/plain @@ -198,6 +200,35 @@ func deserializeAudioTranscription(resp *http.Response) (AudioTranscription, err return *result, nil } +// deserializeAudioTranslation handles deserializing the content if it's text/plain +// or a JSON object. 
+func deserializeAudioTranslation(resp *http.Response) (AudioTranslation, error) { + defer func() { + _ = resp.Request.Body.Close() + }() + + contentType := resp.Header.Get("Content-type") + + if strings.Contains(contentType, "text/plain") { + body, err := io.ReadAll(resp.Body) + + if err != nil { + return AudioTranslation{}, err + } + + return AudioTranslation{ + Text: to.Ptr(string(body)), + }, nil + } + + var result *AudioTranslation + if err := runtime.UnmarshalAsJSON(resp, &result); err != nil { + return AudioTranslation{}, err + } + + return *result, nil +} + func writeField[T interface { string | float32 | AudioTranscriptionFormat | AudioTranslationFormat }](writer *multipart.Writer, fieldName string, v *T) error { diff --git a/sdk/ai/azopenai/custom_client_image.go b/sdk/ai/azopenai/custom_client_image.go deleted file mode 100644 index d3a702006753..000000000000 --- a/sdk/ai/azopenai/custom_client_image.go +++ /dev/null @@ -1,88 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package azopenai - -import ( - "context" - "net/http" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" -) - -// CreateImageOptions contains the optional parameters for the Client.CreateImage method. -type CreateImageOptions struct { - // placeholder for future optional parameters -} - -// CreateImageResponse contains the response from method Client.CreateImage. -type CreateImageResponse struct { - ImageGenerations -} - -// CreateImage creates an image using the Dall-E API. -func (client *Client) CreateImage(ctx context.Context, body ImageGenerationOptions, options *CreateImageOptions) (CreateImageResponse, error) { - // on Azure the image generation API is a poller. This is a temporary state so we're abstracting it away - // until it becomes a sync endpoint. 
- if client.azure { - return generateImageWithAzure(client, ctx, body) - } - - return generateImageWithOpenAI(ctx, client, body) -} - -func generateImageWithAzure(client *Client, ctx context.Context, body ImageGenerationOptions) (CreateImageResponse, error) { - resp, err := client.beginAzureBatchImageGeneration(ctx, body, nil) - - if err != nil { - return CreateImageResponse{}, err - } - - v, err := resp.PollUntilDone(ctx, nil) - - if err != nil { - return CreateImageResponse{}, err - } - - return CreateImageResponse{ - ImageGenerations: *v.Result, - }, nil -} - -func generateImageWithOpenAI(ctx context.Context, client *Client, body ImageGenerationOptions) (CreateImageResponse, error) { - urlPath := "/images/generations" - req, err := runtime.NewRequest(ctx, http.MethodPost, client.formatURL(urlPath, "")) - if err != nil { - return CreateImageResponse{}, err - } - reqQP := req.Raw().URL.Query() - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["Accept"] = []string{"application/json"} - - if err := runtime.MarshalAsJSON(req, body); err != nil { - return CreateImageResponse{}, err - } - - resp, err := client.internal.Pipeline().Do(req) - - if err != nil { - return CreateImageResponse{}, err - } - - if !runtime.HasStatusCode(resp, http.StatusOK) { - return CreateImageResponse{}, runtime.NewResponseError(resp) - } - - var gens *ImageGenerations - - if err := runtime.UnmarshalAsJSON(resp, &gens); err != nil { - return CreateImageResponse{}, err - } - - return CreateImageResponse{ - ImageGenerations: *gens, - }, err -} diff --git a/sdk/ai/azopenai/custom_client_image_test.go b/sdk/ai/azopenai/custom_client_image_test.go index 3de3e0f3c1f0..212f5e55887d 100644 --- a/sdk/ai/azopenai/custom_client_image_test.go +++ b/sdk/ai/azopenai/custom_client_image_test.go @@ -7,10 +7,8 @@ package azopenai_test import ( - "bytes" "context" "encoding/base64" - "image/png" "net/http" "testing" "time" @@ -26,8 +24,8 @@ func TestImageGeneration_AzureOpenAI(t *testing.T) { t.Skipf("Ignoring poller-based test") } - client := newTestClient(t, azureOpenAI.Endpoint) - testImageGeneration(t, client, azopenai.ImageGenerationResponseFormatURL) + client := newTestClient(t, azureOpenAI.DallE.Endpoint) + testImageGeneration(t, client, azureOpenAI.DallE.Model, azopenai.ImageGenerationResponseFormatURL) } func TestImageGeneration_OpenAI(t *testing.T) { @@ -36,7 +34,7 @@ func TestImageGeneration_OpenAI(t *testing.T) { } client := newOpenAIClientForTest(t) - testImageGeneration(t, client, azopenai.ImageGenerationResponseFormatURL) + testImageGeneration(t, client, openAI.DallE.Model, azopenai.ImageGenerationResponseFormatURL) } func TestImageGeneration_AzureOpenAI_WithError(t *testing.T) { @@ -63,17 +61,20 @@ func TestImageGeneration_OpenAI_Base64(t *testing.T) { } client := newOpenAIClientForTest(t) - testImageGeneration(t, client, azopenai.ImageGenerationResponseFormatB64JSON) + testImageGeneration(t, client, openAI.DallE.Model, azopenai.ImageGenerationResponseFormatBase64) } -func testImageGeneration(t *testing.T, client *azopenai.Client, responseFormat azopenai.ImageGenerationResponseFormat) { +func testImageGeneration(t *testing.T, client *azopenai.Client, model string, responseFormat azopenai.ImageGenerationResponseFormat) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() - resp, err := client.CreateImage(ctx, azopenai.ImageGenerationOptions{ - Prompt: to.Ptr("a cat"), - Size: to.Ptr(azopenai.ImageSize256x256), + resp, err := client.GetImageGenerations(ctx, 
azopenai.ImageGenerationOptions{ + // saw this prompt in a thread about trying to _prevent_ Dall-E 3 from rewriting your + // prompt. When the prompt is revised you'll see the revised text in the RevisedPrompt field. + Prompt: to.Ptr("acrylic painting of a sunflower with bees"), + Size: to.Ptr(azopenai.ImageSizeSize1024X1792), ResponseFormat: &responseFormat, + DeploymentName: &model, }, nil) require.NoError(t, err) @@ -83,17 +84,12 @@ func testImageGeneration(t *testing.T, client *azopenai.Client, responseFormat a headResp, err := http.DefaultClient.Head(*resp.Data[0].URL) require.NoError(t, err) require.Equal(t, http.StatusOK, headResp.StatusCode) - case azopenai.ImageGenerationResponseFormatB64JSON: - pngBytes, err := base64.StdEncoding.DecodeString(*resp.Data[0].Base64Data) - require.NoError(t, err) - require.NotEmpty(t, pngBytes) - - // the bytes here should just be a valid PNG - buff := bytes.NewBuffer(pngBytes) - - // just check that it's a valid PNG - _, err = png.Decode(buff) + require.NotEmpty(t, resp.Data[0].RevisedPrompt) + case azopenai.ImageGenerationResponseFormatBase64: + imgBytes, err := base64.StdEncoding.DecodeString(*resp.Data[0].Base64Data) require.NoError(t, err) + require.NotEmpty(t, imgBytes) + require.NotEmpty(t, resp.Data[0].RevisedPrompt) } } } @@ -102,10 +98,11 @@ func testImageGenerationFailure(t *testing.T, bogusClient *azopenai.Client) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() - resp, err := bogusClient.CreateImage(ctx, azopenai.ImageGenerationOptions{ + resp, err := bogusClient.GetImageGenerations(ctx, azopenai.ImageGenerationOptions{ Prompt: to.Ptr("a cat"), - Size: to.Ptr(azopenai.ImageSize256x256), + Size: to.Ptr(azopenai.ImageSizeSize256X256), ResponseFormat: to.Ptr(azopenai.ImageGenerationResponseFormatURL), + DeploymentName: to.Ptr("ignored"), }, nil) require.Empty(t, resp) diff --git a/sdk/ai/azopenai/custom_client_test.go b/sdk/ai/azopenai/custom_client_test.go index 7f3264ee6442..9d851d213171 100644 --- a/sdk/ai/azopenai/custom_client_test.go +++ b/sdk/ai/azopenai/custom_client_test.go @@ -92,10 +92,10 @@ func TestGetCompletionsStream_OpenAI(t *testing.T) { func testGetCompletionsStream(t *testing.T, client *azopenai.Client, tv testVars) { body := azopenai.CompletionsOptions{ - Prompt: []string{"What is Azure OpenAI?"}, - MaxTokens: to.Ptr(int32(2048)), - Temperature: to.Ptr(float32(0.0)), - Deployment: tv.Completions, + Prompt: []string{"What is Azure OpenAI?"}, + MaxTokens: to.Ptr(int32(2048)), + Temperature: to.Ptr(float32(0.0)), + DeploymentName: &tv.Completions, } response, err := client.GetCompletionsStream(context.TODO(), body, nil) @@ -120,8 +120,8 @@ func testGetCompletionsStream(t *testing.T, client *azopenai.Client, tv testVars } if completion.PromptFilterResults != nil { - require.Equal(t, []azopenai.PromptFilterResult{ - {PromptIndex: to.Ptr[int32](0), ContentFilterResults: (*azopenai.PromptFilterResultContentFilterResults)(safeContentFilter)}, + require.Equal(t, []azopenai.ContentFilterResultsForPrompt{ + {PromptIndex: to.Ptr[int32](0), ContentFilterResults: safeContentFilterResultDetailsForPrompt}, }, completion.PromptFilterResults) } @@ -154,10 +154,10 @@ func TestClient_GetCompletions_Error(t *testing.T) { doTest := func(t *testing.T, client *azopenai.Client, model string) { streamResp, err := client.GetCompletionsStream(context.Background(), azopenai.CompletionsOptions{ - Prompt:
[]string{"What is Azure OpenAI?"}, + MaxTokens: to.Ptr(int32(2048 - 127)), + Temperature: to.Ptr(float32(0.0)), + DeploymentName: &model, }, nil) require.Empty(t, streamResp) assertResponseIsError(t, err) diff --git a/sdk/ai/azopenai/custom_models.go b/sdk/ai/azopenai/custom_models.go index 5a12cd336e29..48d0348f8460 100644 --- a/sdk/ai/azopenai/custom_models.go +++ b/sdk/ai/azopenai/custom_models.go @@ -60,6 +60,27 @@ type ContentFilterResponseError struct { ContentFilterResults *ContentFilterResults } +// ContentFilterResults are the content filtering results for a [ContentFilterResponseError]. +type ContentFilterResults struct { + // Describes language attacks or uses that include pejorative or discriminatory language with reference to a person or identity + // group on the basis of certain differentiating attributes of these groups + // including but not limited to race, ethnicity, nationality, gender identity and expression, sexual orientation, religion, + // immigration status, ability status, personal appearance, and body size. + Hate *ContentFilterResult `json:"hate"` + + // Describes language related to physical actions intended to purposely hurt, injure, or damage one’s body, or kill oneself. + SelfHarm *ContentFilterResult `json:"self_harm"` + + // Describes language related to anatomical organs and genitals, romantic relationships, acts portrayed in erotic or affectionate + // terms, physical sexual acts, including those portrayed as an assault or a + // forced sexual violent act against one’s will, prostitution, pornography, and abuse. + Sexual *ContentFilterResult `json:"sexual"` + + // Describes language related to physical actions intended to hurt, injure, damage, or kill someone or something; describes + // weapons, etc. + Violence *ContentFilterResult `json:"violence"` +} + // Unwrap returns the inner error for this error. func (e *ContentFilterResponseError) Unwrap() error { return &e.ResponseError @@ -81,7 +102,7 @@ func newContentFilterResponseError(resp *http.Response) error { var envelope *struct { Error struct { InnerError struct { - FilterResult *ContentFilterResults `json:"content_filter_result"` + ContentFilterResults *ContentFilterResults `json:"content_filter_result"` } `json:"innererror"` } } @@ -92,7 +113,7 @@ func newContentFilterResponseError(resp *http.Response) error { return &ContentFilterResponseError{ ResponseError: *respErr, - ContentFilterResults: envelope.Error.InnerError.FilterResult, + ContentFilterResults: envelope.Error.InnerError.ContentFilterResults, } } @@ -101,3 +122,13 @@ type AzureChatExtensionOptions struct { // Extensions is a slice of extensions to the chat completions endpoint, like Azure Cognitive Search. Extensions []AzureChatExtensionConfiguration } + +// Error implements the error interface for type Error. +// Note that the message contents are not contractual and can change over time. +func (e *Error) Error() string { + if e.message == nil { + return "" + } + + return *e.message +} diff --git a/sdk/ai/azopenai/custom_client_functions.go b/sdk/ai/azopenai/custom_models_functions.go similarity index 85% rename from sdk/ai/azopenai/custom_client_functions.go rename to sdk/ai/azopenai/custom_models_functions.go index 480f82a862fc..95ee1a9b58bd 100644 --- a/sdk/ai/azopenai/custom_client_functions.go +++ b/sdk/ai/azopenai/custom_models_functions.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
See License.txt in the project root for license information. @@ -11,6 +8,10 @@ import ( "errors" ) +// These models and their code are used for the old style of function calling. They were auto-generated +// in the past (and won't change since they're an evolutionary dead-end at this point) but some older +// and supported models can only use this variant. + // ChatCompletionsOptionsFunctionCall - Controls how the model responds to function calls. "none" means the model does not // call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a // function. Specifying a particular function via {"name": "my_function"} forces the model to call that function. "none" is diff --git a/sdk/ai/azopenai/custom_models_test.go b/sdk/ai/azopenai/custom_models_test.go index d737e8a206f3..0b8fed161529 100644 --- a/sdk/ai/azopenai/custom_models_test.go +++ b/sdk/ai/azopenai/custom_models_test.go @@ -63,4 +63,6 @@ func TestParseResponseError(t *testing.T) { require.Equal(t, &ContentFilterResult{Filtered: to.Ptr(false), Severity: to.Ptr(ContentFilterSeveritySafe)}, contentFilterResults.Hate) require.Equal(t, &ContentFilterResult{Filtered: to.Ptr(false), Severity: to.Ptr(ContentFilterSeveritySafe)}, contentFilterResults.SelfHarm) require.Equal(t, &ContentFilterResult{Filtered: to.Ptr(false), Severity: to.Ptr(ContentFilterSeveritySafe)}, contentFilterResults.Sexual) + + require.NotNil(t, contentFilterResults) } diff --git a/sdk/ai/azopenai/example_client_audio_test.go b/sdk/ai/azopenai/example_client_audio_test.go index 8a59e838228a..94b11291fe55 100644 --- a/sdk/ai/azopenai/example_client_audio_test.go +++ b/sdk/ai/azopenai/example_client_audio_test.go @@ -53,7 +53,7 @@ func ExampleClient_GetAudioTranscription() { // different or additional metadata. See [azopenai.AudioTranscriptionFormat] for more examples. 
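Since `ContentFilterResponseError` wraps the service error and now carries `ContentFilterResults`, callers can pull the per-category results out of a failed request with `errors.As`. A sketch, using only the fields shown in `custom_models.go` and the test above:

```go
package examples

import (
	"errors"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
)

// reportContentFiltering prints every category that tripped the content filter for a
// request that failed with a ContentFilterResponseError.
func reportContentFiltering(err error) {
	var cfErr *azopenai.ContentFilterResponseError

	if !errors.As(err, &cfErr) || cfErr.ContentFilterResults == nil {
		return
	}

	categories := map[string]*azopenai.ContentFilterResult{
		"hate":      cfErr.ContentFilterResults.Hate,
		"self_harm": cfErr.ContentFilterResults.SelfHarm,
		"sexual":    cfErr.ContentFilterResults.Sexual,
		"violence":  cfErr.ContentFilterResults.Violence,
	}

	for name, result := range categories {
		if result != nil && result.Filtered != nil && *result.Filtered && result.Severity != nil {
			fmt.Printf("category %s was filtered (severity: %s)\n", name, *result.Severity)
		}
	}
}
```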
ResponseFormat: to.Ptr(azopenai.AudioTranscriptionFormatText), - Deployment: modelDeploymentID, + DeploymentName: &modelDeploymentID, }, nil) if err != nil { diff --git a/sdk/ai/azopenai/example_client_createimage_test.go b/sdk/ai/azopenai/example_client_createimage_test.go index b5d872308ecf..a55f521607c6 100644 --- a/sdk/ai/azopenai/example_client_createimage_test.go +++ b/sdk/ai/azopenai/example_client_createimage_test.go @@ -18,13 +18,15 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" ) -func ExampleClient_CreateImage() { - azureOpenAIKey := os.Getenv("AOAI_API_KEY") +func ExampleClient_GetImageGenerations() { + azureOpenAIKey := os.Getenv("AOAI_DALLE_API_KEY") // Ex: "https://.openai.azure.com" - azureOpenAIEndpoint := os.Getenv("AOAI_ENDPOINT") + azureOpenAIEndpoint := os.Getenv("AOAI_DALLE_ENDPOINT") - if azureOpenAIKey == "" || azureOpenAIEndpoint == "" { + azureDeployment := os.Getenv("AOAI_DALLE_MODEL") + + if azureOpenAIKey == "" || azureOpenAIEndpoint == "" || azureDeployment == "" { fmt.Fprintf(os.Stderr, "Skipping example, environment variables missing\n") return } @@ -38,9 +40,10 @@ func ExampleClient_CreateImage() { log.Fatalf("ERROR: %s", err) } - resp, err := client.CreateImage(context.TODO(), azopenai.ImageGenerationOptions{ + resp, err := client.GetImageGenerations(context.TODO(), azopenai.ImageGenerationOptions{ Prompt: to.Ptr("a cat"), ResponseFormat: to.Ptr(azopenai.ImageGenerationResponseFormatURL), + DeploymentName: &azureDeployment, }, nil) if err != nil { @@ -56,7 +59,7 @@ func ExampleClient_CreateImage() { resp, err := http.Head(*generatedImage.URL) if err != nil { - // TODO: handle error + // TODO: Update the following line with your application specific error handling logic log.Fatalf("ERROR: %s", err) } diff --git a/sdk/ai/azopenai/example_client_embeddings_test.go b/sdk/ai/azopenai/example_client_embeddings_test.go index ab18b5eecd12..6bcc04eb15b1 100644 --- a/sdk/ai/azopenai/example_client_embeddings_test.go +++ b/sdk/ai/azopenai/example_client_embeddings_test.go @@ -37,8 +37,8 @@ func ExampleClient_GetEmbeddings() { } resp, err := client.GetEmbeddings(context.TODO(), azopenai.EmbeddingsOptions{ - Input: []string{"The food was delicious and the waiter..."}, - Deployment: modelDeploymentID, + Input: []string{"The food was delicious and the waiter..."}, + DeploymentName: &modelDeploymentID, }, nil) if err != nil { diff --git a/sdk/ai/azopenai/example_client_getchatcompletions_extensions_test.go b/sdk/ai/azopenai/example_client_getchatcompletions_extensions_test.go index 9f7941e9e2b3..dad9520dc638 100644 --- a/sdk/ai/azopenai/example_client_getchatcompletions_extensions_test.go +++ b/sdk/ai/azopenai/example_client_getchatcompletions_extensions_test.go @@ -19,11 +19,11 @@ import ( // // [Azure OpenAI on your data]: https://learn.microsoft.com/azure/ai-services/openai/concepts/use-your-data func ExampleClient_GetChatCompletions_bringYourOwnDataWithCognitiveSearch() { - azureOpenAIKey := os.Getenv("AOAI_API_KEY") - modelDeploymentID := os.Getenv("AOAI_CHAT_COMPLETIONS_MODEL") + azureOpenAIKey := os.Getenv("AOAI_CHAT_COMPLETIONS_RAI_API_KEY") + modelDeploymentID := os.Getenv("AOAI_CHAT_COMPLETIONS_RAI_MODEL") // Ex: "https://.openai.azure.com" - azureOpenAIEndpoint := os.Getenv("AOAI_ENDPOINT") + azureOpenAIEndpoint := os.Getenv("AOAI_CHAT_COMPLETIONS_RAI_ENDPOINT") // Azure Cognitive Search configuration searchIndex := os.Getenv("COGNITIVE_SEARCH_API_INDEX") @@ -47,30 +47,29 @@ func 
ExampleClient_GetChatCompletions_bringYourOwnDataWithCognitiveSearch() { } resp, err := client.GetChatCompletions(context.TODO(), azopenai.ChatCompletionsOptions{ - Messages: []azopenai.ChatMessage{ - {Content: to.Ptr("What are the differences between Azure Machine Learning and Azure AI services?"), Role: to.Ptr(azopenai.ChatRoleUser)}, + Messages: []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("What are the differences between Azure Machine Learning and Azure AI services?")}, }, MaxTokens: to.Ptr[int32](512), - AzureExtensionsOptions: &azopenai.AzureChatExtensionOptions{ - Extensions: []azopenai.AzureChatExtensionConfiguration{ - { - // This allows Azure OpenAI to use an Azure Cognitive Search index. - // - // > Because the model has access to, and can reference specific sources to support its responses, answers are not only based on its pretrained knowledge - // > but also on the latest information available in the designated data source. This grounding data also helps the model avoid generating responses - // > based on outdated or incorrect information. - // - // Quote from here: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/use-your-data - Type: to.Ptr(azopenai.AzureChatExtensionTypeAzureCognitiveSearch), - Parameters: azopenai.AzureCognitiveSearchChatExtensionConfiguration{ - Endpoint: &searchEndpoint, - IndexName: &searchIndex, - Key: &searchAPIKey, + AzureExtensionsOptions: []azopenai.AzureChatExtensionConfigurationClassification{ + &azopenai.AzureCognitiveSearchChatExtensionConfiguration{ + // This allows Azure OpenAI to use an Azure Cognitive Search index. + // + // > Because the model has access to, and can reference specific sources to support its responses, answers are not only based on its pretrained knowledge + // > but also on the latest information available in the designated data source. This grounding data also helps the model avoid generating responses + // > based on outdated or incorrect information. + // + // Quote from here: https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/use-your-data + Parameters: &azopenai.AzureCognitiveSearchChatExtensionParameters{ + Endpoint: &searchEndpoint, + IndexName: &searchIndex, + Authentication: &azopenai.OnYourDataAPIKeyAuthenticationOptions{ + Key: &searchAPIKey, }, }, }, }, - Deployment: modelDeploymentID, + DeploymentName: &modelDeploymentID, }, nil) if err != nil { diff --git a/sdk/ai/azopenai/example_client_getchatcompletions_test.go b/sdk/ai/azopenai/example_client_getchatcompletions_test.go index dbffcb2ddf21..1df056f22e6e 100644 --- a/sdk/ai/azopenai/example_client_getchatcompletions_test.go +++ b/sdk/ai/azopenai/example_client_getchatcompletions_test.go @@ -42,18 +42,18 @@ func ExampleClient_GetChatCompletions() { // This is a conversation in progress. // NOTE: all messages, regardless of role, count against token usage for this API. - messages := []azopenai.ChatMessage{ + messages := []azopenai.ChatRequestMessageClassification{ // You set the tone and rules of the conversation with a prompt as the system role. - {Role: to.Ptr(azopenai.ChatRoleSystem), Content: to.Ptr("You are a helpful assistant. You will talk like a pirate.")}, + &azopenai.ChatRequestSystemMessage{Content: to.Ptr("You are a helpful assistant. 
You will talk like a pirate.")}, // The user asks a question - {Role: to.Ptr(azopenai.ChatRoleUser), Content: to.Ptr("Can you help me?")}, + &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("Can you help me?")}, // The reply would come back from the ChatGPT. You'd add it to the conversation so we can maintain context. - {Role: to.Ptr(azopenai.ChatRoleAssistant), Content: to.Ptr("Arrrr! Of course, me hearty! What can I do for ye?")}, + &azopenai.ChatRequestAssistantMessage{Content: to.Ptr("Arrrr! Of course, me hearty! What can I do for ye?")}, // The user answers the question based on the latest reply. - {Role: to.Ptr(azopenai.ChatRoleUser), Content: to.Ptr("What's the best way to train a parrot?")}, + &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("What's the best way to train a parrot?")}, // from here you'd keep iterating, sending responses back from ChatGPT } @@ -63,8 +63,8 @@ func ExampleClient_GetChatCompletions() { resp, err := client.GetChatCompletions(context.TODO(), azopenai.ChatCompletionsOptions{ // This is a conversation in progress. // NOTE: all messages count against token usage for this API. - Messages: messages, - Deployment: modelDeploymentID, + Messages: messages, + DeploymentName: &modelDeploymentID, }, nil) if err != nil { @@ -129,11 +129,95 @@ func ExampleClient_GetChatCompletions_functions() { } resp, err := client.GetChatCompletions(context.TODO(), azopenai.ChatCompletionsOptions{ - Deployment: modelDeploymentID, - Messages: []azopenai.ChatMessage{ - { - Role: to.Ptr(azopenai.ChatRoleUser), - Content: to.Ptr("What's the weather like in Boston, MA, in celsius?"), + DeploymentName: &modelDeploymentID, + Messages: []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestUserMessage{ + Content: azopenai.NewChatRequestUserMessageContent("What's the weather like in Boston, MA, in celsius?"), + }, + }, + Tools: []azopenai.ChatCompletionsToolDefinitionClassification{ + &azopenai.ChatCompletionsFunctionToolDefinition{ + Function: &azopenai.FunctionDefinition{ + Name: to.Ptr("get_current_weather"), + Description: to.Ptr("Get the current weather in a given location"), + Parameters: map[string]any{ + "required": []string{"location"}, + "type": "object", + "properties": map[string]any{ + "location": map[string]any{ + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": map[string]any{ + "type": "string", + "enum": []string{"celsius", "fahrenheit"}, + }, + }, + }, + }, + }, + }, + Temperature: to.Ptr[float32](0.0), + }, nil) + + if err != nil { + // TODO: Update the following line with your application specific error handling logic + log.Fatalf("ERROR: %s", err) + } + + funcCall := resp.Choices[0].Message.ToolCalls[0].(*azopenai.ChatCompletionsFunctionToolCall).Function + + // This is the function name we gave in the call to GetCompletions + // Prints: Function name: "get_current_weather" + fmt.Fprintf(os.Stderr, "Function name: %q\n", *funcCall.Name) + + // The arguments for your function come back as a JSON string + var funcParams *struct { + Location string `json:"location"` + Unit string `json:"unit"` + } + err = json.Unmarshal([]byte(*funcCall.Arguments), &funcParams) + + if err != nil { + // TODO: Update the following line with your application specific error handling logic + log.Fatalf("ERROR: %s", err) + } + + // Prints: + // Parameters: azopenai_test.location{Location:"Boston, MA", Unit:"celsius"} + fmt.Fprintf(os.Stderr, "Parameters: %#v\n", *funcParams) + + // Output: +} + +func ExampleClient_GetChatCompletions_legacyFunctions() { + azureOpenAIKey := os.Getenv("AOAI_API_KEY") + modelDeploymentID := os.Getenv("AOAI_CHAT_COMPLETIONS_MODEL_LEGACY_FUNCTIONS") + + // Ex: "https://.openai.azure.com" + azureOpenAIEndpoint := os.Getenv("AOAI_ENDPOINT") + + if azureOpenAIKey == "" || modelDeploymentID == "" || azureOpenAIEndpoint == "" { + fmt.Fprintf(os.Stderr, "Skipping example, environment variables missing\n") + return + } + + keyCredential := azcore.NewKeyCredential(azureOpenAIKey) + + // In Azure OpenAI you must deploy a model before you can use it in your client. For more information + // see here: https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource + client, err := azopenai.NewClientWithKeyCredential(azureOpenAIEndpoint, keyCredential, nil) + + if err != nil { + // TODO: Update the following line with your application specific error handling logic + log.Fatalf("ERROR: %s", err) + } + + resp, err := client.GetChatCompletions(context.TODO(), azopenai.ChatCompletionsOptions{ + DeploymentName: &modelDeploymentID, + Messages: []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestUserMessage{ + Content: azopenai.NewChatRequestUserMessageContent("What's the weather like in Boston, MA, in celsius?"), }, }, FunctionCall: &azopenai.ChatCompletionsOptionsFunctionCall{ @@ -218,18 +302,18 @@ func ExampleClient_GetChatCompletionsStream() { // This is a conversation in progress. // NOTE: all messages, regardless of role, count against token usage for this API. - messages := []azopenai.ChatMessage{ + messages := []azopenai.ChatRequestMessageClassification{ // You set the tone and rules of the conversation with a prompt as the system role. - {Role: to.Ptr(azopenai.ChatRoleSystem), Content: to.Ptr("You are a helpful assistant. You will talk like a pirate and limit your responses to 20 words or less.")}, + &azopenai.ChatRequestSystemMessage{Content: to.Ptr("You are a helpful assistant. You will talk like a pirate and limit your responses to 20 words or less.")}, // The user asks a question - {Role: to.Ptr(azopenai.ChatRoleUser), Content: to.Ptr("Can you help me?")}, + &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("Can you help me?")}, // The reply would come back from the ChatGPT. You'd add it to the conversation so we can maintain context. 
- {Role: to.Ptr(azopenai.ChatRoleAssistant), Content: to.Ptr("Arrrr! Of course, me hearty! What can I do for ye?")}, + &azopenai.ChatRequestAssistantMessage{Content: to.Ptr("Arrrr! Of course, me hearty! What can I do for ye?")}, // The user answers the question based on the latest reply. - {Role: to.Ptr(azopenai.ChatRoleUser), Content: to.Ptr("What's the best way to train a parrot?")}, + &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("What's the best way to train a parrot?")}, // from here you'd keep iterating, sending responses back from ChatGPT } @@ -237,9 +321,9 @@ func ExampleClient_GetChatCompletionsStream() { resp, err := client.GetChatCompletionsStream(context.TODO(), azopenai.ChatCompletionsOptions{ // This is a conversation in progress. // NOTE: all messages count against token usage for this API. - Messages: messages, - N: to.Ptr[int32](1), - Deployment: modelDeploymentID, + Messages: messages, + N: to.Ptr[int32](1), + DeploymentName: &modelDeploymentID, }, nil) if err != nil { @@ -259,7 +343,7 @@ func ExampleClient_GetChatCompletionsStream() { } if err != nil { - // TODO: handle error + // TODO: Update the following line with your application specific error handling logic log.Fatalf("ERROR: %s", err) } diff --git a/sdk/ai/azopenai/example_client_getcompletions_test.go b/sdk/ai/azopenai/example_client_getcompletions_test.go index abede1c33ccb..5746b4d07e3a 100644 --- a/sdk/ai/azopenai/example_client_getcompletions_test.go +++ b/sdk/ai/azopenai/example_client_getcompletions_test.go @@ -40,10 +40,10 @@ func ExampleClient_GetCompletions() { } resp, err := client.GetCompletions(context.TODO(), azopenai.CompletionsOptions{ - Prompt: []string{"What is Azure OpenAI, in 20 words or less"}, - MaxTokens: to.Ptr(int32(2048)), - Temperature: to.Ptr(float32(0.0)), - Deployment: modelDeployment, + Prompt: []string{"What is Azure OpenAI, in 20 words or less"}, + MaxTokens: to.Ptr(int32(2048)), + Temperature: to.Ptr(float32(0.0)), + DeploymentName: &modelDeployment, }, nil) if err != nil { @@ -82,10 +82,10 @@ func ExampleClient_GetCompletionsStream() { } resp, err := client.GetCompletionsStream(context.TODO(), azopenai.CompletionsOptions{ - Prompt: []string{"What is Azure OpenAI, in 20 words or less?"}, - MaxTokens: to.Ptr(int32(2048)), - Temperature: to.Ptr(float32(0.0)), - Deployment: modelDeploymentID, + Prompt: []string{"What is Azure OpenAI, in 20 words or less?"}, + MaxTokens: to.Ptr(int32(2048)), + Temperature: to.Ptr(float32(0.0)), + DeploymentName: &modelDeploymentID, }, nil) if err != nil { @@ -104,7 +104,7 @@ func ExampleClient_GetCompletionsStream() { } if err != nil { - // TODO: handle error + // TODO: Update the following line with your application specific error handling logic log.Fatalf("ERROR: %s", err) } diff --git a/sdk/ai/azopenai/genopenapi3.ps1 b/sdk/ai/azopenai/genopenapi3.ps1 index 4b4920a614fc..4d3aca353bb0 100644 --- a/sdk/ai/azopenai/genopenapi3.ps1 +++ b/sdk/ai/azopenai/genopenapi3.ps1 @@ -1,5 +1,20 @@ Push-Location ./testdata npm install + +if ($LASTEXITCODE -ne 0) { + Exit 1 +} + npm run pull + +if ($LASTEXITCODE -ne 0) { + Exit 1 +} + npm run build -Pop-Location \ No newline at end of file + +if ($LASTEXITCODE -ne 0) { + Exit 1 +} + +Pop-Location diff --git a/sdk/ai/azopenai/interfaces.go b/sdk/ai/azopenai/interfaces.go new file mode 100644 index 000000000000..50d7f51cf05d --- /dev/null +++ b/sdk/ai/azopenai/interfaces.go @@ -0,0 +1,84 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azopenai + +// AzureChatExtensionConfigurationClassification provides polymorphic access to related types. +// Call the interface's GetAzureChatExtensionConfiguration() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *AzureChatExtensionConfiguration, *AzureCognitiveSearchChatExtensionConfiguration, *AzureCosmosDBChatExtensionConfiguration, +// - *AzureMachineLearningIndexChatExtensionConfiguration, *ElasticsearchChatExtensionConfiguration, *PineconeChatExtensionConfiguration +type AzureChatExtensionConfigurationClassification interface { + // GetAzureChatExtensionConfiguration returns the AzureChatExtensionConfiguration content of the underlying type. + GetAzureChatExtensionConfiguration() *AzureChatExtensionConfiguration +} + +// ChatCompletionRequestMessageContentPartClassification provides polymorphic access to related types. +// Call the interface's GetChatCompletionRequestMessageContentPart() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *ChatCompletionRequestMessageContentPart, *ChatCompletionRequestMessageContentPartImage, *ChatCompletionRequestMessageContentPartText +type ChatCompletionRequestMessageContentPartClassification interface { + // GetChatCompletionRequestMessageContentPart returns the ChatCompletionRequestMessageContentPart content of the underlying type. + GetChatCompletionRequestMessageContentPart() *ChatCompletionRequestMessageContentPart +} + +// ChatCompletionsToolCallClassification provides polymorphic access to related types. +// Call the interface's GetChatCompletionsToolCall() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *ChatCompletionsFunctionToolCall, *ChatCompletionsToolCall +type ChatCompletionsToolCallClassification interface { + // GetChatCompletionsToolCall returns the ChatCompletionsToolCall content of the underlying type. + GetChatCompletionsToolCall() *ChatCompletionsToolCall +} + +// ChatCompletionsToolDefinitionClassification provides polymorphic access to related types. +// Call the interface's GetChatCompletionsToolDefinition() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *ChatCompletionsFunctionToolDefinition, *ChatCompletionsToolDefinition +type ChatCompletionsToolDefinitionClassification interface { + // GetChatCompletionsToolDefinition returns the ChatCompletionsToolDefinition content of the underlying type. + GetChatCompletionsToolDefinition() *ChatCompletionsToolDefinition +} + +// ChatFinishDetailsClassification provides polymorphic access to related types. +// Call the interface's GetChatFinishDetails() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *ChatFinishDetails, *MaxTokensFinishDetails, *StopFinishDetails +type ChatFinishDetailsClassification interface { + // GetChatFinishDetails returns the ChatFinishDetails content of the underlying type. + GetChatFinishDetails() *ChatFinishDetails +} + +// ChatRequestMessageClassification provides polymorphic access to related types. 
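These `*Classification` interfaces are consumed with a type switch, as the comments suggest. A small sketch for tool calls, built from the `ChatCompletionsFunctionToolCall` usage shown in the updated examples:

```go
package examples

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
)

// printToolCalls type-switches over the polymorphic tool calls returned with a chat
// completions choice and prints the function calls it recognizes.
func printToolCalls(toolCalls []azopenai.ChatCompletionsToolCallClassification) {
	for _, tc := range toolCalls {
		switch call := tc.(type) {
		case *azopenai.ChatCompletionsFunctionToolCall:
			// Function tool calls carry the function name and its JSON-encoded arguments.
			fmt.Printf("function %s(%s)\n", *call.Function.Name, *call.Function.Arguments)
		default:
			fmt.Printf("unhandled tool call type %T\n", call)
		}
	}
}
```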
+// Call the interface's GetChatRequestMessage() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *ChatRequestAssistantMessage, *ChatRequestMessage, *ChatRequestSystemMessage, *ChatRequestToolMessage, *ChatRequestUserMessage +type ChatRequestMessageClassification interface { + // GetChatRequestMessage returns the ChatRequestMessage content of the underlying type. + GetChatRequestMessage() *ChatRequestMessage +} + +// OnYourDataAuthenticationOptionsClassification provides polymorphic access to related types. +// Call the interface's GetOnYourDataAuthenticationOptions() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *OnYourDataAPIKeyAuthenticationOptions, *OnYourDataAuthenticationOptions, *OnYourDataConnectionStringAuthenticationOptions, +// - *OnYourDataKeyAndKeyIDAuthenticationOptions, *OnYourDataSystemAssignedManagedIdentityAuthenticationOptions, *OnYourDataUserAssignedManagedIdentityAuthenticationOptions +type OnYourDataAuthenticationOptionsClassification interface { + // GetOnYourDataAuthenticationOptions returns the OnYourDataAuthenticationOptions content of the underlying type. + GetOnYourDataAuthenticationOptions() *OnYourDataAuthenticationOptions +} + +// OnYourDataVectorizationSourceClassification provides polymorphic access to related types. +// Call the interface's GetOnYourDataVectorizationSource() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *OnYourDataDeploymentNameVectorizationSource, *OnYourDataEndpointVectorizationSource, *OnYourDataModelIDVectorizationSource, +// - *OnYourDataVectorizationSource +type OnYourDataVectorizationSourceClassification interface { + // GetOnYourDataVectorizationSource returns the OnYourDataVectorizationSource content of the underlying type. + GetOnYourDataVectorizationSource() *OnYourDataVectorizationSource +} diff --git a/sdk/ai/azopenai/main_test.go b/sdk/ai/azopenai/main_test.go index 39f971160b59..221f28a4aba4 100644 --- a/sdk/ai/azopenai/main_test.go +++ b/sdk/ai/azopenai/main_test.go @@ -4,9 +4,12 @@ package azopenai_test import ( - "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "fmt" "os" "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/joho/godotenv" ) const RecordingDirectory = "sdk/ai/azopenai/testdata" @@ -30,6 +33,10 @@ func run(m *testing.M) int { panic(err) } }() + } else { + if err := godotenv.Load(); err != nil { + fmt.Printf("Failed to load .env file: %s\n", err) + } } return m.Run() diff --git a/sdk/ai/azopenai/models.go b/sdk/ai/azopenai/models.go index e4df0bb6a450..35e9bff6ee05 100644 --- a/sdk/ai/azopenai/models.go +++ b/sdk/ai/azopenai/models.go @@ -8,7 +8,9 @@ package azopenai -import "time" +import ( + "time" +) // AudioTranscription - Result information for an operation that transcribed spoken audio into written text. type AudioTranscription struct { @@ -35,14 +37,17 @@ type AudioTranscriptionOptions struct { // flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm. File []byte + // The model to use for this transcription request. + DeploymentName *string + + // The optional filename or descriptive identifier to associate with with the audio data. + Filename *string + // The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code such // as 'en' or 'fr'. 
Providing this known input language is optional but may improve // the accuracy and/or latency of transcription. Language *string - // REQUIRED: Deployment specifies the name of the deployment (for Azure OpenAI) or model (for OpenAI) to use for this request. - Deployment string - // An optional hint to guide the model's style or continue from a prior audio segment. The written language of the prompt // should match the primary spoken language of the audio data. Prompt *string @@ -119,8 +124,11 @@ type AudioTranslationOptions struct { // flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm. File []byte - // REQUIRED: Deployment specifies the name of the deployment (for Azure OpenAI) or model (for OpenAI) to use for this request. - Deployment string + // The model to use for this translation request. + DeploymentName *string + + // The optional filename or descriptive identifier to associate with with the audio data. + Filename *string // An optional hint to guide the model's style or continue from a prior audio segment. The written language of the prompt // should match the primary spoken language of the audio data. @@ -173,18 +181,34 @@ type AudioTranslationSegment struct { Tokens []int32 } +// AzureChatEnhancementConfiguration - A representation of the available Azure OpenAI enhancement configurations. +type AzureChatEnhancementConfiguration struct { + // A representation of the available options for the Azure OpenAI grounding enhancement. + Grounding *AzureChatGroundingEnhancementConfiguration + + // A representation of the available options for the Azure OpenAI optical character recognition (OCR) enhancement. + Ocr *AzureChatOCREnhancementConfiguration +} + +// AzureChatEnhancements - Represents the output results of Azure enhancements to chat completions, as configured via the +// matching input provided in the request. +type AzureChatEnhancements struct { + // The grounding enhancement that returns the bounding box of the objects detected in the image. + Grounding *AzureGroundingEnhancement +} + // AzureChatExtensionConfiguration - A representation of configuration data for a single Azure OpenAI chat extension. This // will be used by a chat completions request that should use Azure OpenAI chat extensions to augment the response // behavior. The use of this configuration is compatible only with Azure OpenAI. type AzureChatExtensionConfiguration struct { - // REQUIRED; The label for the type of an Azure chat extension. This typically corresponds to a matching Azure resource. Azure // chat extensions are only compatible with Azure OpenAI. - Type *AzureChatExtensionType + configType *AzureChatExtensionType +} - // REQUIRED; The configuration payload used for the Azure chat extension. The structure payload details are specific to the - // extension being configured. Azure chat extensions are only compatible with Azure OpenAI. - Parameters any +// GetAzureChatExtensionConfiguration implements the AzureChatExtensionConfigurationClassification interface for type AzureChatExtensionConfiguration. 
+func (a *AzureChatExtensionConfiguration) GetAzureChatExtensionConfiguration() *AzureChatExtensionConfiguration { + return a } // AzureChatExtensionsMessageContext - A representation of the additional context information available when Azure OpenAI @@ -195,33 +219,62 @@ type AzureChatExtensionsMessageContext struct { // describe the data source retrievals, plugin invocations, and other // intermediate steps taken in the course of generating a chat completions response that was augmented by capabilities from // Azure OpenAI chat extensions. - Messages []ChatMessage + Messages []ChatResponseMessage +} + +// AzureChatGroundingEnhancementConfiguration - A representation of the available options for the Azure OpenAI grounding enhancement. +type AzureChatGroundingEnhancementConfiguration struct { + // REQUIRED; Specifies whether the enhancement is enabled. + Enabled *bool +} + +// AzureChatOCREnhancementConfiguration - A representation of the available options for the Azure OpenAI optical character +// recognition (OCR) enhancement. +type AzureChatOCREnhancementConfiguration struct { + // REQUIRED; Specifies whether the enhancement is enabled. + Enabled *bool } // AzureCognitiveSearchChatExtensionConfiguration - A specific representation of configurable options for Azure Cognitive // Search when using it as an Azure OpenAI chat extension. type AzureCognitiveSearchChatExtensionConfiguration struct { + // REQUIRED; The label for the type of an Azure chat extension. This typically corresponds to a matching Azure resource. Azure + // chat extensions are only compatible with Azure OpenAI. + configType *AzureChatExtensionType + + // REQUIRED; The parameters to use when configuring Azure Cognitive Search. + Parameters *AzureCognitiveSearchChatExtensionParameters +} + +// GetAzureChatExtensionConfiguration implements the AzureChatExtensionConfigurationClassification interface for type AzureCognitiveSearchChatExtensionConfiguration. +func (a *AzureCognitiveSearchChatExtensionConfiguration) GetAzureChatExtensionConfiguration() *AzureChatExtensionConfiguration { + return &AzureChatExtensionConfiguration{ + configType: a.configType, + } +} + +// AzureCognitiveSearchChatExtensionParameters - Parameters for Azure Cognitive Search when used as an Azure OpenAI chat extension. +type AzureCognitiveSearchChatExtensionParameters struct { // REQUIRED; The absolute endpoint path for the Azure Cognitive Search resource to use. Endpoint *string // REQUIRED; The name of the index to use as available in the referenced Azure Cognitive Search resource. IndexName *string - // REQUIRED; The API admin key to use with the specified Azure Cognitive Search endpoint. - Key *string - - // REQUIRED; The type label to use when configuring Azure OpenAI chat extensions. This should typically not be changed from - // its default value for Azure Cognitive Search. - Type *AzureCognitiveSearchChatExtensionConfigurationType + // The authentication method to use when accessing the defined data source. Each data source type supports a specific set + // of available authentication methods; please see the documentation of the data + // source for supported mechanisms. If not otherwise provided, On Your Data will attempt to use System Managed Identity (default + // credential) authentication. + Authentication OnYourDataAuthenticationOptionsClassification - // When using embeddings for search, specifies the resource URL from which embeddings should be retrieved. 
- EmbeddingEndpoint *string - - // When using embeddings, specifies the API key to use with the provided embeddings endpoint. - EmbeddingKey *string + // The embedding dependency for vector search. + EmbeddingDependency OnYourDataVectorizationSourceClassification // Customized field mapping behavior to use when interacting with the search index. - FieldsMapping *AzureCognitiveSearchChatExtensionConfigurationFieldsMapping + FieldsMapping *AzureCognitiveSearchIndexFieldMappingOptions + + // Search filter. + Filter *string // Whether queries should be restricted to use of indexed data. InScope *bool @@ -229,25 +282,37 @@ type AzureCognitiveSearchChatExtensionConfiguration struct { // The query type to use with Azure Cognitive Search. QueryType *AzureCognitiveSearchQueryType + // Give the model instructions about how it should behave and any context it should reference when generating a response. + // You can describe the assistant's personality and tell it how to format responses. + // There's a 100 token limit for it, and it counts against the overall token limit. + RoleInformation *string + // The additional semantic configuration for the query. SemanticConfiguration *string + // The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but + // lower recall of the answer. + Strictness *int32 + // The configured top number of documents to feature for the configured query. TopNDocuments *int32 } -// AzureCognitiveSearchChatExtensionConfigurationFieldsMapping - Customized field mapping behavior to use when interacting -// with the search index. -type AzureCognitiveSearchChatExtensionConfigurationFieldsMapping struct { +// AzureCognitiveSearchIndexFieldMappingOptions - Optional settings to control how fields are processed when using a configured +// Azure Cognitive Search resource. +type AzureCognitiveSearchIndexFieldMappingOptions struct { // The names of index fields that should be treated as content. - ContentFieldNames []string + ContentFields []string // The separator pattern that content fields should use. - ContentFieldSeparator *string + ContentFieldsSeparator *string // The name of the index field to use as a filepath. FilepathField *string + // The names of fields that represent image vector data. + ImageVectorFields []string + // The name of the index field to use as a title. TitleField *string @@ -258,76 +323,164 @@ type AzureCognitiveSearchChatExtensionConfigurationFieldsMapping struct { VectorFields []string } -// AzureCognitiveSearchIndexFieldMappingOptions - Optional settings to control how fields are processed when using a configured -// Azure Cognitive Search resource. -type AzureCognitiveSearchIndexFieldMappingOptions struct { - // The names of index fields that should be treated as content. - ContentFieldNames []string +// AzureCosmosDBChatExtensionConfiguration - A specific representation of configurable options for Elasticsearch when using +// it as an Azure OpenAI chat extension. +type AzureCosmosDBChatExtensionConfiguration struct { + // REQUIRED; The label for the type of an Azure chat extension. This typically corresponds to a matching Azure resource. Azure + // chat extensions are only compatible with Azure OpenAI. + configType *AzureChatExtensionType - // The separator pattern that content fields should use. - ContentFieldSeparator *string + // REQUIRED; The parameters to use when configuring Azure OpenAI CosmosDB chat extensions. 
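The expanded `AzureCognitiveSearchChatExtensionParameters` above move authentication into the On Your Data options and add an embedding dependency for vector search. A sketch of wiring those together, assuming `OnYourDataDeploymentNameVectorizationSource` exposes a `DeploymentName` field (only the type name appears in this diff):

```go
package examples

import (
	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// cognitiveSearchWithVectors builds a Cognitive Search chat extension that authenticates
// with an API key and vectorizes queries through an embeddings deployment in the same
// Azure OpenAI resource.
func cognitiveSearchWithVectors(searchEndpoint, searchIndex, searchAPIKey, embeddingsDeployment string) azopenai.AzureChatExtensionConfigurationClassification {
	return &azopenai.AzureCognitiveSearchChatExtensionConfiguration{
		Parameters: &azopenai.AzureCognitiveSearchChatExtensionParameters{
			Endpoint:  &searchEndpoint,
			IndexName: &searchIndex,
			Authentication: &azopenai.OnYourDataAPIKeyAuthenticationOptions{
				Key: &searchAPIKey,
			},
			EmbeddingDependency: &azopenai.OnYourDataDeploymentNameVectorizationSource{
				DeploymentName: &embeddingsDeployment, // assumed field name
			},
			InScope:       to.Ptr(true),
			Strictness:    to.Ptr[int32](3),
			TopNDocuments: to.Ptr[int32](5),
		},
	}
}
```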
+ Parameters *AzureCosmosDBChatExtensionParameters +} - // The name of the index field to use as a filepath. - FilepathField *string +// GetAzureChatExtensionConfiguration implements the AzureChatExtensionConfigurationClassification interface for type AzureCosmosDBChatExtensionConfiguration. +func (a *AzureCosmosDBChatExtensionConfiguration) GetAzureChatExtensionConfiguration() *AzureChatExtensionConfiguration { + return &AzureChatExtensionConfiguration{ + configType: a.configType, + } +} - // The name of the index field to use as a title. - TitleField *string +// AzureCosmosDBChatExtensionParameters - Parameters to use when configuring Azure OpenAI On Your Data chat extensions when +// using Azure Cosmos DB for MongoDB vCore. +type AzureCosmosDBChatExtensionParameters struct { + // REQUIRED; The name of the Azure Cosmos DB resource container. + ContainerName *string - // The name of the index field to use as a URL. - URLField *string + // REQUIRED; The MongoDB vCore database name to use with Azure Cosmos DB. + DatabaseName *string - // The names of fields that represent vector data. + // REQUIRED; Customized field mapping behavior to use when interacting with the search index. + FieldsMapping *AzureCosmosDBFieldMappingOptions + + // REQUIRED; The MongoDB vCore index name to use with Azure Cosmos DB. + IndexName *string + + // The authentication method to use when accessing the defined data source. Each data source type supports a specific set + // of available authentication methods; please see the documentation of the data + // source for supported mechanisms. If not otherwise provided, On Your Data will attempt to use System Managed Identity (default + // credential) authentication. + Authentication OnYourDataAuthenticationOptionsClassification + + // The embedding dependency for vector search. + EmbeddingDependency OnYourDataVectorizationSourceClassification + + // Whether queries should be restricted to use of indexed data. + InScope *bool + + // Give the model instructions about how it should behave and any context it should reference when generating a response. + // You can describe the assistant's personality and tell it how to format responses. + // There's a 100 token limit for it, and it counts against the overall token limit. + RoleInformation *string + + // The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but + // lower recall of the answer. + Strictness *int32 + + // The configured top number of documents to feature for the configured query. + TopNDocuments *int32 +} + +// AzureCosmosDBFieldMappingOptions - Optional settings to control how fields are processed when using a configured Azure +// Cosmos DB resource. +type AzureCosmosDBFieldMappingOptions struct { + // REQUIRED; The names of fields that represent vector data. VectorFields []string } -// Error - The error object. -type Error struct { - // REQUIRED; One of a server-defined set of error codes. - Code *string +// AzureGroundingEnhancement - The grounding enhancement that returns the bounding box of the objects detected in the image. +type AzureGroundingEnhancement struct { + // REQUIRED; The lines of text detected by the grounding enhancement. + Lines []AzureGroundingEnhancementLine +} - // REQUIRED; A human-readable representation of the error. - Message *string +// AzureGroundingEnhancementCoordinatePoint - A representation of a single polygon point as used by the Azure grounding enhancement. 
+type AzureGroundingEnhancementCoordinatePoint struct { + // REQUIRED; The x-coordinate (horizontal axis) of the point. + X *float32 - // An array of details about specific errors that led to this reported error. - Details []Error + // REQUIRED; The y-coordinate (vertical axis) of the point. + Y *float32 +} - // An object containing more specific information than the current object about the error. - InnerError *InnerError +// AzureGroundingEnhancementLine - A content line object consisting of an adjacent sequence of content elements, such as words +// and selection marks. +type AzureGroundingEnhancementLine struct { + // REQUIRED; An array of spans that represent detected objects and its bounding box information. + Spans []AzureGroundingEnhancementLineSpan - // The target of the error. - Target *string + // REQUIRED; The text within the line. + Text *string } -// InnerError - An object containing more specific information about the error. As per Microsoft One API -// guidelines - -// https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses. -type InnerError struct { - // One of a server-defined set of error codes. - Code *string +// AzureGroundingEnhancementLineSpan - A span object that represents a detected object and its bounding box information. +type AzureGroundingEnhancementLineSpan struct { + // REQUIRED; The length of the span in characters, measured in Unicode codepoints. + Length *int32 + + // REQUIRED; The character offset within the text where the span begins. This offset is defined as the position of the first + // character of the span, counting from the start of the text as Unicode codepoints. + Offset *int32 - // Inner error. - InnerError *InnerError + // REQUIRED; An array of objects representing points in the polygon that encloses the detected object. + Polygon []AzureGroundingEnhancementCoordinatePoint + + // REQUIRED; The text content of the span that represents the detected object. + Text *string } -// batchImageGenerationOperationResponse - A polling status update or final response payload for an image operation. -type batchImageGenerationOperationResponse struct { - // REQUIRED; A timestamp when this job or item was created (in unix epochs). - Created *time.Time +// AzureMachineLearningIndexChatExtensionConfiguration - A specific representation of configurable options for Azure Machine +// Learning vector index when using it as an Azure OpenAI chat extension. +type AzureMachineLearningIndexChatExtensionConfiguration struct { + // REQUIRED; The label for the type of an Azure chat extension. This typically corresponds to a matching Azure resource. Azure + // chat extensions are only compatible with Azure OpenAI. + configType *AzureChatExtensionType - // REQUIRED; The ID of the operation. - ID *string + // REQUIRED; The parameters for the Azure Machine Learning vector index chat extension. + Parameters *AzureMachineLearningIndexChatExtensionParameters +} - // REQUIRED; The status of the operation - Status *azureOpenAIOperationState +// GetAzureChatExtensionConfiguration implements the AzureChatExtensionConfigurationClassification interface for type AzureMachineLearningIndexChatExtensionConfiguration. +func (a *AzureMachineLearningIndexChatExtensionConfiguration) GetAzureChatExtensionConfiguration() *AzureChatExtensionConfiguration { + return &AzureChatExtensionConfiguration{ + configType: a.configType, + } +} - // The error if the operation failed. 
- Error *Error +// AzureMachineLearningIndexChatExtensionParameters - Parameters for the Azure Machine Learning vector index chat extension. +type AzureMachineLearningIndexChatExtensionParameters struct { + // REQUIRED; The Azure Machine Learning vector index name. + Name *string + + // REQUIRED; The resource ID of the Azure Machine Learning project. + ProjectResourceID *string + + // REQUIRED; The version of the Azure Machine Learning vector index. + Version *string + + // The authentication method to use when accessing the defined data source. Each data source type supports a specific set + // of available authentication methods; please see the documentation of the data + // source for supported mechanisms. If not otherwise provided, On Your Data will attempt to use System Managed Identity (default + // credential) authentication. + Authentication OnYourDataAuthenticationOptionsClassification + + // Search filter. Only supported if the Azure Machine Learning vector index is of type AzureSearch. + Filter *string + + // Whether queries should be restricted to use of indexed data. + InScope *bool + + // Give the model instructions about how it should behave and any context it should reference when generating a response. + // You can describe the assistant's personality and tell it how to format responses. + // There's a 100 token limit for it, and it counts against the overall token limit. + RoleInformation *string - // A timestamp when this operation and its associated images expire and will be deleted (in unix epochs). - Expires *int64 + // The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but + // lower recall of the answer. + Strictness *int32 - // The result of the operation if the operation succeeded. - Result *ImageGenerations + // The configured top number of documents to feature for the configured query. + TopNDocuments *int32 } // ChatChoice - The representation of a single prompt completion as part of an overall chat completions request. Generally, @@ -343,85 +496,77 @@ type ChatChoice struct { // Information about the content filtering category (hate, sexual, violence, selfharm), if it has been detected, as well as // the severity level (verylow, low, medium, high-scale that determines the // intensity and risk level of harmful content) and if it has been filtered or not. - ContentFilterResults *ChatChoiceContentFilterResults + ContentFilterResults *ContentFilterResultsForChoice // The delta message content for a streaming response. - Delta *ChatChoiceDelta + Delta *ChatResponseMessage - // The chat message for a given chat completions prompt. - Message *ChatChoiceMessage -} + // Represents the output results of Azure OpenAI enhancements to chat completions, as configured via the matching input provided + // in the request. This supplementary information is only available when + // using Azure OpenAI and only when the request is configured to use enhancements. + Enhancements *AzureChatEnhancements -// ChatChoiceContentFilterResults - Information about the content filtering category (hate, sexual, violence, selfharm), if -// it has been detected, as well as the severity level (verylow, low, medium, high-scale that determines the -// intensity and risk level of harmful content) and if it has been filtered or not. -type ChatChoiceContentFilterResults struct { - // Describes an error returned if the content filtering system is down or otherwise unable to complete the operation in time. 
- Error *ContentFilterResultsError - - // Describes language attacks or uses that include pejorative or discriminatory language with reference to a person or identity - // group on the basis of certain differentiating attributes of these groups - // including but not limited to race, ethnicity, nationality, gender identity and expression, sexual orientation, religion, - // immigration status, ability status, personal appearance, and body size. - Hate *ContentFilterResult - - // Describes language related to physical actions intended to purposely hurt, injure, or damage one’s body, or kill oneself. - SelfHarm *ContentFilterResult - - // Describes language related to anatomical organs and genitals, romantic relationships, acts portrayed in erotic or affectionate - // terms, physical sexual acts, including those portrayed as an assault or a - // forced sexual violent act against one’s will, prostitution, pornography, and abuse. - Sexual *ContentFilterResult + // The reason the model stopped generating tokens, together with any applicable details. This structured representation replaces + // 'finish_reason' for some models. + FinishDetails ChatFinishDetailsClassification - // Describes language related to physical actions intended to hurt, injure, damage, or kill someone or something; describes - // weapons, etc. - Violence *ContentFilterResult + // The chat message for a given chat completions prompt. + Message *ChatResponseMessage } -// ChatChoiceDelta - The delta message content for a streaming response. -type ChatChoiceDelta struct { - // REQUIRED; The text associated with this message payload. - Content *string +// ChatCompletionRequestMessageContentPart - represents either an image URL or text content for a prompt +type ChatCompletionRequestMessageContentPart struct { + // REQUIRED; The type of the content part. + partType *ChatCompletionRequestMessageContentPartType +} - // REQUIRED; The role associated with this message payload. - Role *ChatRole +// GetChatCompletionRequestMessageContentPart implements the ChatCompletionRequestMessageContentPartClassification interface +// for type ChatCompletionRequestMessageContentPart. +func (c *ChatCompletionRequestMessageContentPart) GetChatCompletionRequestMessageContentPart() *ChatCompletionRequestMessageContentPart { + return c +} - // Additional context data associated with a chat message when requesting chat completions using compatible Azure OpenAI chat - // extensions. This includes information like the intermediate data source - // retrievals used to service a request. This context information is only populated when using Azure OpenAI with chat extensions - // capabilities configured. - Context *ChatMessageContext +// ChatCompletionRequestMessageContentPartImage - represents an image URL, to be used as part of a prompt +type ChatCompletionRequestMessageContentPartImage struct { + // REQUIRED; The type of the content part. + partType *ChatCompletionRequestMessageContentPartType - // The name and arguments of a function that should be called, as generated by the model. - FunctionCall *ChatMessageFunctionCall + // REQUIRED; contains the URL and level of detail for an image prompt + ImageURL *ChatCompletionRequestMessageContentPartImageURL +} - // The name of the author of this message. name is required if role is function, and it should be the name of the function - // whose response is in the content. May contain a-z, A-Z, 0-9, and underscores, - // with a maximum length of 64 characters. 
- Name *string +// GetChatCompletionRequestMessageContentPart implements the ChatCompletionRequestMessageContentPartClassification interface +// for type ChatCompletionRequestMessageContentPartImage. +func (c *ChatCompletionRequestMessageContentPartImage) GetChatCompletionRequestMessageContentPart() *ChatCompletionRequestMessageContentPart { + return &ChatCompletionRequestMessageContentPart{ + partType: c.partType, + } } -// ChatChoiceMessage - The chat message for a given chat completions prompt. -type ChatChoiceMessage struct { - // REQUIRED; The text associated with this message payload. - Content *string +// ChatCompletionRequestMessageContentPartImageURL - contains the URL and level of detail for an image prompt +type ChatCompletionRequestMessageContentPartImageURL struct { + // REQUIRED; Either a URL of the image or the base64 encoded image data. + URL *string - // REQUIRED; The role associated with this message payload. - Role *ChatRole + // Specifies the detail level of the image. Learn more in the Vision guide [/docs/guides/vision/low-or-high-fidelity-image-understanding]. + Detail *ChatCompletionRequestMessageContentPartImageURLDetail +} - // Additional context data associated with a chat message when requesting chat completions using compatible Azure OpenAI chat - // extensions. This includes information like the intermediate data source - // retrievals used to service a request. This context information is only populated when using Azure OpenAI with chat extensions - // capabilities configured. - Context *ChatMessageContext +// ChatCompletionRequestMessageContentPartText - represents text content, to be used as part of a prompt +type ChatCompletionRequestMessageContentPartText struct { + // REQUIRED; The type of the content part. + partType *ChatCompletionRequestMessageContentPartType - // The name and arguments of a function that should be called, as generated by the model. - FunctionCall *ChatMessageFunctionCall + // REQUIRED; The text content. + Text *string +} - // The name of the author of this message. name is required if role is function, and it should be the name of the function - // whose response is in the content. May contain a-z, A-Z, 0-9, and underscores, - // with a maximum length of 64 characters. - Name *string +// GetChatCompletionRequestMessageContentPart implements the ChatCompletionRequestMessageContentPartClassification interface +// for type ChatCompletionRequestMessageContentPartText. +func (c *ChatCompletionRequestMessageContentPartText) GetChatCompletionRequestMessageContentPart() *ChatCompletionRequestMessageContentPart { + return &ChatCompletionRequestMessageContentPart{ + partType: c.partType, + } } // ChatCompletions - Representation of the response data from a chat completions request. Completions support a wide variety @@ -439,12 +584,54 @@ type ChatCompletions struct { // REQUIRED; A unique identifier associated with this chat completions response. ID *string + // REQUIRED; Can be used in conjunction with the seed request parameter to understand when backend changes have been made + // that might impact determinism. + SystemFingerprint *string + // REQUIRED; Usage information for tokens processed and generated as part of this completions operation. Usage *CompletionsUsage // Content filtering results for zero or more prompts in the request. In a streaming request, results for different prompts // may arrive at different times or in different orders. 
- PromptFilterResults []PromptFilterResult + PromptFilterResults []ContentFilterResultsForPrompt +} + +// ChatCompletionsFunctionToolCall - A tool call to a function tool, issued by the model in evaluation of a configured function +// tool, that represents a function invocation needed for a subsequent chat completions request to resolve. +type ChatCompletionsFunctionToolCall struct { + // REQUIRED; The details of the function invocation requested by the tool call. + Function *FunctionCall + + // REQUIRED; The ID of the tool call. + ID *string + + // REQUIRED; The object type. + Type *string +} + +// GetChatCompletionsToolCall implements the ChatCompletionsToolCallClassification interface for type ChatCompletionsFunctionToolCall. +func (c *ChatCompletionsFunctionToolCall) GetChatCompletionsToolCall() *ChatCompletionsToolCall { + return &ChatCompletionsToolCall{ + Type: c.Type, + ID: c.ID, + } +} + +// ChatCompletionsFunctionToolDefinition - The definition information for a chat completions function tool that can call a +// function in response to a tool call. +type ChatCompletionsFunctionToolDefinition struct { + // REQUIRED; The function definition details for the function tool. + Function *FunctionDefinition + + // REQUIRED; The object type. + Type *string +} + +// GetChatCompletionsToolDefinition implements the ChatCompletionsToolDefinitionClassification interface for type ChatCompletionsFunctionToolDefinition. +func (c *ChatCompletionsFunctionToolDefinition) GetChatCompletionsToolDefinition() *ChatCompletionsToolDefinition { + return &ChatCompletionsToolDefinition{ + Type: c.Type, + } } // ChatCompletionsOptions - The configuration information for a chat completions request. Completions support a wide variety @@ -453,11 +640,18 @@ type ChatCompletionsOptions struct { // REQUIRED; The collection of context messages associated with this chat completions request. Typical usage begins with a // chat message for the System role that provides instructions for the behavior of the // assistant, followed by alternating messages between the User and Assistant roles. - Messages []ChatMessage + Messages []ChatRequestMessageClassification // The configuration entries for Azure OpenAI chat extensions that use them. This additional specification is only compatible // with Azure OpenAI. - AzureExtensionsOptions *AzureChatExtensionOptions + AzureExtensionsOptions []AzureChatExtensionConfigurationClassification + + // The model name to provide as part of this completions request. Not applicable to Azure OpenAI, where deployment information + // should be included in the Azure resource URI that's connected to. + DeploymentName *string + + // If provided, the configuration options for available Azure OpenAI chat enhancements. + Enhancements *AzureChatEnhancementConfiguration // A value that influences the probability of generated tokens appearing based on their cumulative frequency in generated // text. Positive values will make tokens less likely to appear as their frequency @@ -484,9 +678,6 @@ type ChatCompletionsOptions struct { // The maximum number of tokens to generate. MaxTokens *int32 - // REQUIRED: Deployment specifies the name of the deployment (for Azure OpenAI) or model (for OpenAI) to use for this request. - Deployment string - // The number of chat completions choices that should be generated for a chat completions response. Because this setting can // generate many completions, it may quickly consume your token quota. Use // carefully and ensure reasonable settings for max_tokens and stop. 
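> Editor's note — a minimal, illustrative sketch (not part of the generated diff) showing how the reshaped `ChatCompletionsOptions` above might be used: `DeploymentName` replacing the removed `Deployment` field, the per-role request message types standing in for the removed `ChatMessage`, and `Seed` paired with the returned `SystemFingerprint`. The client constructor, the `NewChatRequestUserMessageContent` helper, `to.Ptr`, and the endpoint/key/deployment values are assumptions drawn from the wider SDK surface rather than from this file; treat this as a sketch, not the package's canonical example.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func main() {
	// Endpoint, API key, and deployment name below are placeholders.
	client, err := azopenai.NewClientWithKeyCredential(
		os.Getenv("AOAI_ENDPOINT"),
		azcore.NewKeyCredential(os.Getenv("AOAI_API_KEY")),
		nil)
	if err != nil {
		log.Fatalf("failed to create client: %s", err)
	}

	resp, err := client.GetChatCompletions(context.TODO(), azopenai.ChatCompletionsOptions{
		// DeploymentName replaces the former Deployment field.
		DeploymentName: to.Ptr("gpt-4-1106-preview"),
		// Messages now take per-role request types instead of ChatMessage.
		Messages: []azopenai.ChatRequestMessageClassification{
			&azopenai.ChatRequestSystemMessage{Content: to.Ptr("You are a helpful assistant.")},
			&azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("What's the weather like in Seattle?")},
		},
		// Best-effort determinism; compare ChatCompletions.SystemFingerprint across calls.
		Seed: to.Ptr(int64(42)),
	}, nil)
	if err != nil {
		log.Fatalf("chat completions request failed: %s", err)
	}

	for _, choice := range resp.Choices {
		if choice.Message != nil && choice.Message.Content != nil {
			log.Println(*choice.Message.Content)
		}
	}
}
```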
@@ -497,6 +688,14 @@ type ChatCompletionsOptions struct { // and increase the model's likelihood to output new topics. PresencePenalty *float32 + // An object specifying the format that the model must output. Used to enable JSON mode. + ResponseFormat *ChatCompletionsResponseFormat + + // If specified, the system will make a best effort to sample deterministically such that repeated requests with the same + // seed and parameters should return the same result. Determinism is not guaranteed, + // and you should refer to the system_fingerprint response parameter to monitor changes in the backend." + Seed *int64 + // A collection of textual sequences that will end completions generation. Stop []string @@ -506,6 +705,12 @@ type ChatCompletionsOptions struct { // of these two settings is difficult to predict. Temperature *float32 + // If specified, the model will configure which of the provided tools it can use for the chat completions response. + ToolChoice any + + // The available tool definitions that the chat completions request can use, including caller-defined functions. + Tools []ChatCompletionsToolDefinitionClassification + // An alternative to sampling with temperature called nucleus sampling. This value causes the model to consider the results // of tokens with the provided probability mass. As an example, a value of 0.15 // will cause only the tokens comprising the top 15% of probability mass to be considered. It is not recommended to modify @@ -517,52 +722,151 @@ type ChatCompletionsOptions struct { User *string } -// ChatMessage - A single, role-attributed message within a chat completion interaction. -type ChatMessage struct { - // REQUIRED; The text associated with this message payload. +// ChatCompletionsToolCall - An abstract representation of a tool call that must be resolved in a subsequent request to perform +// the requested chat completion. +type ChatCompletionsToolCall struct { + // REQUIRED; The ID of the tool call. + ID *string + + // REQUIRED; The object type. + Type *string +} + +// GetChatCompletionsToolCall implements the ChatCompletionsToolCallClassification interface for type ChatCompletionsToolCall. +func (c *ChatCompletionsToolCall) GetChatCompletionsToolCall() *ChatCompletionsToolCall { return c } + +// ChatCompletionsToolDefinition - An abstract representation of a tool that can be used by the model to improve a chat completions +// response. +type ChatCompletionsToolDefinition struct { + // REQUIRED; The object type. + Type *string +} + +// GetChatCompletionsToolDefinition implements the ChatCompletionsToolDefinitionClassification interface for type ChatCompletionsToolDefinition. +func (c *ChatCompletionsToolDefinition) GetChatCompletionsToolDefinition() *ChatCompletionsToolDefinition { + return c +} + +// ChatFinishDetails - An abstract representation of structured information about why a chat completions response terminated. +type ChatFinishDetails struct { + // REQUIRED; The object type. + Type *string +} + +// GetChatFinishDetails implements the ChatFinishDetailsClassification interface for type ChatFinishDetails. +func (c *ChatFinishDetails) GetChatFinishDetails() *ChatFinishDetails { return c } + +// ChatRequestAssistantMessage - A request chat message representing response or action from the assistant. +type ChatRequestAssistantMessage struct { + // REQUIRED; The content of the message. Content *string - // REQUIRED; The role associated with this message payload. - Role *ChatRole + // REQUIRED; The chat role associated with this message. 
+ role *ChatRole + + // An optional name for the participant. + Name *string - // Additional context data associated with a chat message when requesting chat completions using compatible Azure OpenAI chat - // extensions. This includes information like the intermediate data source - // retrievals used to service a request. This context information is only populated when using Azure OpenAI with chat extensions - // capabilities configured. - Context *ChatMessageContext + // The tool calls that must be resolved and have their outputs appended to subsequent input messages for the chat completions + // request to resolve as configured. + ToolCalls []ChatCompletionsToolCallClassification +} - // The name and arguments of a function that should be called, as generated by the model. - FunctionCall *ChatMessageFunctionCall +// GetChatRequestMessage implements the ChatRequestMessageClassification interface for type ChatRequestAssistantMessage. +func (c *ChatRequestAssistantMessage) GetChatRequestMessage() *ChatRequestMessage { + return &ChatRequestMessage{ + role: c.role, + } +} - // The name of the author of this message. name is required if role is function, and it should be the name of the function - // whose response is in the content. May contain a-z, A-Z, 0-9, and underscores, - // with a maximum length of 64 characters. +// ChatRequestMessage - An abstract representation of a chat message as provided in a request. +type ChatRequestMessage struct { + // REQUIRED; The chat role associated with this message. + role *ChatRole +} + +// GetChatRequestMessage implements the ChatRequestMessageClassification interface for type ChatRequestMessage. +func (c *ChatRequestMessage) GetChatRequestMessage() *ChatRequestMessage { return c } + +// ChatRequestSystemMessage - A request chat message containing system instructions that influence how the model will generate +// a chat completions response. +type ChatRequestSystemMessage struct { + // REQUIRED; The contents of the system message. + Content *string + + // REQUIRED; The chat role associated with this message. + role *ChatRole + + // An optional name for the participant. Name *string } -// ChatMessageContext - Additional context data associated with a chat message when requesting chat completions using compatible -// Azure OpenAI chat extensions. This includes information like the intermediate data source -// retrievals used to service a request. This context information is only populated when using Azure OpenAI with chat extensions -// capabilities configured. -type ChatMessageContext struct { - // The contextual message payload associated with the Azure chat extensions used for a chat completions request. These messages - // describe the data source retrievals, plugin invocations, and other - // intermediate steps taken in the course of generating a chat completions response that was augmented by capabilities from - // Azure OpenAI chat extensions. - Messages []ChatMessage +// GetChatRequestMessage implements the ChatRequestMessageClassification interface for type ChatRequestSystemMessage. +func (c *ChatRequestSystemMessage) GetChatRequestMessage() *ChatRequestMessage { + return &ChatRequestMessage{ + role: c.role, + } } -// ChatMessageFunctionCall - The name and arguments of a function that should be called, as generated by the model. -type ChatMessageFunctionCall struct { - // REQUIRED; The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not - // always generate valid JSON, and may hallucinate parameters not defined by your function - // schema. Validate the arguments in your code before calling your function. - Arguments *string +// ChatRequestToolMessage - A request chat message representing requested output from a configured tool. +type ChatRequestToolMessage struct { + // REQUIRED; The content of the message. + Content *string - // REQUIRED; The name of the function to call. + // REQUIRED; The chat role associated with this message. + role *ChatRole + + // REQUIRED; The ID of the tool call resolved by the provided content. + ToolCallID *string +} + +// GetChatRequestMessage implements the ChatRequestMessageClassification interface for type ChatRequestToolMessage. +func (c *ChatRequestToolMessage) GetChatRequestMessage() *ChatRequestMessage { + return &ChatRequestMessage{ + role: c.role, + } +} + +// ChatRequestUserMessage - A request chat message representing user input to the assistant. +type ChatRequestUserMessage struct { + // REQUIRED; The contents of the user message, with available input types varying by selected model. + Content ChatRequestUserMessageContent + + // REQUIRED; The chat role associated with this message. + role *ChatRole + + // An optional name for the participant. Name *string } +// GetChatRequestMessage implements the ChatRequestMessageClassification interface for type ChatRequestUserMessage. +func (c *ChatRequestUserMessage) GetChatRequestMessage() *ChatRequestMessage { + return &ChatRequestMessage{ + role: c.role, + } +} + +// ChatResponseMessage - A representation of a chat message as received in a response. +type ChatResponseMessage struct { + // REQUIRED; The content of the message. + Content *string + + // REQUIRED; The chat role associated with the message. + Role *ChatRole + + // If Azure OpenAI chat extensions are configured, this array represents the incremental steps performed by those extensions + // while processing the chat completions request. + Context *AzureChatExtensionsMessageContext + + // The function call that must be resolved and have its output appended to subsequent input messages for the chat completions + // request to resolve as configured. + FunctionCall *FunctionCall + + // The tool calls that must be resolved and have their outputs appended to subsequent input messages for the chat completions + // request to resolve as configured. + ToolCalls []ChatCompletionsToolCallClassification +} + // Choice - The representation of a single prompt completion as part of an overall completions request. Generally, n choices // are generated per provided prompt with a default value of 1. Token limits and other // settings may limit the number of choices generated. @@ -582,33 +886,7 @@ type Choice struct { // Information about the content filtering category (hate, sexual, violence, selfharm), if it has been detected, as well as // the severity level (verylow, low, medium, high-scale that determines the // intensity and risk level of harmful content) and if it has been filtered or not. - ContentFilterResults *ChoiceContentFilterResults -} - -// ChoiceContentFilterResults - Information about the content filtering category (hate, sexual, violence, selfharm), if it -// has been detected, as well as the severity level (verylow, low, medium, high-scale that determines the -// intensity and risk level of harmful content) and if it has been filtered or not. 
-type ChoiceContentFilterResults struct { - // Describes an error returned if the content filtering system is down or otherwise unable to complete the operation in time. - Error *ContentFilterResultsError - - // Describes language attacks or uses that include pejorative or discriminatory language with reference to a person or identity - // group on the basis of certain differentiating attributes of these groups - // including but not limited to race, ethnicity, nationality, gender identity and expression, sexual orientation, religion, - // immigration status, ability status, personal appearance, and body size. - Hate *ContentFilterResult - - // Describes language related to physical actions intended to purposely hurt, injure, or damage one’s body, or kill oneself. - SelfHarm *ContentFilterResult - - // Describes language related to anatomical organs and genitals, romantic relationships, acts portrayed in erotic or affectionate - // terms, physical sexual acts, including those portrayed as an assault or a - // forced sexual violent act against one’s will, prostitution, pornography, and abuse. - Sexual *ContentFilterResult - - // Describes language related to physical actions intended to hurt, injure, damage, or kill someone or something; describes - // weapons, etc. - Violence *ContentFilterResult + ContentFilterResults *ContentFilterResultsForChoice } // ChoiceLogProbs - The log probabilities model for tokens associated with this completions choice. @@ -646,7 +924,7 @@ type Completions struct { // Content filtering results for zero or more prompts in the request. In a streaming request, results for different prompts // may arrive at different times or in different orders. - PromptFilterResults []PromptFilterResult + PromptFilterResults []ContentFilterResultsForPrompt } // CompletionsLogProbabilityModel - Representation of a log probabilities model for a completions generation. @@ -676,6 +954,10 @@ type CompletionsOptions struct { // and ensure reasonable settings for maxtokens and stop. BestOf *int32 + // The model name to provide as part of this completions request. Not applicable to Azure OpenAI, where deployment information + // should be included in the Azure resource URI that's connected to. + DeploymentName *string + // A value specifying whether completions responses should include input prompts as prefixes to their generated output. Echo *bool @@ -698,9 +980,6 @@ type CompletionsOptions struct { // The maximum number of tokens to generate. MaxTokens *int32 - // REQUIRED: Deployment specifies the name of the deployment (for Azure OpenAI) or model (for OpenAI) to use for this request. - Deployment string - // The number of completions choices that should be generated per provided prompt as part of an overall completions response. // Because this setting can generate many completions, it may quickly consume // your token quota. Use carefully and ensure reasonable settings for max_tokens and stop. @@ -744,6 +1023,41 @@ type CompletionsUsage struct { TotalTokens *int32 } +// ContentFilterBlocklistIDResult - Represents the outcome of an evaluation against a custom blocklist as performed by content +// filtering. +type ContentFilterBlocklistIDResult struct { + // REQUIRED; A value indicating whether or not the content has been filtered. + Filtered *bool + + // REQUIRED; The ID of the custom blocklist evaluated. + ID *string +} + +// ContentFilterCitedDetectionResult - Represents the outcome of a detection operation against protected resources as performed +// by content filtering. 
+type ContentFilterCitedDetectionResult struct { + // REQUIRED; A value indicating whether detection occurred, irrespective of severity or whether the content was filtered. + Detected *bool + + // REQUIRED; A value indicating whether or not the content has been filtered. + Filtered *bool + + // REQUIRED; The license description associated with the detection. + License *string + + // The internet location associated with the detection. + URL *string +} + +// ContentFilterDetectionResult - Represents the outcome of a detection operation performed by content filtering. +type ContentFilterDetectionResult struct { + // REQUIRED; A value indicating whether detection occurred, irrespective of severity or whether the content was filtered. + Detected *bool + + // REQUIRED; A value indicating whether or not the content has been filtered. + Filtered *bool +} + // ContentFilterResult - Information about filtered content severity level and if it has been filtered or not. type ContentFilterResult struct { // REQUIRED; A value indicating whether or not the content has been filtered. @@ -753,10 +1067,13 @@ type ContentFilterResult struct { Severity *ContentFilterSeverity } -// ContentFilterResults - Information about the content filtering category, if it has been detected. -type ContentFilterResults struct { +// ContentFilterResultDetailsForPrompt - Information about content filtering evaluated against input data to Azure OpenAI. +type ContentFilterResultDetailsForPrompt struct { + // Describes detection results against configured custom blocklists. + CustomBlocklists []ContentFilterBlocklistIDResult + // Describes an error returned if the content filtering system is down or otherwise unable to complete the operation in time. - Error *ContentFilterResultsError + Error *Error // Describes language attacks or uses that include pejorative or discriminatory language with reference to a person or identity // group on the basis of certain differentiating attributes of these groups @@ -764,6 +1081,12 @@ type ContentFilterResults struct { // immigration status, ability status, personal appearance, and body size. Hate *ContentFilterResult + // Whether a jailbreak attempt was detected in the prompt. + Jailbreak *ContentFilterDetectionResult + + // Describes whether profanity was detected. + Profanity *ContentFilterDetectionResult + // Describes language related to physical actions intended to purposely hurt, injure, or damage one’s body, or kill oneself. SelfHarm *ContentFilterResult @@ -777,23 +1100,128 @@ type ContentFilterResults struct { Violence *ContentFilterResult } -// ContentFilterResultsError - Describes an error returned if the content filtering system is down or otherwise unable to -// complete the operation in time. -type ContentFilterResultsError struct { - // REQUIRED; One of a server-defined set of error codes. - Code *string +// ContentFilterResultsForChoice - Information about content filtering evaluated against generated model output. +type ContentFilterResultsForChoice struct { + // Describes detection results against configured custom blocklists. + CustomBlocklists []ContentFilterBlocklistIDResult - // REQUIRED; A human-readable representation of the error. - Message *string + // Describes an error returned if the content filtering system is down or otherwise unable to complete the operation in time. 
+ Error *Error + + // Describes language attacks or uses that include pejorative or discriminatory language with reference to a person or identity + // group on the basis of certain differentiating attributes of these groups + // including but not limited to race, ethnicity, nationality, gender identity and expression, sexual orientation, religion, + // immigration status, ability status, personal appearance, and body size. + Hate *ContentFilterResult + + // Describes whether profanity was detected. + Profanity *ContentFilterDetectionResult - // An array of details about specific errors that led to this reported error. - Details []Error + // Information about detection of protected code material. + ProtectedMaterialCode *ContentFilterCitedDetectionResult - // An object containing more specific information than the current object about the error. - InnerError *InnerError + // Information about detection of protected text material. + ProtectedMaterialText *ContentFilterDetectionResult - // The target of the error. - Target *string + // Describes language related to physical actions intended to purposely hurt, injure, or damage one’s body, or kill oneself. + SelfHarm *ContentFilterResult + + // Describes language related to anatomical organs and genitals, romantic relationships, acts portrayed in erotic or affectionate + // terms, physical sexual acts, including those portrayed as an assault or a + // forced sexual violent act against one’s will, prostitution, pornography, and abuse. + Sexual *ContentFilterResult + + // Describes language related to physical actions intended to hurt, injure, damage, or kill someone or something; describes + // weapons, etc. + Violence *ContentFilterResult +} + +// ContentFilterResultsForPrompt - Content filtering results for a single prompt in the request. +type ContentFilterResultsForPrompt struct { + // REQUIRED; Content filtering results for this prompt + ContentFilterResults *ContentFilterResultDetailsForPrompt + + // REQUIRED; The index of this prompt in the set of prompt results + PromptIndex *int32 +} + +// ElasticsearchChatExtensionConfiguration - A specific representation of configurable options for Elasticsearch when using +// it as an Azure OpenAI chat extension. +type ElasticsearchChatExtensionConfiguration struct { + // REQUIRED; The label for the type of an Azure chat extension. This typically corresponds to a matching Azure resource. Azure + // chat extensions are only compatible with Azure OpenAI. + configType *AzureChatExtensionType + + // REQUIRED; The parameters to use when configuring Elasticsearch®. + Parameters *ElasticsearchChatExtensionParameters +} + +// GetAzureChatExtensionConfiguration implements the AzureChatExtensionConfigurationClassification interface for type ElasticsearchChatExtensionConfiguration. +func (e *ElasticsearchChatExtensionConfiguration) GetAzureChatExtensionConfiguration() *AzureChatExtensionConfiguration { + return &AzureChatExtensionConfiguration{ + configType: e.configType, + } +} + +// ElasticsearchChatExtensionParameters - Parameters to use when configuring Elasticsearch® as an Azure OpenAI chat extension. +type ElasticsearchChatExtensionParameters struct { + // REQUIRED; The endpoint of Elasticsearch®. + Endpoint *string + + // REQUIRED; The index name of Elasticsearch®. + IndexName *string + + // The authentication method to use when accessing the defined data source. 
Each data source type supports a specific set + // of available authentication methods; please see the documentation of the data + // source for supported mechanisms. If not otherwise provided, On Your Data will attempt to use System Managed Identity (default + // credential) authentication. + Authentication OnYourDataAuthenticationOptionsClassification + + // The embedding dependency for vector search. + EmbeddingDependency OnYourDataVectorizationSourceClassification + + // The index field mapping options of Elasticsearch®. + FieldsMapping *ElasticsearchIndexFieldMappingOptions + + // Whether queries should be restricted to use of indexed data. + InScope *bool + + // The query type of Elasticsearch®. + QueryType *ElasticsearchQueryType + + // Give the model instructions about how it should behave and any context it should reference when generating a response. + // You can describe the assistant's personality and tell it how to format responses. + // There's a 100 token limit for it, and it counts against the overall token limit. + RoleInformation *string + + // The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but + // lower recall of the answer. + Strictness *int32 + + // The configured top number of documents to feature for the configured query. + TopNDocuments *int32 +} + +// ElasticsearchIndexFieldMappingOptions - Optional settings to control how fields are processed when using a configured Elasticsearch® +// resource. +type ElasticsearchIndexFieldMappingOptions struct { + // The names of index fields that should be treated as content. + ContentFields []string + + // The separator pattern that content fields should use. + ContentFieldsSeparator *string + + // The name of the index field to use as a filepath. + FilepathField *string + + // The name of the index field to use as a title. + TitleField *string + + // The name of the index field to use as a URL. + URLField *string + + // The names of fields that represent vector data. + VectorFields []string } // EmbeddingItem - Representation of a single embeddings relatedness comparison. @@ -826,14 +1254,15 @@ type EmbeddingsOptions struct { // inferior results when newlines are present. Input []string - // REQUIRED: Deployment specifies the name of the deployment (for Azure OpenAI) or model (for OpenAI) to use for this request. - Deployment string + // The model name to provide as part of this embeddings request. Not applicable to Azure OpenAI, where deployment information + // should be included in the Azure resource URI that's connected to. + DeploymentName *string // An identifier for the caller or end user of the operation. This may be used for tracking or rate-limiting purposes. User *string } -// EmbeddingsUsage - Usage counts for tokens input using the embeddings API. +// EmbeddingsUsage - Measurement of the amount of tokens used in this request and response. type EmbeddingsUsage struct { // REQUIRED; Number of tokens sent in the original request. PromptTokens *int32 @@ -842,13 +1271,13 @@ type EmbeddingsUsage struct { TotalTokens *int32 } -// EmbeddingsUsageAutoGenerated - Measurement of the amount of tokens used in this request and response. -type EmbeddingsUsageAutoGenerated struct { - // REQUIRED; Number of tokens sent in the original request. - PromptTokens *int32 +// Error - The error object. +type Error struct { + // REQUIRED; One of a server-defined set of error codes. + Code *string - // REQUIRED; Total number of tokens transacted in this request/response. 
- TotalTokens *int32 + // REQUIRED; A human-readable representation of the error. + message *string } // FunctionCall - The name and arguments of a function that should be called, as generated by the model. @@ -872,15 +1301,22 @@ type FunctionDefinition struct { // its parameters. Description *string - // The parameters the functions accepts, described as a JSON Schema object. + // The parameters the function accepts, described as a JSON Schema object. Parameters any } -// FunctionName - A structure that specifies the exact name of a specific, request-provided function to use when processing -// a chat completions operation. -type FunctionName struct { - // REQUIRED; The name of the function to call. - Name *string +// ImageGenerationData - A representation of a single generated image, provided as either base64-encoded data or as a URL +// from which the image may be retrieved. +type ImageGenerationData struct { + // The complete data for an image, represented as a base64-encoded string. + Base64Data *string + + // The final prompt used by the model to generate the image. Only provided with dall-3-models and only when revisions were + // made to the prompt. + RevisedPrompt *string + + // The URL that provides temporary access to download the generated image. + URL *string } // ImageGenerationOptions - Represents the request data used to generate images. @@ -888,69 +1324,308 @@ type ImageGenerationOptions struct { // REQUIRED; A description of the desired images. Prompt *string - // The number of images to generate (defaults to 1). + // The model name or Azure OpenAI model deployment name to use for image generation. If not specified, dall-e-2 will be inferred + // as a default. + DeploymentName *string + + // The number of images to generate. Dall-e-2 models support values between 1 and 10. Dall-e-3 models only support a value + // of 1. N *int32 - // The format in which image generation response items should be presented. Azure OpenAI only supports URL response items. + // The desired image generation quality level to use. Only configurable with dall-e-3 models. + Quality *ImageGenerationQuality + + // The format in which image generation response items should be presented. ResponseFormat *ImageGenerationResponseFormat - // The desired size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 (defaults to 1024x1024). + // The desired dimensions for generated images. Dall-e-2 models support 256x256, 512x512, or 1024x1024. Dall-e-3 models support + // 1024x1024, 1792x1024, or 1024x1792. Size *ImageSize + // The desired image generation style to use. Only configurable with dall-e-3 models. + Style *ImageGenerationStyle + // A unique identifier representing your end-user, which can help to monitor and detect abuse. User *string } -// ImageGenerations - The result of the operation if the operation succeeded. +// ImageGenerations - The result of a successful image generation operation. type ImageGenerations struct { - // REQUIRED; A timestamp when this job or item was created (in unix epochs). + // REQUIRED; A timestamp representing when this operation was started. Expressed in seconds since the Unix epoch of 1970-01-01T00:00:00+0000. Created *time.Time - // REQUIRED; The images generated by the operator. - Data []ImageGenerationsDataItem + // REQUIRED; The images generated by the operation. + Data []ImageGenerationData } -// ImageLocation - An image response item that provides a URL from which an image may be accessed. 
-type ImageLocation struct { - // REQUIRED; The URL that provides temporary access to download the generated image. - URL *string +// MaxTokensFinishDetails - A structured representation of a stop reason that signifies a token limit was reached before the +// model could naturally complete. +type MaxTokensFinishDetails struct { + // REQUIRED; The object type. + Type *string } -// ImagePayload - An image response item that directly represents the image data as a base64-encoded string. -type ImagePayload struct { - // REQUIRED; The complete data for an image represented as a base64-encoded string. - B64JSON *string +// GetChatFinishDetails implements the ChatFinishDetailsClassification interface for type MaxTokensFinishDetails. +func (m *MaxTokensFinishDetails) GetChatFinishDetails() *ChatFinishDetails { + return &ChatFinishDetails{ + Type: m.Type, + } } -// PromptFilterResult - Content filtering results for a single prompt in the request. -type PromptFilterResult struct { - // REQUIRED; The index of this prompt in the set of prompt results - PromptIndex *int32 +// OnYourDataAPIKeyAuthenticationOptions - The authentication options for Azure OpenAI On Your Data when using an API key. +type OnYourDataAPIKeyAuthenticationOptions struct { + // REQUIRED; The authentication type. + configType *OnYourDataAuthenticationType - // Content filtering results for this prompt - ContentFilterResults *PromptFilterResultContentFilterResults + // REQUIRED; The API key to use for authentication. + Key *string } -// PromptFilterResultContentFilterResults - Content filtering results for this prompt -type PromptFilterResultContentFilterResults struct { - // Describes an error returned if the content filtering system is down or otherwise unable to complete the operation in time. - Error *ContentFilterResultsError +// GetOnYourDataAuthenticationOptions implements the OnYourDataAuthenticationOptionsClassification interface for type OnYourDataAPIKeyAuthenticationOptions. +func (o *OnYourDataAPIKeyAuthenticationOptions) GetOnYourDataAuthenticationOptions() *OnYourDataAuthenticationOptions { + return &OnYourDataAuthenticationOptions{ + configType: o.configType, + } +} - // Describes language attacks or uses that include pejorative or discriminatory language with reference to a person or identity - // group on the basis of certain differentiating attributes of these groups - // including but not limited to race, ethnicity, nationality, gender identity and expression, sexual orientation, religion, - // immigration status, ability status, personal appearance, and body size. - Hate *ContentFilterResult +// OnYourDataAuthenticationOptions - The authentication options for Azure OpenAI On Your Data. +type OnYourDataAuthenticationOptions struct { + // REQUIRED; The authentication type. + configType *OnYourDataAuthenticationType +} - // Describes language related to physical actions intended to purposely hurt, injure, or damage one’s body, or kill oneself. - SelfHarm *ContentFilterResult +// GetOnYourDataAuthenticationOptions implements the OnYourDataAuthenticationOptionsClassification interface for type OnYourDataAuthenticationOptions. 
+func (o *OnYourDataAuthenticationOptions) GetOnYourDataAuthenticationOptions() *OnYourDataAuthenticationOptions { + return o +} - // Describes language related to anatomical organs and genitals, romantic relationships, acts portrayed in erotic or affectionate - // terms, physical sexual acts, including those portrayed as an assault or a - // forced sexual violent act against one’s will, prostitution, pornography, and abuse. - Sexual *ContentFilterResult +// OnYourDataConnectionStringAuthenticationOptions - The authentication options for Azure OpenAI On Your Data when using a +// connection string. +type OnYourDataConnectionStringAuthenticationOptions struct { + // REQUIRED; The connection string to use for authentication. + ConnectionString *string - // Describes language related to physical actions intended to hurt, injure, damage, or kill someone or something; describes - // weapons, etc. - Violence *ContentFilterResult + // REQUIRED; The authentication type. + configType *OnYourDataAuthenticationType +} + +// GetOnYourDataAuthenticationOptions implements the OnYourDataAuthenticationOptionsClassification interface for type OnYourDataConnectionStringAuthenticationOptions. +func (o *OnYourDataConnectionStringAuthenticationOptions) GetOnYourDataAuthenticationOptions() *OnYourDataAuthenticationOptions { + return &OnYourDataAuthenticationOptions{ + configType: o.configType, + } +} + +// OnYourDataDeploymentNameVectorizationSource - The details of a a vectorization source, used by Azure OpenAI On Your Data +// when applying vector search, that is based on an internal embeddings model deployment name in the same Azure OpenAI resource. +type OnYourDataDeploymentNameVectorizationSource struct { + // REQUIRED; The embedding model deployment name within the same Azure OpenAI resource. This enables you to use vector search + // without Azure OpenAI api-key and without Azure OpenAI public network access. + DeploymentName *string + + // REQUIRED; The type of vectorization source to use. + Type *OnYourDataVectorizationSourceType +} + +// GetOnYourDataVectorizationSource implements the OnYourDataVectorizationSourceClassification interface for type OnYourDataDeploymentNameVectorizationSource. +func (o *OnYourDataDeploymentNameVectorizationSource) GetOnYourDataVectorizationSource() *OnYourDataVectorizationSource { + return &OnYourDataVectorizationSource{ + Type: o.Type, + } +} + +// OnYourDataEndpointVectorizationSource - The details of a a vectorization source, used by Azure OpenAI On Your Data when +// applying vector search, that is based on a public Azure OpenAI endpoint call for embeddings. +type OnYourDataEndpointVectorizationSource struct { + // REQUIRED; Specifies the authentication options to use when retrieving embeddings from the specified endpoint. + Authentication OnYourDataAuthenticationOptionsClassification + + // REQUIRED; Specifies the resource endpoint URL from which embeddings should be retrieved. It should be in the format of + // https://YOURRESOURCENAME.openai.azure.com/openai/deployments/YOURDEPLOYMENTNAME/embeddings. + // The api-version query parameter is not allowed. + Endpoint *string + + // REQUIRED; The type of vectorization source to use. + Type *OnYourDataVectorizationSourceType +} + +// GetOnYourDataVectorizationSource implements the OnYourDataVectorizationSourceClassification interface for type OnYourDataEndpointVectorizationSource. 
+func (o *OnYourDataEndpointVectorizationSource) GetOnYourDataVectorizationSource() *OnYourDataVectorizationSource { + return &OnYourDataVectorizationSource{ + Type: o.Type, + } +} + +// OnYourDataKeyAndKeyIDAuthenticationOptions - The authentication options for Azure OpenAI On Your Data when using an Elasticsearch +// key and key ID pair. +type OnYourDataKeyAndKeyIDAuthenticationOptions struct { + // REQUIRED; The authentication type. + configType *OnYourDataAuthenticationType + + // REQUIRED; The key to use for authentication. + Key *string + + // REQUIRED; The key ID to use for authentication. + KeyID *string +} + +// GetOnYourDataAuthenticationOptions implements the OnYourDataAuthenticationOptionsClassification interface for type OnYourDataKeyAndKeyIDAuthenticationOptions. +func (o *OnYourDataKeyAndKeyIDAuthenticationOptions) GetOnYourDataAuthenticationOptions() *OnYourDataAuthenticationOptions { + return &OnYourDataAuthenticationOptions{ + configType: o.configType, + } +} + +// OnYourDataModelIDVectorizationSource - The details of a a vectorization source, used by Azure OpenAI On Your Data when +// applying vector search, that is based on a search service model ID. Currently only supported by Elasticsearch®. +type OnYourDataModelIDVectorizationSource struct { + // REQUIRED; The embedding model ID build inside the search service. Currently only supported by Elasticsearch®. + ModelID *string + + // REQUIRED; The type of vectorization source to use. + Type *OnYourDataVectorizationSourceType +} + +// GetOnYourDataVectorizationSource implements the OnYourDataVectorizationSourceClassification interface for type OnYourDataModelIDVectorizationSource. +func (o *OnYourDataModelIDVectorizationSource) GetOnYourDataVectorizationSource() *OnYourDataVectorizationSource { + return &OnYourDataVectorizationSource{ + Type: o.Type, + } +} + +// OnYourDataSystemAssignedManagedIdentityAuthenticationOptions - The authentication options for Azure OpenAI On Your Data +// when using a system-assigned managed identity. +type OnYourDataSystemAssignedManagedIdentityAuthenticationOptions struct { + // REQUIRED; The authentication type. + configType *OnYourDataAuthenticationType +} + +// GetOnYourDataAuthenticationOptions implements the OnYourDataAuthenticationOptionsClassification interface for type OnYourDataSystemAssignedManagedIdentityAuthenticationOptions. +func (o *OnYourDataSystemAssignedManagedIdentityAuthenticationOptions) GetOnYourDataAuthenticationOptions() *OnYourDataAuthenticationOptions { + return &OnYourDataAuthenticationOptions{ + configType: o.configType, + } +} + +// OnYourDataUserAssignedManagedIdentityAuthenticationOptions - The authentication options for Azure OpenAI On Your Data when +// using a user-assigned managed identity. +type OnYourDataUserAssignedManagedIdentityAuthenticationOptions struct { + // REQUIRED; The authentication type. + configType *OnYourDataAuthenticationType + + // REQUIRED; The resource ID of the user-assigned managed identity to use for authentication. + ManagedIdentityResourceID *string +} + +// GetOnYourDataAuthenticationOptions implements the OnYourDataAuthenticationOptionsClassification interface for type OnYourDataUserAssignedManagedIdentityAuthenticationOptions. 
+func (o *OnYourDataUserAssignedManagedIdentityAuthenticationOptions) GetOnYourDataAuthenticationOptions() *OnYourDataAuthenticationOptions {
+	return &OnYourDataAuthenticationOptions{
+		configType: o.configType,
+	}
+}
+
+// OnYourDataVectorizationSource - An abstract representation of a vectorization source for Azure OpenAI On Your Data with
+// vector search.
+type OnYourDataVectorizationSource struct {
+	// REQUIRED; The type of vectorization source to use.
+	Type *OnYourDataVectorizationSourceType
+}
+
+// GetOnYourDataVectorizationSource implements the OnYourDataVectorizationSourceClassification interface for type OnYourDataVectorizationSource.
+func (o *OnYourDataVectorizationSource) GetOnYourDataVectorizationSource() *OnYourDataVectorizationSource {
+	return o
+}
+
+// PineconeChatExtensionConfiguration - A specific representation of configurable options for Pinecone when using it
+// as an Azure OpenAI chat extension.
+type PineconeChatExtensionConfiguration struct {
+	// REQUIRED; The label for the type of an Azure chat extension. This typically corresponds to a matching Azure resource. Azure
+	// chat extensions are only compatible with Azure OpenAI.
+	configType *AzureChatExtensionType
+
+	// REQUIRED; The parameters to use when configuring Azure OpenAI chat extensions.
+	Parameters *PineconeChatExtensionParameters
+}
+
+// GetAzureChatExtensionConfiguration implements the AzureChatExtensionConfigurationClassification interface for type PineconeChatExtensionConfiguration.
+func (p *PineconeChatExtensionConfiguration) GetAzureChatExtensionConfiguration() *AzureChatExtensionConfiguration {
+	return &AzureChatExtensionConfiguration{
+		configType: p.configType,
+	}
+}
+
+// PineconeChatExtensionParameters - Parameters for configuring Azure OpenAI Pinecone chat extensions.
+type PineconeChatExtensionParameters struct {
+	// REQUIRED; The environment name of Pinecone.
+	Environment *string
+
+	// REQUIRED; Customized field mapping behavior to use when interacting with the search index.
+	FieldsMapping *PineconeFieldMappingOptions
+
+	// REQUIRED; The name of the Pinecone database index.
+	IndexName *string
+
+	// The authentication method to use when accessing the defined data source. Each data source type supports a specific set
+	// of available authentication methods; please see the documentation of the data
+	// source for supported mechanisms. If not otherwise provided, On Your Data will attempt to use System Managed Identity (default
+	// credential) authentication.
+	Authentication OnYourDataAuthenticationOptionsClassification
+
+	// The embedding dependency for vector search.
+	EmbeddingDependency OnYourDataVectorizationSourceClassification
+
+	// Whether queries should be restricted to use of indexed data.
+	InScope *bool
+
+	// Give the model instructions about how it should behave and any context it should reference when generating a response.
+	// You can describe the assistant's personality and tell it how to format responses.
+	// There's a 100 token limit for it, and it counts against the overall token limit.
+	RoleInformation *string
+
+	// The configured strictness of the search relevance filtering. The higher the strictness, the higher the precision but
+	// the lower the recall of the answer.
+	Strictness *int32
+
+	// The configured top number of documents to feature for the configured query.
+	TopNDocuments *int32
+}
+
+// PineconeFieldMappingOptions - Optional settings to control how fields are processed when using a configured Pinecone resource.
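
For reference, a minimal sketch of how the Pinecone "On Your Data" types above compose into a chat data source. All values are placeholders, `to.Ptr` comes from `sdk/azcore/to`, and the unexported `configType` discriminators are left to the generated marshallers, mirroring the other extension configurations in this file:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func main() {
	// Placeholder values; a real configuration would come from your Pinecone project.
	dataSource := &azopenai.PineconeChatExtensionConfiguration{
		Parameters: &azopenai.PineconeChatExtensionParameters{
			Environment: to.Ptr("my-pinecone-environment"),
			IndexName:   to.Ptr("my-pinecone-index"),
			Authentication: &azopenai.OnYourDataConnectionStringAuthenticationOptions{
				ConnectionString: to.Ptr("<pinecone-connection-string>"),
			},
			FieldsMapping: &azopenai.PineconeFieldMappingOptions{
				ContentFields: []string{"content"},
				TitleField:    to.Ptr("title"),
			},
		},
	}

	// Data sources are passed to chat completions via AzureExtensionsOptions.
	opts := azopenai.ChatCompletionsOptions{
		DeploymentName:         to.Ptr("gpt-4"), // placeholder deployment name
		AzureExtensionsOptions: []azopenai.AzureChatExtensionConfigurationClassification{dataSource},
		// Messages omitted for brevity.
	}
	fmt.Printf("configured %d data source(s)\n", len(opts.AzureExtensionsOptions))
}
```
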
+type PineconeFieldMappingOptions struct { + // The names of index fields that should be treated as content. + ContentFields []string + + // The separator pattern that content fields should use. + ContentFieldsSeparator *string + + // The name of the index field to use as a filepath. + FilepathField *string + + // The names of fields that represent image vector data. + ImageVectorFields []string + + // The name of the index field to use as a title. + TitleField *string + + // The name of the index field to use as a URL. + URLField *string + + // The names of fields that represent vector data. + VectorFields []string +} + +// StopFinishDetails - A structured representation of a stop reason that signifies natural termination by the model. +type StopFinishDetails struct { + // REQUIRED; The token sequence that the model terminated with. + Stop *string + + // REQUIRED; The object type. + Type *string +} + +// GetChatFinishDetails implements the ChatFinishDetailsClassification interface for type StopFinishDetails. +func (s *StopFinishDetails) GetChatFinishDetails() *ChatFinishDetails { + return &ChatFinishDetails{ + Type: s.Type, + } } diff --git a/sdk/ai/azopenai/models_serde.go b/sdk/ai/azopenai/models_serde.go index 5369326e3421..bd6dbe637ea3 100644 --- a/sdk/ai/azopenai/models_serde.go +++ b/sdk/ai/azopenai/models_serde.go @@ -63,9 +63,10 @@ func (a *AudioTranscription) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type AudioTranscriptionOptions. func (a AudioTranscriptionOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "model", a.DeploymentName) populateByteArray(objectMap, "file", a.File, runtime.Base64StdFormat) + populate(objectMap, "filename", a.Filename) populate(objectMap, "language", a.Language) - populate(objectMap, "model", &a.Deployment) populate(objectMap, "prompt", a.Prompt) populate(objectMap, "response_format", a.ResponseFormat) populate(objectMap, "temperature", a.Temperature) @@ -81,15 +82,18 @@ func (a *AudioTranscriptionOptions) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "model": + err = unpopulate(val, "DeploymentName", &a.DeploymentName) + delete(rawMsg, key) case "file": err = runtime.DecodeByteArray(string(val), &a.File, runtime.Base64StdFormat) delete(rawMsg, key) + case "filename": + err = unpopulate(val, "Filename", &a.Filename) + delete(rawMsg, key) case "language": err = unpopulate(val, "Language", &a.Language) delete(rawMsg, key) - case "model": - err = unpopulate(val, "Model", &a.Deployment) - delete(rawMsg, key) case "prompt": err = unpopulate(val, "Prompt", &a.Prompt) delete(rawMsg, key) @@ -216,8 +220,9 @@ func (a *AudioTranslation) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type AudioTranslationOptions. 
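
The AudioTranscriptionOptions marshaller above keeps the wire name `model` for the renamed `DeploymentName` field. A small sketch of the resulting JSON, with placeholder values:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func main() {
	opts := azopenai.AudioTranscriptionOptions{
		DeploymentName: to.Ptr("whisper-deployment"), // placeholder deployment name
		File:           []byte("fake audio bytes"),   // placeholder; normally read from an audio file
		Filename:       to.Ptr("clip.mp3"),
	}

	// The generated marshaller writes DeploymentName under the wire key "model".
	b, err := json.Marshal(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // contains "model":"whisper-deployment" and a base64-encoded "file"
}
```
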
func (a AudioTranslationOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "model", a.DeploymentName) populateByteArray(objectMap, "file", a.File, runtime.Base64StdFormat) - populate(objectMap, "model", &a.Deployment) + populate(objectMap, "filename", a.Filename) populate(objectMap, "prompt", a.Prompt) populate(objectMap, "response_format", a.ResponseFormat) populate(objectMap, "temperature", a.Temperature) @@ -233,11 +238,14 @@ func (a *AudioTranslationOptions) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "model": + err = unpopulate(val, "DeploymentName", &a.DeploymentName) + delete(rawMsg, key) case "file": err = runtime.DecodeByteArray(string(val), &a.File, runtime.Base64StdFormat) delete(rawMsg, key) - case "model": - err = unpopulate(val, "Model", &a.Deployment) + case "filename": + err = unpopulate(val, "Filename", &a.Filename) delete(rawMsg, key) case "prompt": err = unpopulate(val, "Prompt", &a.Prompt) @@ -319,11 +327,68 @@ func (a *AudioTranslationSegment) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type AzureChatEnhancementConfiguration. +func (a AzureChatEnhancementConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "grounding", a.Grounding) + populate(objectMap, "ocr", a.Ocr) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureChatEnhancementConfiguration. +func (a *AzureChatEnhancementConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "grounding": + err = unpopulate(val, "Grounding", &a.Grounding) + delete(rawMsg, key) + case "ocr": + err = unpopulate(val, "Ocr", &a.Ocr) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AzureChatEnhancements. +func (a AzureChatEnhancements) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "grounding", a.Grounding) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureChatEnhancements. +func (a *AzureChatEnhancements) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "grounding": + err = unpopulate(val, "Grounding", &a.Grounding) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type AzureChatExtensionConfiguration. 
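
A brief sketch of the request-side enhancement configuration serialized above, assuming `ChatCompletionsOptions.Enhancements` accepts an `*AzureChatEnhancementConfiguration` (deployment name is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func main() {
	// Request-side enhancement switches used with gpt-4-vision-preview.
	enhancements := &azopenai.AzureChatEnhancementConfiguration{
		Grounding: &azopenai.AzureChatGroundingEnhancementConfiguration{Enabled: to.Ptr(true)},
		Ocr:       &azopenai.AzureChatOCREnhancementConfiguration{Enabled: to.Ptr(true)},
	}

	opts := azopenai.ChatCompletionsOptions{
		DeploymentName: to.Ptr("gpt-4-vision-preview"), // placeholder deployment name
		Enhancements:   enhancements,
		// Messages omitted for brevity.
	}
	fmt.Printf("grounding enabled: %v\n", *opts.Enhancements.Grounding.Enabled)
}
```
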
func (a AzureChatExtensionConfiguration) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populateAny(objectMap, "parameters", a.Parameters) - populate(objectMap, "type", a.Type) + objectMap["type"] = a.configType return json.Marshal(objectMap) } @@ -336,11 +401,8 @@ func (a *AzureChatExtensionConfiguration) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "parameters": - err = unpopulate(val, "Parameters", &a.Parameters) - delete(rawMsg, key) case "type": - err = unpopulate(val, "Type", &a.Type) + err = unpopulate(val, "configType", &a.configType) delete(rawMsg, key) } if err != nil { @@ -377,25 +439,111 @@ func (a *AzureChatExtensionsMessageContext) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type AzureChatGroundingEnhancementConfiguration. +func (a AzureChatGroundingEnhancementConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "enabled", a.Enabled) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureChatGroundingEnhancementConfiguration. +func (a *AzureChatGroundingEnhancementConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "enabled": + err = unpopulate(val, "Enabled", &a.Enabled) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AzureChatOCREnhancementConfiguration. +func (a AzureChatOCREnhancementConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "enabled", a.Enabled) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureChatOCREnhancementConfiguration. +func (a *AzureChatOCREnhancementConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "enabled": + err = unpopulate(val, "Enabled", &a.Enabled) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type AzureCognitiveSearchChatExtensionConfiguration. func (a AzureCognitiveSearchChatExtensionConfiguration) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "embeddingEndpoint", a.EmbeddingEndpoint) - populate(objectMap, "embeddingKey", a.EmbeddingKey) + objectMap["type"] = AzureChatExtensionTypeAzureCognitiveSearch + populate(objectMap, "parameters", a.Parameters) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureCognitiveSearchChatExtensionConfiguration. 
+func (a *AzureCognitiveSearchChatExtensionConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "type": + err = unpopulate(val, "configType", &a.configType) + delete(rawMsg, key) + case "parameters": + err = unpopulate(val, "Parameters", &a.Parameters) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AzureCognitiveSearchChatExtensionParameters. +func (a AzureCognitiveSearchChatExtensionParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "authentication", a.Authentication) + populate(objectMap, "embeddingDependency", a.EmbeddingDependency) populate(objectMap, "endpoint", a.Endpoint) populate(objectMap, "fieldsMapping", a.FieldsMapping) + populate(objectMap, "filter", a.Filter) populate(objectMap, "inScope", a.InScope) populate(objectMap, "indexName", a.IndexName) - populate(objectMap, "key", a.Key) populate(objectMap, "queryType", a.QueryType) + populate(objectMap, "roleInformation", a.RoleInformation) populate(objectMap, "semanticConfiguration", a.SemanticConfiguration) + populate(objectMap, "strictness", a.Strictness) populate(objectMap, "topNDocuments", a.TopNDocuments) - populate(objectMap, "type", a.Type) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type AzureCognitiveSearchChatExtensionConfiguration. -func (a *AzureCognitiveSearchChatExtensionConfiguration) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureCognitiveSearchChatExtensionParameters. 
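
Because the marshaller above hard-codes the `type` discriminator, callers of the Azure Cognitive Search extension only populate `Parameters`. A minimal sketch with placeholder resource values and system-assigned managed identity authentication, as with the Pinecone sketch earlier:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func main() {
	searchSource := &azopenai.AzureCognitiveSearchChatExtensionConfiguration{
		Parameters: &azopenai.AzureCognitiveSearchChatExtensionParameters{
			Endpoint:  to.Ptr("https://<search-resource>.search.windows.net"), // placeholder
			IndexName: to.Ptr("my-index"),                                     // placeholder
			// System-assigned managed identity; no additional fields to set.
			Authentication: &azopenai.OnYourDataSystemAssignedManagedIdentityAuthenticationOptions{},
		},
	}

	opts := azopenai.ChatCompletionsOptions{
		DeploymentName:         to.Ptr("gpt-4"), // placeholder deployment name
		AzureExtensionsOptions: []azopenai.AzureChatExtensionConfigurationClassification{searchSource},
		// Messages omitted for brevity.
	}
	fmt.Printf("data sources: %d\n", len(opts.AzureExtensionsOptions))
}
```
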
+func (a *AzureCognitiveSearchChatExtensionParameters) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", a, err) @@ -403,11 +551,11 @@ func (a *AzureCognitiveSearchChatExtensionConfiguration) UnmarshalJSON(data []by for key, val := range rawMsg { var err error switch key { - case "embeddingEndpoint": - err = unpopulate(val, "EmbeddingEndpoint", &a.EmbeddingEndpoint) + case "authentication": + a.Authentication, err = unmarshalOnYourDataAuthenticationOptionsClassification(val) delete(rawMsg, key) - case "embeddingKey": - err = unpopulate(val, "EmbeddingKey", &a.EmbeddingKey) + case "embeddingDependency": + a.EmbeddingDependency, err = unmarshalOnYourDataVectorizationSourceClassification(val) delete(rawMsg, key) case "endpoint": err = unpopulate(val, "Endpoint", &a.Endpoint) @@ -415,27 +563,30 @@ func (a *AzureCognitiveSearchChatExtensionConfiguration) UnmarshalJSON(data []by case "fieldsMapping": err = unpopulate(val, "FieldsMapping", &a.FieldsMapping) delete(rawMsg, key) + case "filter": + err = unpopulate(val, "Filter", &a.Filter) + delete(rawMsg, key) case "inScope": err = unpopulate(val, "InScope", &a.InScope) delete(rawMsg, key) case "indexName": err = unpopulate(val, "IndexName", &a.IndexName) delete(rawMsg, key) - case "key": - err = unpopulate(val, "Key", &a.Key) - delete(rawMsg, key) case "queryType": err = unpopulate(val, "QueryType", &a.QueryType) delete(rawMsg, key) + case "roleInformation": + err = unpopulate(val, "RoleInformation", &a.RoleInformation) + delete(rawMsg, key) case "semanticConfiguration": err = unpopulate(val, "SemanticConfiguration", &a.SemanticConfiguration) delete(rawMsg, key) + case "strictness": + err = unpopulate(val, "Strictness", &a.Strictness) + delete(rawMsg, key) case "topNDocuments": err = unpopulate(val, "TopNDocuments", &a.TopNDocuments) delete(rawMsg, key) - case "type": - err = unpopulate(val, "Type", &a.Type) - delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", a, err) @@ -444,20 +595,21 @@ func (a *AzureCognitiveSearchChatExtensionConfiguration) UnmarshalJSON(data []by return nil } -// MarshalJSON implements the json.Marshaller interface for type AzureCognitiveSearchChatExtensionConfigurationFieldsMapping. -func (a AzureCognitiveSearchChatExtensionConfigurationFieldsMapping) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type AzureCognitiveSearchIndexFieldMappingOptions. +func (a AzureCognitiveSearchIndexFieldMappingOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "contentFieldNames", a.ContentFieldNames) - populate(objectMap, "contentFieldSeparator", a.ContentFieldSeparator) + populate(objectMap, "contentFields", a.ContentFields) + populate(objectMap, "contentFieldsSeparator", a.ContentFieldsSeparator) populate(objectMap, "filepathField", a.FilepathField) + populate(objectMap, "imageVectorFields", a.ImageVectorFields) populate(objectMap, "titleField", a.TitleField) populate(objectMap, "urlField", a.URLField) populate(objectMap, "vectorFields", a.VectorFields) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type AzureCognitiveSearchChatExtensionConfigurationFieldsMapping. 
-func (a *AzureCognitiveSearchChatExtensionConfigurationFieldsMapping) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureCognitiveSearchIndexFieldMappingOptions. +func (a *AzureCognitiveSearchIndexFieldMappingOptions) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", a, err) @@ -465,15 +617,18 @@ func (a *AzureCognitiveSearchChatExtensionConfigurationFieldsMapping) UnmarshalJ for key, val := range rawMsg { var err error switch key { - case "contentFieldNames": - err = unpopulate(val, "ContentFieldNames", &a.ContentFieldNames) + case "contentFields": + err = unpopulate(val, "ContentFields", &a.ContentFields) delete(rawMsg, key) - case "contentFieldSeparator": - err = unpopulate(val, "ContentFieldSeparator", &a.ContentFieldSeparator) + case "contentFieldsSeparator": + err = unpopulate(val, "ContentFieldsSeparator", &a.ContentFieldsSeparator) delete(rawMsg, key) case "filepathField": err = unpopulate(val, "FilepathField", &a.FilepathField) delete(rawMsg, key) + case "imageVectorFields": + err = unpopulate(val, "ImageVectorFields", &a.ImageVectorFields) + delete(rawMsg, key) case "titleField": err = unpopulate(val, "TitleField", &a.TitleField) delete(rawMsg, key) @@ -491,20 +646,16 @@ func (a *AzureCognitiveSearchChatExtensionConfigurationFieldsMapping) UnmarshalJ return nil } -// MarshalJSON implements the json.Marshaller interface for type AzureCognitiveSearchIndexFieldMappingOptions. -func (a AzureCognitiveSearchIndexFieldMappingOptions) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type AzureCosmosDBChatExtensionConfiguration. +func (a AzureCosmosDBChatExtensionConfiguration) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "contentFieldNames", a.ContentFieldNames) - populate(objectMap, "contentFieldSeparator", a.ContentFieldSeparator) - populate(objectMap, "filepathField", a.FilepathField) - populate(objectMap, "titleField", a.TitleField) - populate(objectMap, "urlField", a.URLField) - populate(objectMap, "vectorFields", a.VectorFields) + objectMap["type"] = AzureChatExtensionTypeAzureCosmosDB + populate(objectMap, "parameters", a.Parameters) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type AzureCognitiveSearchIndexFieldMappingOptions. -func (a *AzureCognitiveSearchIndexFieldMappingOptions) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureCosmosDBChatExtensionConfiguration. 
+func (a *AzureCosmosDBChatExtensionConfiguration) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", a, err) @@ -512,23 +663,11 @@ func (a *AzureCognitiveSearchIndexFieldMappingOptions) UnmarshalJSON(data []byte for key, val := range rawMsg { var err error switch key { - case "contentFieldNames": - err = unpopulate(val, "ContentFieldNames", &a.ContentFieldNames) - delete(rawMsg, key) - case "contentFieldSeparator": - err = unpopulate(val, "ContentFieldSeparator", &a.ContentFieldSeparator) - delete(rawMsg, key) - case "filepathField": - err = unpopulate(val, "FilepathField", &a.FilepathField) - delete(rawMsg, key) - case "titleField": - err = unpopulate(val, "TitleField", &a.TitleField) - delete(rawMsg, key) - case "urlField": - err = unpopulate(val, "URLField", &a.URLField) + case "type": + err = unpopulate(val, "configType", &a.configType) delete(rawMsg, key) - case "vectorFields": - err = unpopulate(val, "VectorFields", &a.VectorFields) + case "parameters": + err = unpopulate(val, "Parameters", &a.Parameters) delete(rawMsg, key) } if err != nil { @@ -538,19 +677,24 @@ func (a *AzureCognitiveSearchIndexFieldMappingOptions) UnmarshalJSON(data []byte return nil } -// MarshalJSON implements the json.Marshaller interface for type Error. -func (a Error) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type AzureCosmosDBChatExtensionParameters. +func (a AzureCosmosDBChatExtensionParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "code", a.Code) - populate(objectMap, "details", a.Details) - populate(objectMap, "innererror", a.InnerError) - populate(objectMap, "message", a.Message) - populate(objectMap, "target", a.Target) + populate(objectMap, "authentication", a.Authentication) + populate(objectMap, "containerName", a.ContainerName) + populate(objectMap, "databaseName", a.DatabaseName) + populate(objectMap, "embeddingDependency", a.EmbeddingDependency) + populate(objectMap, "fieldsMapping", a.FieldsMapping) + populate(objectMap, "inScope", a.InScope) + populate(objectMap, "indexName", a.IndexName) + populate(objectMap, "roleInformation", a.RoleInformation) + populate(objectMap, "strictness", a.Strictness) + populate(objectMap, "topNDocuments", a.TopNDocuments) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type Error. -func (a *Error) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureCosmosDBChatExtensionParameters. 
+func (a *AzureCosmosDBChatExtensionParameters) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", a, err) @@ -558,20 +702,35 @@ func (a *Error) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "code": - err = unpopulate(val, "Code", &a.Code) + case "authentication": + a.Authentication, err = unmarshalOnYourDataAuthenticationOptionsClassification(val) delete(rawMsg, key) - case "details": - err = unpopulate(val, "Details", &a.Details) + case "containerName": + err = unpopulate(val, "ContainerName", &a.ContainerName) delete(rawMsg, key) - case "innererror": - err = unpopulate(val, "Innererror", &a.InnerError) + case "databaseName": + err = unpopulate(val, "DatabaseName", &a.DatabaseName) delete(rawMsg, key) - case "message": - err = unpopulate(val, "Message", &a.Message) + case "embeddingDependency": + a.EmbeddingDependency, err = unmarshalOnYourDataVectorizationSourceClassification(val) + delete(rawMsg, key) + case "fieldsMapping": + err = unpopulate(val, "FieldsMapping", &a.FieldsMapping) + delete(rawMsg, key) + case "inScope": + err = unpopulate(val, "InScope", &a.InScope) + delete(rawMsg, key) + case "indexName": + err = unpopulate(val, "IndexName", &a.IndexName) delete(rawMsg, key) - case "target": - err = unpopulate(val, "Target", &a.Target) + case "roleInformation": + err = unpopulate(val, "RoleInformation", &a.RoleInformation) + delete(rawMsg, key) + case "strictness": + err = unpopulate(val, "Strictness", &a.Strictness) + delete(rawMsg, key) + case "topNDocuments": + err = unpopulate(val, "TopNDocuments", &a.TopNDocuments) delete(rawMsg, key) } if err != nil { @@ -581,16 +740,15 @@ func (a *Error) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type InnerError. -func (a InnerError) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type AzureCosmosDBFieldMappingOptions. +func (a AzureCosmosDBFieldMappingOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "code", a.Code) - populate(objectMap, "innererror", a.InnerError) + populate(objectMap, "vectorFields", a.VectorFields) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type InnerError. -func (a *InnerError) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureCosmosDBFieldMappingOptions. +func (a *AzureCosmosDBFieldMappingOptions) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", a, err) @@ -598,11 +756,8 @@ func (a *InnerError) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "code": - err = unpopulate(val, "Code", &a.Code) - delete(rawMsg, key) - case "innererror": - err = unpopulate(val, "Innererror", &a.InnerError) + case "vectorFields": + err = unpopulate(val, "VectorFields", &a.VectorFields) delete(rawMsg, key) } if err != nil { @@ -612,294 +767,239 @@ func (a *InnerError) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type batchImageGenerationOperationResponse. 
-func (b batchImageGenerationOperationResponse) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type AzureGroundingEnhancement. +func (a AzureGroundingEnhancement) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populateTimeUnix(objectMap, "created", b.Created) - populate(objectMap, "error", b.Error) - populate(objectMap, "expires", b.Expires) - populate(objectMap, "id", b.ID) - populate(objectMap, "result", b.Result) - populate(objectMap, "status", b.Status) + populate(objectMap, "lines", a.Lines) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type batchImageGenerationOperationResponse. -func (b *batchImageGenerationOperationResponse) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureGroundingEnhancement. +func (a *AzureGroundingEnhancement) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", b, err) + return fmt.Errorf("unmarshalling type %T: %v", a, err) } for key, val := range rawMsg { var err error switch key { - case "created": - err = unpopulateTimeUnix(val, "Created", &b.Created) - delete(rawMsg, key) - case "error": - err = unpopulate(val, "Error", &b.Error) - delete(rawMsg, key) - case "expires": - err = unpopulate(val, "Expires", &b.Expires) - delete(rawMsg, key) - case "id": - err = unpopulate(val, "ID", &b.ID) - delete(rawMsg, key) - case "result": - err = unpopulate(val, "Result", &b.Result) - delete(rawMsg, key) - case "status": - err = unpopulate(val, "Status", &b.Status) + case "lines": + err = unpopulate(val, "Lines", &a.Lines) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", b, err) + return fmt.Errorf("unmarshalling type %T: %v", a, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ChatChoice. -func (c ChatChoice) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type AzureGroundingEnhancementCoordinatePoint. +func (a AzureGroundingEnhancementCoordinatePoint) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "content_filter_results", c.ContentFilterResults) - populate(objectMap, "delta", c.Delta) - populate(objectMap, "finish_reason", c.FinishReason) - populate(objectMap, "index", c.Index) - populate(objectMap, "message", c.Message) + populate(objectMap, "x", a.X) + populate(objectMap, "y", a.Y) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ChatChoice. -func (c *ChatChoice) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureGroundingEnhancementCoordinatePoint. 
+func (a *AzureGroundingEnhancementCoordinatePoint) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", a, err) } for key, val := range rawMsg { var err error switch key { - case "content_filter_results": - err = unpopulate(val, "ContentFilterResults", &c.ContentFilterResults) - delete(rawMsg, key) - case "delta": - err = unpopulate(val, "Delta", &c.Delta) - delete(rawMsg, key) - case "finish_reason": - err = unpopulate(val, "FinishReason", &c.FinishReason) - delete(rawMsg, key) - case "index": - err = unpopulate(val, "Index", &c.Index) + case "x": + err = unpopulate(val, "X", &a.X) delete(rawMsg, key) - case "message": - err = unpopulate(val, "Message", &c.Message) + case "y": + err = unpopulate(val, "Y", &a.Y) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", a, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ChatChoiceContentFilterResults. -func (c ChatChoiceContentFilterResults) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type AzureGroundingEnhancementLine. +func (a AzureGroundingEnhancementLine) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "error", c.Error) - populate(objectMap, "hate", c.Hate) - populate(objectMap, "self_harm", c.SelfHarm) - populate(objectMap, "sexual", c.Sexual) - populate(objectMap, "violence", c.Violence) + populate(objectMap, "spans", a.Spans) + populate(objectMap, "text", a.Text) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ChatChoiceContentFilterResults. -func (c *ChatChoiceContentFilterResults) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureGroundingEnhancementLine. +func (a *AzureGroundingEnhancementLine) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", a, err) } for key, val := range rawMsg { var err error switch key { - case "error": - err = unpopulate(val, "Error", &c.Error) - delete(rawMsg, key) - case "hate": - err = unpopulate(val, "Hate", &c.Hate) - delete(rawMsg, key) - case "self_harm": - err = unpopulate(val, "SelfHarm", &c.SelfHarm) - delete(rawMsg, key) - case "sexual": - err = unpopulate(val, "Sexual", &c.Sexual) + case "spans": + err = unpopulate(val, "Spans", &a.Spans) delete(rawMsg, key) - case "violence": - err = unpopulate(val, "Violence", &c.Violence) + case "text": + err = unpopulate(val, "Text", &a.Text) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", a, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ChatChoiceDelta. -func (c ChatChoiceDelta) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type AzureGroundingEnhancementLineSpan. 
+func (a AzureGroundingEnhancementLineSpan) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "content", c.Content) - populate(objectMap, "context", c.Context) - populate(objectMap, "function_call", c.FunctionCall) - populate(objectMap, "name", c.Name) - populate(objectMap, "role", c.Role) + populate(objectMap, "length", a.Length) + populate(objectMap, "offset", a.Offset) + populate(objectMap, "polygon", a.Polygon) + populate(objectMap, "text", a.Text) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ChatChoiceDelta. -func (c *ChatChoiceDelta) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureGroundingEnhancementLineSpan. +func (a *AzureGroundingEnhancementLineSpan) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", a, err) } for key, val := range rawMsg { var err error switch key { - case "content": - err = unpopulate(val, "Content", &c.Content) + case "length": + err = unpopulate(val, "Length", &a.Length) delete(rawMsg, key) - case "context": - err = unpopulate(val, "Context", &c.Context) + case "offset": + err = unpopulate(val, "Offset", &a.Offset) delete(rawMsg, key) - case "function_call": - err = unpopulate(val, "FunctionCall", &c.FunctionCall) + case "polygon": + err = unpopulate(val, "Polygon", &a.Polygon) delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &c.Name) - delete(rawMsg, key) - case "role": - err = unpopulate(val, "Role", &c.Role) + case "text": + err = unpopulate(val, "Text", &a.Text) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", a, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ChatChoiceMessage. -func (c ChatChoiceMessage) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type AzureMachineLearningIndexChatExtensionConfiguration. +func (a AzureMachineLearningIndexChatExtensionConfiguration) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "content", c.Content) - populate(objectMap, "context", c.Context) - populate(objectMap, "function_call", c.FunctionCall) - populate(objectMap, "name", c.Name) - populate(objectMap, "role", c.Role) + objectMap["type"] = AzureChatExtensionTypeAzureMachineLearningIndex + populate(objectMap, "parameters", a.Parameters) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ChatChoiceMessage. -func (c *ChatChoiceMessage) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureMachineLearningIndexChatExtensionConfiguration. 
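
A sketch of walking the grounding enhancement data deserialized above once it comes back on a chat choice; it assumes the optional `Enhancements` and `Grounding` fields are pointers, as is usual for optional fields in this package:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
)

// printGroundingLines walks the grounding results returned when the grounding
// enhancement is enabled for gpt-4-vision-preview.
func printGroundingLines(choice azopenai.ChatChoice) {
	if choice.Enhancements == nil || choice.Enhancements.Grounding == nil {
		return
	}
	for _, line := range choice.Enhancements.Grounding.Lines {
		fmt.Printf("line: %s\n", *line.Text)
		for _, span := range line.Spans {
			// Each span carries the matched text, its offset and length, and a bounding polygon.
			fmt.Printf("  span %q at offset %d (len %d), %d polygon points\n",
				*span.Text, *span.Offset, *span.Length, len(span.Polygon))
		}
	}
}

func main() {
	printGroundingLines(azopenai.ChatChoice{}) // normally a choice from a GetChatCompletions response
}
```
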
+func (a *AzureMachineLearningIndexChatExtensionConfiguration) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", a, err) } for key, val := range rawMsg { var err error switch key { - case "content": - err = unpopulate(val, "Content", &c.Content) - delete(rawMsg, key) - case "context": - err = unpopulate(val, "Context", &c.Context) - delete(rawMsg, key) - case "function_call": - err = unpopulate(val, "FunctionCall", &c.FunctionCall) - delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &c.Name) + case "type": + err = unpopulate(val, "configType", &a.configType) delete(rawMsg, key) - case "role": - err = unpopulate(val, "Role", &c.Role) + case "parameters": + err = unpopulate(val, "Parameters", &a.Parameters) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", a, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ChatCompletions. -func (c ChatCompletions) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type AzureMachineLearningIndexChatExtensionParameters. +func (a AzureMachineLearningIndexChatExtensionParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "choices", c.Choices) - populateTimeUnix(objectMap, "created", c.Created) - populate(objectMap, "id", c.ID) - populate(objectMap, "prompt_filter_results", c.PromptFilterResults) - populate(objectMap, "usage", c.Usage) + populate(objectMap, "authentication", a.Authentication) + populate(objectMap, "filter", a.Filter) + populate(objectMap, "inScope", a.InScope) + populate(objectMap, "name", a.Name) + populate(objectMap, "projectResourceId", a.ProjectResourceID) + populate(objectMap, "roleInformation", a.RoleInformation) + populate(objectMap, "strictness", a.Strictness) + populate(objectMap, "topNDocuments", a.TopNDocuments) + populate(objectMap, "version", a.Version) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ChatCompletions. -func (c *ChatCompletions) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureMachineLearningIndexChatExtensionParameters. 
+func (a *AzureMachineLearningIndexChatExtensionParameters) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", a, err) } for key, val := range rawMsg { var err error switch key { - case "choices": - err = unpopulate(val, "Choices", &c.Choices) + case "authentication": + a.Authentication, err = unmarshalOnYourDataAuthenticationOptionsClassification(val) delete(rawMsg, key) - case "created": - err = unpopulateTimeUnix(val, "Created", &c.Created) + case "filter": + err = unpopulate(val, "Filter", &a.Filter) delete(rawMsg, key) - case "id": - err = unpopulate(val, "ID", &c.ID) + case "inScope": + err = unpopulate(val, "InScope", &a.InScope) delete(rawMsg, key) - case "prompt_annotations": - fallthrough - case "prompt_filter_results": - err = unpopulate(val, "PromptFilterResults", &c.PromptFilterResults) + case "name": + err = unpopulate(val, "Name", &a.Name) delete(rawMsg, key) - case "usage": - err = unpopulate(val, "Usage", &c.Usage) + case "projectResourceId": + err = unpopulate(val, "ProjectResourceID", &a.ProjectResourceID) + delete(rawMsg, key) + case "roleInformation": + err = unpopulate(val, "RoleInformation", &a.RoleInformation) + delete(rawMsg, key) + case "strictness": + err = unpopulate(val, "Strictness", &a.Strictness) + delete(rawMsg, key) + case "topNDocuments": + err = unpopulate(val, "TopNDocuments", &a.TopNDocuments) + delete(rawMsg, key) + case "version": + err = unpopulate(val, "Version", &a.Version) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", a, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ChatCompletionsOptions. -func (c ChatCompletionsOptions) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ChatChoice. +func (c ChatChoice) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - if c.AzureExtensionsOptions != nil { - populate(objectMap, "dataSources", c.AzureExtensionsOptions.Extensions) - } - populate(objectMap, "frequency_penalty", c.FrequencyPenalty) - populate(objectMap, "function_call", c.FunctionCall) - populate(objectMap, "functions", c.Functions) - populate(objectMap, "logit_bias", c.LogitBias) - populate(objectMap, "max_tokens", c.MaxTokens) - populate(objectMap, "messages", c.Messages) - populate(objectMap, "model", &c.Deployment) - populate(objectMap, "n", c.N) - populate(objectMap, "presence_penalty", c.PresencePenalty) - populate(objectMap, "stop", c.Stop) - populate(objectMap, "temperature", c.Temperature) - populate(objectMap, "top_p", c.TopP) - populate(objectMap, "user", c.User) + populate(objectMap, "content_filter_results", c.ContentFilterResults) + populate(objectMap, "delta", c.Delta) + populate(objectMap, "enhancements", c.Enhancements) + populate(objectMap, "finish_details", c.FinishDetails) + populate(objectMap, "finish_reason", c.FinishReason) + populate(objectMap, "index", c.Index) + populate(objectMap, "message", c.Message) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ChatCompletionsOptions. -func (c *ChatCompletionsOptions) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatChoice. 
+func (c *ChatChoice) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -907,48 +1007,26 @@ func (c *ChatCompletionsOptions) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "dataSources": - c.AzureExtensionsOptions = &AzureChatExtensionOptions{} - err = unpopulate(val, "DataSources", &c.AzureExtensionsOptions.Extensions) - delete(rawMsg, key) - case "frequency_penalty": - err = unpopulate(val, "FrequencyPenalty", &c.FrequencyPenalty) - delete(rawMsg, key) - case "function_call": - err = unpopulate(val, "FunctionCall", &c.FunctionCall) - delete(rawMsg, key) - case "functions": - err = unpopulate(val, "Functions", &c.Functions) - delete(rawMsg, key) - case "logit_bias": - err = unpopulate(val, "LogitBias", &c.LogitBias) - delete(rawMsg, key) - case "max_tokens": - err = unpopulate(val, "MaxTokens", &c.MaxTokens) - delete(rawMsg, key) - case "messages": - err = unpopulate(val, "Messages", &c.Messages) - delete(rawMsg, key) - case "model": - err = unpopulate(val, "Model", &c.Deployment) + case "content_filter_results": + err = unpopulate(val, "ContentFilterResults", &c.ContentFilterResults) delete(rawMsg, key) - case "n": - err = unpopulate(val, "N", &c.N) + case "delta": + err = unpopulate(val, "Delta", &c.Delta) delete(rawMsg, key) - case "presence_penalty": - err = unpopulate(val, "PresencePenalty", &c.PresencePenalty) + case "enhancements": + err = unpopulate(val, "Enhancements", &c.Enhancements) delete(rawMsg, key) - case "stop": - err = unpopulate(val, "Stop", &c.Stop) + case "finish_details": + c.FinishDetails, err = unmarshalChatFinishDetailsClassification(val) delete(rawMsg, key) - case "temperature": - err = unpopulate(val, "Temperature", &c.Temperature) + case "finish_reason": + err = unpopulate(val, "FinishReason", &c.FinishReason) delete(rawMsg, key) - case "top_p": - err = unpopulate(val, "TopP", &c.TopP) + case "index": + err = unpopulate(val, "Index", &c.Index) delete(rawMsg, key) - case "user": - err = unpopulate(val, "User", &c.User) + case "message": + err = unpopulate(val, "Message", &c.Message) delete(rawMsg, key) } if err != nil { @@ -958,19 +1036,15 @@ func (c *ChatCompletionsOptions) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ChatMessage. -func (c ChatMessage) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ChatCompletionRequestMessageContentPart. +func (c ChatCompletionRequestMessageContentPart) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "content", c.Content) - populate(objectMap, "context", c.Context) - populate(objectMap, "function_call", c.FunctionCall) - populate(objectMap, "name", c.Name) - populate(objectMap, "role", c.Role) + objectMap["type"] = c.partType return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ChatMessage. -func (c *ChatMessage) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatCompletionRequestMessageContentPart. 
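
`finish_details` is deserialized above through the polymorphic `ChatFinishDetailsClassification` helper, so callers can type-switch on concrete implementations such as the `StopFinishDetails` type defined earlier. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
)

// describeFinish inspects the polymorphic FinishDetails field on a chat choice.
// StopFinishDetails is one concrete implementation; other finish detail types
// fall through to the default case.
func describeFinish(choice azopenai.ChatChoice) {
	switch fd := choice.FinishDetails.(type) {
	case *azopenai.StopFinishDetails:
		fmt.Printf("stopped naturally on token sequence %q\n", *fd.Stop)
	case nil:
		fmt.Println("no finish details returned")
	default:
		fmt.Printf("finished: %s\n", *fd.GetChatFinishDetails().Type)
	}
}

func main() {
	describeFinish(azopenai.ChatChoice{}) // normally a choice from a GetChatCompletions response
}
```
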
+func (c *ChatCompletionRequestMessageContentPart) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -978,20 +1052,8 @@ func (c *ChatMessage) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "content": - err = unpopulate(val, "Content", &c.Content) - delete(rawMsg, key) - case "context": - err = unpopulate(val, "Context", &c.Context) - delete(rawMsg, key) - case "function_call": - err = unpopulate(val, "FunctionCall", &c.FunctionCall) - delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &c.Name) - delete(rawMsg, key) - case "role": - err = unpopulate(val, "Role", &c.Role) + case "type": + err = unpopulate(val, "partType", &c.partType) delete(rawMsg, key) } if err != nil { @@ -1001,15 +1063,16 @@ func (c *ChatMessage) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ChatMessageContext. -func (c ChatMessageContext) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ChatCompletionRequestMessageContentPartImage. +func (c ChatCompletionRequestMessageContentPartImage) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "messages", c.Messages) + objectMap["type"] = ChatCompletionRequestMessageContentPartTypeImageURL + populate(objectMap, "image_url", c.ImageURL) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ChatMessageContext. -func (c *ChatMessageContext) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatCompletionRequestMessageContentPartImage. +func (c *ChatCompletionRequestMessageContentPartImage) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -1017,8 +1080,11 @@ func (c *ChatMessageContext) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "messages": - err = unpopulate(val, "Messages", &c.Messages) + case "type": + err = unpopulate(val, "partType", &c.partType) + delete(rawMsg, key) + case "image_url": + err = unpopulate(val, "ImageURL", &c.ImageURL) delete(rawMsg, key) } if err != nil { @@ -1028,16 +1094,16 @@ func (c *ChatMessageContext) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ChatMessageFunctionCall. -func (c ChatMessageFunctionCall) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ChatCompletionRequestMessageContentPartImageURL. +func (c ChatCompletionRequestMessageContentPartImageURL) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "arguments", c.Arguments) - populate(objectMap, "name", c.Name) + populate(objectMap, "detail", c.Detail) + populate(objectMap, "url", c.URL) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ChatMessageFunctionCall. -func (c *ChatMessageFunctionCall) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatCompletionRequestMessageContentPartImageURL. 
+func (c *ChatCompletionRequestMessageContentPartImageURL) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -1045,11 +1111,11 @@ func (c *ChatMessageFunctionCall) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "arguments": - err = unpopulate(val, "Arguments", &c.Arguments) + case "detail": + err = unpopulate(val, "Detail", &c.Detail) delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &c.Name) + case "url": + err = unpopulate(val, "URL", &c.URL) delete(rawMsg, key) } if err != nil { @@ -1059,19 +1125,16 @@ func (c *ChatMessageFunctionCall) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type Choice. -func (c Choice) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ChatCompletionRequestMessageContentPartText. +func (c ChatCompletionRequestMessageContentPartText) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "content_filter_results", c.ContentFilterResults) - populate(objectMap, "finish_reason", c.FinishReason) - populate(objectMap, "index", c.Index) - populate(objectMap, "logprobs", c.LogProbs) + objectMap["type"] = ChatCompletionRequestMessageContentPartTypeText populate(objectMap, "text", c.Text) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type Choice. -func (c *Choice) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatCompletionRequestMessageContentPartText. +func (c *ChatCompletionRequestMessageContentPartText) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -1079,17 +1142,8 @@ func (c *Choice) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "content_filter_results": - err = unpopulate(val, "ContentFilterResults", &c.ContentFilterResults) - delete(rawMsg, key) - case "finish_reason": - err = unpopulate(val, "FinishReason", &c.FinishReason) - delete(rawMsg, key) - case "index": - err = unpopulate(val, "Index", &c.Index) - delete(rawMsg, key) - case "logprobs": - err = unpopulate(val, "LogProbs", &c.LogProbs) + case "type": + err = unpopulate(val, "partType", &c.partType) delete(rawMsg, key) case "text": err = unpopulate(val, "Text", &c.Text) @@ -1102,19 +1156,20 @@ func (c *Choice) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ChoiceContentFilterResults. -func (c ChoiceContentFilterResults) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ChatCompletions. 
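
A sketch of composing a mixed text-plus-image user message from the content-part types serialized above. The `ChatRequestUserMessage` type and the `NewChatRequestUserMessageContent` helper are assumed from this release's per-role message split; the part-type discriminators are written by the marshallers:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func main() {
	parts := []azopenai.ChatCompletionRequestMessageContentPartClassification{
		&azopenai.ChatCompletionRequestMessageContentPartText{
			Text: to.Ptr("Describe this image."),
		},
		&azopenai.ChatCompletionRequestMessageContentPartImage{
			ImageURL: &azopenai.ChatCompletionRequestMessageContentPartImageURL{
				URL: to.Ptr("https://example.org/picture.png"), // placeholder image URL
			},
		},
	}

	// Assumed helper and per-role message type from this release.
	userMsg := &azopenai.ChatRequestUserMessage{
		Content: azopenai.NewChatRequestUserMessageContent(parts),
	}

	opts := azopenai.ChatCompletionsOptions{
		DeploymentName: to.Ptr("gpt-4-vision-preview"), // placeholder deployment name
		Messages:       []azopenai.ChatRequestMessageClassification{userMsg},
	}
	fmt.Printf("messages: %d\n", len(opts.Messages))
}
```
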
+func (c ChatCompletions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "error", c.Error) - populate(objectMap, "hate", c.Hate) - populate(objectMap, "self_harm", c.SelfHarm) - populate(objectMap, "sexual", c.Sexual) - populate(objectMap, "violence", c.Violence) + populate(objectMap, "choices", c.Choices) + populateTimeUnix(objectMap, "created", c.Created) + populate(objectMap, "id", c.ID) + populate(objectMap, "prompt_filter_results", c.PromptFilterResults) + populate(objectMap, "system_fingerprint", c.SystemFingerprint) + populate(objectMap, "usage", c.Usage) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ChoiceContentFilterResults. -func (c *ChoiceContentFilterResults) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatCompletions. +func (c *ChatCompletions) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -1122,20 +1177,25 @@ func (c *ChoiceContentFilterResults) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "error": - err = unpopulate(val, "Error", &c.Error) + case "choices": + err = unpopulate(val, "Choices", &c.Choices) delete(rawMsg, key) - case "hate": - err = unpopulate(val, "Hate", &c.Hate) + case "created": + err = unpopulateTimeUnix(val, "Created", &c.Created) delete(rawMsg, key) - case "self_harm": - err = unpopulate(val, "SelfHarm", &c.SelfHarm) + case "id": + err = unpopulate(val, "ID", &c.ID) delete(rawMsg, key) - case "sexual": - err = unpopulate(val, "Sexual", &c.Sexual) + case "prompt_annotations": + fallthrough + case "prompt_filter_results": + err = unpopulate(val, "PromptFilterResults", &c.PromptFilterResults) delete(rawMsg, key) - case "violence": - err = unpopulate(val, "Violence", &c.Violence) + case "system_fingerprint": + err = unpopulate(val, "SystemFingerprint", &c.SystemFingerprint) + delete(rawMsg, key) + case "usage": + err = unpopulate(val, "Usage", &c.Usage) delete(rawMsg, key) } if err != nil { @@ -1145,18 +1205,17 @@ func (c *ChoiceContentFilterResults) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type ChoiceLogProbs. -func (c ChoiceLogProbs) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ChatCompletionsFunctionToolCall. +func (c ChatCompletionsFunctionToolCall) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "text_offset", c.TextOffset) - populate(objectMap, "token_logprobs", c.TokenLogProbs) - populate(objectMap, "tokens", c.Tokens) - populate(objectMap, "top_logprobs", c.TopLogProbs) + populate(objectMap, "function", c.Function) + populate(objectMap, "id", c.ID) + objectMap["type"] = "function" return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ChoiceLogProbs. -func (c *ChoiceLogProbs) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatCompletionsFunctionToolCall. 
+func (c *ChatCompletionsFunctionToolCall) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -1164,17 +1223,14 @@ func (c *ChoiceLogProbs) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "text_offset": - err = unpopulate(val, "TextOffset", &c.TextOffset) + case "function": + err = unpopulate(val, "Function", &c.Function) delete(rawMsg, key) - case "token_logprobs": - err = unpopulate(val, "TokenLogProbs", &c.TokenLogProbs) - delete(rawMsg, key) - case "tokens": - err = unpopulate(val, "Tokens", &c.Tokens) + case "id": + err = unpopulate(val, "ID", &c.ID) delete(rawMsg, key) - case "top_logprobs": - err = unpopulate(val, "TopLogProbs", &c.TopLogProbs) + case "type": + err = unpopulate(val, "Type", &c.Type) delete(rawMsg, key) } if err != nil { @@ -1184,19 +1240,16 @@ func (c *ChoiceLogProbs) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type Completions. -func (c Completions) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ChatCompletionsFunctionToolDefinition. +func (c ChatCompletionsFunctionToolDefinition) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "choices", c.Choices) - populateTimeUnix(objectMap, "created", c.Created) - populate(objectMap, "id", c.ID) - populate(objectMap, "prompt_filter_results", c.PromptFilterResults) - populate(objectMap, "usage", c.Usage) + populate(objectMap, "function", c.Function) + objectMap["type"] = "function" return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type Completions. -func (c *Completions) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatCompletionsFunctionToolDefinition. +func (c *ChatCompletionsFunctionToolDefinition) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -1204,22 +1257,104 @@ func (c *Completions) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "choices": - err = unpopulate(val, "Choices", &c.Choices) + case "function": + err = unpopulate(val, "Function", &c.Function) delete(rawMsg, key) - case "created": - err = unpopulateTimeUnix(val, "Created", &c.Created) + case "type": + err = unpopulate(val, "Type", &c.Type) delete(rawMsg, key) - case "id": - err = unpopulate(val, "ID", &c.ID) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ChatCompletionsOptions. 
+func (c ChatCompletionsOptions) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "dataSources", c.AzureExtensionsOptions) + populate(objectMap, "model", c.DeploymentName) + populate(objectMap, "enhancements", c.Enhancements) + populate(objectMap, "frequency_penalty", c.FrequencyPenalty) + populate(objectMap, "function_call", c.FunctionCall) + populate(objectMap, "functions", c.Functions) + populate(objectMap, "logit_bias", c.LogitBias) + populate(objectMap, "max_tokens", c.MaxTokens) + populate(objectMap, "messages", c.Messages) + populate(objectMap, "n", c.N) + populate(objectMap, "presence_penalty", c.PresencePenalty) + populate(objectMap, "response_format", c.ResponseFormat) + populate(objectMap, "seed", c.Seed) + populate(objectMap, "stop", c.Stop) + populate(objectMap, "temperature", c.Temperature) + populateAny(objectMap, "tool_choice", c.ToolChoice) + populate(objectMap, "tools", c.Tools) + populate(objectMap, "top_p", c.TopP) + populate(objectMap, "user", c.User) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatCompletionsOptions. +func (c *ChatCompletionsOptions) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "dataSources": + c.AzureExtensionsOptions, err = unmarshalAzureChatExtensionConfigurationClassificationArray(val) delete(rawMsg, key) - case "prompt_annotations": - fallthrough - case "prompt_filter_results": - err = unpopulate(val, "PromptFilterResults", &c.PromptFilterResults) + case "model": + err = unpopulate(val, "DeploymentName", &c.DeploymentName) delete(rawMsg, key) - case "usage": - err = unpopulate(val, "Usage", &c.Usage) + case "enhancements": + err = unpopulate(val, "Enhancements", &c.Enhancements) + delete(rawMsg, key) + case "frequency_penalty": + err = unpopulate(val, "FrequencyPenalty", &c.FrequencyPenalty) + delete(rawMsg, key) + case "logit_bias": + err = unpopulate(val, "LogitBias", &c.LogitBias) + delete(rawMsg, key) + case "max_tokens": + err = unpopulate(val, "MaxTokens", &c.MaxTokens) + delete(rawMsg, key) + case "messages": + c.Messages, err = unmarshalChatRequestMessageClassificationArray(val) + delete(rawMsg, key) + case "n": + err = unpopulate(val, "N", &c.N) + delete(rawMsg, key) + case "presence_penalty": + err = unpopulate(val, "PresencePenalty", &c.PresencePenalty) + delete(rawMsg, key) + case "response_format": + err = unpopulate(val, "ResponseFormat", &c.ResponseFormat) + delete(rawMsg, key) + case "seed": + err = unpopulate(val, "Seed", &c.Seed) + delete(rawMsg, key) + case "stop": + err = unpopulate(val, "Stop", &c.Stop) + delete(rawMsg, key) + case "temperature": + err = unpopulate(val, "Temperature", &c.Temperature) + delete(rawMsg, key) + case "tool_choice": + err = unpopulate(val, "ToolChoice", &c.ToolChoice) + delete(rawMsg, key) + case "tools": + c.Tools, err = unmarshalChatCompletionsToolDefinitionClassificationArray(val) + delete(rawMsg, key) + case "top_p": + err = unpopulate(val, "TopP", &c.TopP) + delete(rawMsg, key) + case "user": + err = unpopulate(val, "User", &c.User) delete(rawMsg, key) } if err != nil { @@ -1229,18 +1364,16 @@ func (c *Completions) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type CompletionsLogProbabilityModel. 
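Aside (editorial, not part of the diff): the generated marshalers above consistently write the Go field `DeploymentName` to the wire as `model` (the older hunks in this file show the previous `Deployment` field doing the same). Below is a minimal, self-contained sketch of that rename pattern; `exampleOptions` and its fields are hypothetical stand-ins, not SDK types.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// exampleOptions is a hypothetical stand-in; only the rename pattern
// (Go field DeploymentName serialized as the wire field "model") mirrors
// the generated marshalers above.
type exampleOptions struct {
	DeploymentName *string
	MaxTokens      *int32
}

// MarshalJSON writes DeploymentName under the REST name "model" and skips
// nil fields so unset options stay off the wire.
func (o exampleOptions) MarshalJSON() ([]byte, error) {
	m := map[string]any{}
	if o.DeploymentName != nil {
		m["model"] = *o.DeploymentName
	}
	if o.MaxTokens != nil {
		m["max_tokens"] = *o.MaxTokens
	}
	return json.Marshal(m)
}

func main() {
	deployment, maxTokens := "gpt-4", int32(100)
	b, _ := json.Marshal(exampleOptions{DeploymentName: &deployment, MaxTokens: &maxTokens})
	fmt.Println(string(b)) // {"max_tokens":100,"model":"gpt-4"}
}
```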
-func (c CompletionsLogProbabilityModel) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ChatCompletionsToolCall. +func (c ChatCompletionsToolCall) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "text_offset", c.TextOffset) - populate(objectMap, "token_logprobs", c.TokenLogProbs) - populate(objectMap, "tokens", c.Tokens) - populate(objectMap, "top_logprobs", c.TopLogProbs) + populate(objectMap, "id", c.ID) + objectMap["type"] = c.Type return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type CompletionsLogProbabilityModel. -func (c *CompletionsLogProbabilityModel) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatCompletionsToolCall. +func (c *ChatCompletionsToolCall) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -1248,17 +1381,38 @@ func (c *CompletionsLogProbabilityModel) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "text_offset": - err = unpopulate(val, "TextOffset", &c.TextOffset) - delete(rawMsg, key) - case "token_logprobs": - err = unpopulate(val, "TokenLogProbs", &c.TokenLogProbs) + case "id": + err = unpopulate(val, "ID", &c.ID) delete(rawMsg, key) - case "tokens": - err = unpopulate(val, "Tokens", &c.Tokens) + case "type": + err = unpopulate(val, "Type", &c.Type) delete(rawMsg, key) - case "top_logprobs": - err = unpopulate(val, "TopLogProbs", &c.TopLogProbs) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ChatCompletionsToolDefinition. +func (c ChatCompletionsToolDefinition) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["type"] = c.Type + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatCompletionsToolDefinition. +func (c *ChatCompletionsToolDefinition) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "type": + err = unpopulate(val, "Type", &c.Type) delete(rawMsg, key) } if err != nil { @@ -1268,28 +1422,15 @@ func (c *CompletionsLogProbabilityModel) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type CompletionsOptions. -func (c CompletionsOptions) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ChatFinishDetails. 
+func (c ChatFinishDetails) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "best_of", c.BestOf) - populate(objectMap, "echo", c.Echo) - populate(objectMap, "frequency_penalty", c.FrequencyPenalty) - populate(objectMap, "logit_bias", c.LogitBias) - populate(objectMap, "logprobs", c.LogProbs) - populate(objectMap, "max_tokens", c.MaxTokens) - populate(objectMap, "model", &c.Deployment) - populate(objectMap, "n", c.N) - populate(objectMap, "presence_penalty", c.PresencePenalty) - populate(objectMap, "prompt", c.Prompt) - populate(objectMap, "stop", c.Stop) - populate(objectMap, "temperature", c.Temperature) - populate(objectMap, "top_p", c.TopP) - populate(objectMap, "user", c.User) + objectMap["type"] = c.Type return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type CompletionsOptions. -func (c *CompletionsOptions) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatFinishDetails. +func (c *ChatFinishDetails) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -1297,544 +1438,1713 @@ func (c *CompletionsOptions) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "best_of": - err = unpopulate(val, "BestOf", &c.BestOf) + case "type": + err = unpopulate(val, "Type", &c.Type) delete(rawMsg, key) - case "echo": - err = unpopulate(val, "Echo", &c.Echo) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ChatRequestAssistantMessage. +func (c ChatRequestAssistantMessage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "content", c.Content) + objectMap["role"] = ChatRoleAssistant + populate(objectMap, "name", c.Name) + populate(objectMap, "tool_calls", c.ToolCalls) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatRequestAssistantMessage. +func (c *ChatRequestAssistantMessage) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "content": + err = unpopulate(val, "Content", &c.Content) delete(rawMsg, key) - case "frequency_penalty": - err = unpopulate(val, "FrequencyPenalty", &c.FrequencyPenalty) + case "role": + err = unpopulate(val, "role", &c.role) delete(rawMsg, key) - case "logit_bias": - err = unpopulate(val, "LogitBias", &c.LogitBias) + case "name": + err = unpopulate(val, "Name", &c.Name) delete(rawMsg, key) - case "logprobs": - err = unpopulate(val, "LogProbs", &c.LogProbs) + case "tool_calls": + c.ToolCalls, err = unmarshalChatCompletionsToolCallClassificationArray(val) delete(rawMsg, key) - case "max_tokens": - err = unpopulate(val, "MaxTokens", &c.MaxTokens) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ChatRequestMessage. 
+func (c ChatRequestMessage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["role"] = c.role + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatRequestMessage. +func (c *ChatRequestMessage) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "role": + err = unpopulate(val, "role", &c.role) delete(rawMsg, key) - case "model": - err = unpopulate(val, "Model", &c.Deployment) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ChatRequestSystemMessage. +func (c ChatRequestSystemMessage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "content", c.Content) + objectMap["role"] = ChatRoleSystem + populate(objectMap, "name", c.Name) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatRequestSystemMessage. +func (c *ChatRequestSystemMessage) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "content": + err = unpopulate(val, "Content", &c.Content) delete(rawMsg, key) - case "n": - err = unpopulate(val, "N", &c.N) + case "role": + err = unpopulate(val, "role", &c.role) delete(rawMsg, key) - case "presence_penalty": - err = unpopulate(val, "PresencePenalty", &c.PresencePenalty) + case "name": + err = unpopulate(val, "Name", &c.Name) delete(rawMsg, key) - case "prompt": - err = unpopulate(val, "Prompt", &c.Prompt) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ChatRequestToolMessage. +func (c ChatRequestToolMessage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "content", c.Content) + objectMap["role"] = ChatRoleTool + populate(objectMap, "tool_call_id", c.ToolCallID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatRequestToolMessage. +func (c *ChatRequestToolMessage) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "content": + err = unpopulate(val, "Content", &c.Content) delete(rawMsg, key) - case "stop": - err = unpopulate(val, "Stop", &c.Stop) + case "role": + err = unpopulate(val, "role", &c.role) delete(rawMsg, key) - case "temperature": - err = unpopulate(val, "Temperature", &c.Temperature) + case "tool_call_id": + err = unpopulate(val, "ToolCallID", &c.ToolCallID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ChatRequestUserMessage. 
+func (c ChatRequestUserMessage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateAny(objectMap, "content", c.Content) + objectMap["role"] = ChatRoleUser + populate(objectMap, "name", c.Name) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatRequestUserMessage. +func (c *ChatRequestUserMessage) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "content": + err = unpopulate(val, "Content", &c.Content) + delete(rawMsg, key) + case "role": + err = unpopulate(val, "role", &c.role) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &c.Name) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ChatResponseMessage. +func (c ChatResponseMessage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "content", c.Content) + populate(objectMap, "context", c.Context) + populate(objectMap, "function_call", c.FunctionCall) + populate(objectMap, "role", c.Role) + populate(objectMap, "tool_calls", c.ToolCalls) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ChatResponseMessage. +func (c *ChatResponseMessage) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "content": + err = unpopulate(val, "Content", &c.Content) + delete(rawMsg, key) + case "context": + err = unpopulate(val, "Context", &c.Context) + delete(rawMsg, key) + case "function_call": + err = unpopulate(val, "FunctionCall", &c.FunctionCall) + delete(rawMsg, key) + case "role": + err = unpopulate(val, "Role", &c.Role) + delete(rawMsg, key) + case "tool_calls": + c.ToolCalls, err = unmarshalChatCompletionsToolCallClassificationArray(val) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Choice. +func (c Choice) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "content_filter_results", c.ContentFilterResults) + populate(objectMap, "finish_reason", c.FinishReason) + populate(objectMap, "index", c.Index) + populate(objectMap, "logprobs", c.LogProbs) + populate(objectMap, "text", c.Text) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Choice. 
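Aside (editorial): each per-role request message above marshals a constant role discriminator (for example `objectMap["role"] = ChatRoleSystem`) and unmarshals it back into an unexported `role` field, so callers never set the role by hand. A self-contained sketch of that round trip, using hypothetical simplified types rather than the SDK's:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// chatRole is a stand-in for the SDK's role constants.
type chatRole string

const roleSystem chatRole = "system"

// exampleSystemMessage mirrors the shape of the per-role request messages:
// exported payload fields plus an unexported role field callers never set.
type exampleSystemMessage struct {
	Content *string
	role    chatRole
}

// MarshalJSON always emits the constant discriminator, ignoring the stored role.
func (m exampleSystemMessage) MarshalJSON() ([]byte, error) {
	out := map[string]any{"role": roleSystem}
	if m.Content != nil {
		out["content"] = *m.Content
	}
	return json.Marshal(out)
}

// UnmarshalJSON reads the discriminator back into the unexported field.
func (m *exampleSystemMessage) UnmarshalJSON(data []byte) error {
	var raw map[string]json.RawMessage
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	for k, v := range raw {
		switch k {
		case "content":
			if err := json.Unmarshal(v, &m.Content); err != nil {
				return err
			}
		case "role":
			if err := json.Unmarshal(v, &m.role); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	text := "You are a helpful assistant."
	b, _ := json.Marshal(exampleSystemMessage{Content: &text})
	fmt.Println(string(b)) // {"content":"You are a helpful assistant.","role":"system"}

	var round exampleSystemMessage
	_ = json.Unmarshal(b, &round)
	fmt.Println(round.role) // system
}
```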
+func (c *Choice) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "content_filter_results": + err = unpopulate(val, "ContentFilterResults", &c.ContentFilterResults) + delete(rawMsg, key) + case "finish_reason": + err = unpopulate(val, "FinishReason", &c.FinishReason) + delete(rawMsg, key) + case "index": + err = unpopulate(val, "Index", &c.Index) + delete(rawMsg, key) + case "logprobs": + err = unpopulate(val, "LogProbs", &c.LogProbs) + delete(rawMsg, key) + case "text": + err = unpopulate(val, "Text", &c.Text) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ChoiceLogProbs. +func (c ChoiceLogProbs) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "text_offset", c.TextOffset) + populate(objectMap, "token_logprobs", c.TokenLogProbs) + populate(objectMap, "tokens", c.Tokens) + populate(objectMap, "top_logprobs", c.TopLogProbs) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ChoiceLogProbs. +func (c *ChoiceLogProbs) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "text_offset": + err = unpopulate(val, "TextOffset", &c.TextOffset) + delete(rawMsg, key) + case "token_logprobs": + err = unpopulate(val, "TokenLogProbs", &c.TokenLogProbs) + delete(rawMsg, key) + case "tokens": + err = unpopulate(val, "Tokens", &c.Tokens) + delete(rawMsg, key) + case "top_logprobs": + err = unpopulate(val, "TopLogProbs", &c.TopLogProbs) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Completions. +func (c Completions) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "choices", c.Choices) + populateTimeUnix(objectMap, "created", c.Created) + populate(objectMap, "id", c.ID) + populate(objectMap, "prompt_filter_results", c.PromptFilterResults) + populate(objectMap, "usage", c.Usage) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Completions. 
+func (c *Completions) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "choices": + err = unpopulate(val, "Choices", &c.Choices) + delete(rawMsg, key) + case "created": + err = unpopulateTimeUnix(val, "Created", &c.Created) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &c.ID) + delete(rawMsg, key) + case "prompt_annotations": + fallthrough + case "prompt_filter_results": + err = unpopulate(val, "PromptFilterResults", &c.PromptFilterResults) + delete(rawMsg, key) + case "usage": + err = unpopulate(val, "Usage", &c.Usage) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CompletionsLogProbabilityModel. +func (c CompletionsLogProbabilityModel) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "text_offset", c.TextOffset) + populate(objectMap, "token_logprobs", c.TokenLogProbs) + populate(objectMap, "tokens", c.Tokens) + populate(objectMap, "top_logprobs", c.TopLogProbs) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CompletionsLogProbabilityModel. +func (c *CompletionsLogProbabilityModel) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "text_offset": + err = unpopulate(val, "TextOffset", &c.TextOffset) + delete(rawMsg, key) + case "token_logprobs": + err = unpopulate(val, "TokenLogProbs", &c.TokenLogProbs) + delete(rawMsg, key) + case "tokens": + err = unpopulate(val, "Tokens", &c.Tokens) + delete(rawMsg, key) + case "top_logprobs": + err = unpopulate(val, "TopLogProbs", &c.TopLogProbs) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CompletionsOptions. +func (c CompletionsOptions) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "best_of", c.BestOf) + populate(objectMap, "model", c.DeploymentName) + populate(objectMap, "echo", c.Echo) + populate(objectMap, "frequency_penalty", c.FrequencyPenalty) + populate(objectMap, "logit_bias", c.LogitBias) + populate(objectMap, "logprobs", c.LogProbs) + populate(objectMap, "max_tokens", c.MaxTokens) + populate(objectMap, "n", c.N) + populate(objectMap, "presence_penalty", c.PresencePenalty) + populate(objectMap, "prompt", c.Prompt) + populate(objectMap, "stop", c.Stop) + populate(objectMap, "temperature", c.Temperature) + populate(objectMap, "top_p", c.TopP) + populate(objectMap, "user", c.User) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CompletionsOptions. 
+func (c *CompletionsOptions) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "best_of": + err = unpopulate(val, "BestOf", &c.BestOf) + delete(rawMsg, key) + case "model": + err = unpopulate(val, "DeploymentName", &c.DeploymentName) + delete(rawMsg, key) + case "echo": + err = unpopulate(val, "Echo", &c.Echo) + delete(rawMsg, key) + case "frequency_penalty": + err = unpopulate(val, "FrequencyPenalty", &c.FrequencyPenalty) + delete(rawMsg, key) + case "logit_bias": + err = unpopulate(val, "LogitBias", &c.LogitBias) + delete(rawMsg, key) + case "logprobs": + err = unpopulate(val, "LogProbs", &c.LogProbs) + delete(rawMsg, key) + case "max_tokens": + err = unpopulate(val, "MaxTokens", &c.MaxTokens) + delete(rawMsg, key) + case "n": + err = unpopulate(val, "N", &c.N) + delete(rawMsg, key) + case "presence_penalty": + err = unpopulate(val, "PresencePenalty", &c.PresencePenalty) + delete(rawMsg, key) + case "prompt": + err = unpopulate(val, "Prompt", &c.Prompt) + delete(rawMsg, key) + case "stop": + err = unpopulate(val, "Stop", &c.Stop) + delete(rawMsg, key) + case "temperature": + err = unpopulate(val, "Temperature", &c.Temperature) delete(rawMsg, key) case "top_p": err = unpopulate(val, "TopP", &c.TopP) delete(rawMsg, key) case "user": - err = unpopulate(val, "User", &c.User) + err = unpopulate(val, "User", &c.User) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CompletionsUsage. +func (c CompletionsUsage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "completion_tokens", c.CompletionTokens) + populate(objectMap, "prompt_tokens", c.PromptTokens) + populate(objectMap, "total_tokens", c.TotalTokens) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CompletionsUsage. +func (c *CompletionsUsage) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "completion_tokens": + err = unpopulate(val, "CompletionTokens", &c.CompletionTokens) + delete(rawMsg, key) + case "prompt_tokens": + err = unpopulate(val, "PromptTokens", &c.PromptTokens) + delete(rawMsg, key) + case "total_tokens": + err = unpopulate(val, "TotalTokens", &c.TotalTokens) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ContentFilterBlocklistIDResult. +func (c ContentFilterBlocklistIDResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "filtered", c.Filtered) + populate(objectMap, "id", c.ID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ContentFilterBlocklistIDResult. 
+func (c *ContentFilterBlocklistIDResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "filtered": + err = unpopulate(val, "Filtered", &c.Filtered) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &c.ID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ContentFilterCitedDetectionResult. +func (c ContentFilterCitedDetectionResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "detected", c.Detected) + populate(objectMap, "filtered", c.Filtered) + populate(objectMap, "license", c.License) + populate(objectMap, "URL", c.URL) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ContentFilterCitedDetectionResult. +func (c *ContentFilterCitedDetectionResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "detected": + err = unpopulate(val, "Detected", &c.Detected) + delete(rawMsg, key) + case "filtered": + err = unpopulate(val, "Filtered", &c.Filtered) + delete(rawMsg, key) + case "license": + err = unpopulate(val, "License", &c.License) + delete(rawMsg, key) + case "URL": + err = unpopulate(val, "URL", &c.URL) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ContentFilterDetectionResult. +func (c ContentFilterDetectionResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "detected", c.Detected) + populate(objectMap, "filtered", c.Filtered) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ContentFilterDetectionResult. +func (c *ContentFilterDetectionResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "detected": + err = unpopulate(val, "Detected", &c.Detected) + delete(rawMsg, key) + case "filtered": + err = unpopulate(val, "Filtered", &c.Filtered) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ContentFilterResult. +func (c ContentFilterResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "filtered", c.Filtered) + populate(objectMap, "severity", c.Severity) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ContentFilterResult. 
+func (c *ContentFilterResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "filtered": + err = unpopulate(val, "Filtered", &c.Filtered) + delete(rawMsg, key) + case "severity": + err = unpopulate(val, "Severity", &c.Severity) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ContentFilterResultDetailsForPrompt. +func (c ContentFilterResultDetailsForPrompt) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "custom_blocklists", c.CustomBlocklists) + populate(objectMap, "error", c.Error) + populate(objectMap, "hate", c.Hate) + populate(objectMap, "jailbreak", c.Jailbreak) + populate(objectMap, "profanity", c.Profanity) + populate(objectMap, "self_harm", c.SelfHarm) + populate(objectMap, "sexual", c.Sexual) + populate(objectMap, "violence", c.Violence) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ContentFilterResultDetailsForPrompt. +func (c *ContentFilterResultDetailsForPrompt) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "custom_blocklists": + err = unpopulate(val, "CustomBlocklists", &c.CustomBlocklists) + delete(rawMsg, key) + case "error": + err = unpopulate(val, "Error", &c.Error) + delete(rawMsg, key) + case "hate": + err = unpopulate(val, "Hate", &c.Hate) + delete(rawMsg, key) + case "jailbreak": + err = unpopulate(val, "Jailbreak", &c.Jailbreak) + delete(rawMsg, key) + case "profanity": + err = unpopulate(val, "Profanity", &c.Profanity) + delete(rawMsg, key) + case "self_harm": + err = unpopulate(val, "SelfHarm", &c.SelfHarm) + delete(rawMsg, key) + case "sexual": + err = unpopulate(val, "Sexual", &c.Sexual) + delete(rawMsg, key) + case "violence": + err = unpopulate(val, "Violence", &c.Violence) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ContentFilterResultsForChoice. +func (c ContentFilterResultsForChoice) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "custom_blocklists", c.CustomBlocklists) + populate(objectMap, "error", c.Error) + populate(objectMap, "hate", c.Hate) + populate(objectMap, "profanity", c.Profanity) + populate(objectMap, "protected_material_code", c.ProtectedMaterialCode) + populate(objectMap, "protected_material_text", c.ProtectedMaterialText) + populate(objectMap, "self_harm", c.SelfHarm) + populate(objectMap, "sexual", c.Sexual) + populate(objectMap, "violence", c.Violence) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ContentFilterResultsForChoice. 
+func (c *ContentFilterResultsForChoice) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "custom_blocklists": + err = unpopulate(val, "CustomBlocklists", &c.CustomBlocklists) + delete(rawMsg, key) + case "error": + err = unpopulate(val, "Error", &c.Error) + delete(rawMsg, key) + case "hate": + err = unpopulate(val, "Hate", &c.Hate) + delete(rawMsg, key) + case "profanity": + err = unpopulate(val, "Profanity", &c.Profanity) + delete(rawMsg, key) + case "protected_material_code": + err = unpopulate(val, "ProtectedMaterialCode", &c.ProtectedMaterialCode) + delete(rawMsg, key) + case "protected_material_text": + err = unpopulate(val, "ProtectedMaterialText", &c.ProtectedMaterialText) + delete(rawMsg, key) + case "self_harm": + err = unpopulate(val, "SelfHarm", &c.SelfHarm) + delete(rawMsg, key) + case "sexual": + err = unpopulate(val, "Sexual", &c.Sexual) + delete(rawMsg, key) + case "violence": + err = unpopulate(val, "Violence", &c.Violence) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ContentFilterResultsForPrompt. +func (c ContentFilterResultsForPrompt) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "content_filter_results", c.ContentFilterResults) + populate(objectMap, "prompt_index", c.PromptIndex) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ContentFilterResultsForPrompt. +func (c *ContentFilterResultsForPrompt) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "content_filter_results": + err = unpopulate(val, "ContentFilterResults", &c.ContentFilterResults) + delete(rawMsg, key) + case "prompt_index": + err = unpopulate(val, "PromptIndex", &c.PromptIndex) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ElasticsearchChatExtensionConfiguration. +func (e ElasticsearchChatExtensionConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["type"] = AzureChatExtensionTypeElasticsearch + populate(objectMap, "parameters", e.Parameters) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ElasticsearchChatExtensionConfiguration. 
+func (e *ElasticsearchChatExtensionConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "type": + err = unpopulate(val, "configType", &e.configType) + delete(rawMsg, key) + case "parameters": + err = unpopulate(val, "Parameters", &e.Parameters) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ElasticsearchChatExtensionParameters. +func (e ElasticsearchChatExtensionParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "authentication", e.Authentication) + populate(objectMap, "embeddingDependency", e.EmbeddingDependency) + populate(objectMap, "endpoint", e.Endpoint) + populate(objectMap, "fieldsMapping", e.FieldsMapping) + populate(objectMap, "inScope", e.InScope) + populate(objectMap, "indexName", e.IndexName) + populate(objectMap, "queryType", e.QueryType) + populate(objectMap, "roleInformation", e.RoleInformation) + populate(objectMap, "strictness", e.Strictness) + populate(objectMap, "topNDocuments", e.TopNDocuments) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ElasticsearchChatExtensionParameters. +func (e *ElasticsearchChatExtensionParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "authentication": + e.Authentication, err = unmarshalOnYourDataAuthenticationOptionsClassification(val) + delete(rawMsg, key) + case "embeddingDependency": + e.EmbeddingDependency, err = unmarshalOnYourDataVectorizationSourceClassification(val) + delete(rawMsg, key) + case "endpoint": + err = unpopulate(val, "Endpoint", &e.Endpoint) + delete(rawMsg, key) + case "fieldsMapping": + err = unpopulate(val, "FieldsMapping", &e.FieldsMapping) + delete(rawMsg, key) + case "inScope": + err = unpopulate(val, "InScope", &e.InScope) + delete(rawMsg, key) + case "indexName": + err = unpopulate(val, "IndexName", &e.IndexName) + delete(rawMsg, key) + case "queryType": + err = unpopulate(val, "QueryType", &e.QueryType) + delete(rawMsg, key) + case "roleInformation": + err = unpopulate(val, "RoleInformation", &e.RoleInformation) + delete(rawMsg, key) + case "strictness": + err = unpopulate(val, "Strictness", &e.Strictness) + delete(rawMsg, key) + case "topNDocuments": + err = unpopulate(val, "TopNDocuments", &e.TopNDocuments) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ElasticsearchIndexFieldMappingOptions. 
+func (e ElasticsearchIndexFieldMappingOptions) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "contentFields", e.ContentFields) + populate(objectMap, "contentFieldsSeparator", e.ContentFieldsSeparator) + populate(objectMap, "filepathField", e.FilepathField) + populate(objectMap, "titleField", e.TitleField) + populate(objectMap, "urlField", e.URLField) + populate(objectMap, "vectorFields", e.VectorFields) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ElasticsearchIndexFieldMappingOptions. +func (e *ElasticsearchIndexFieldMappingOptions) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "contentFields": + err = unpopulate(val, "ContentFields", &e.ContentFields) + delete(rawMsg, key) + case "contentFieldsSeparator": + err = unpopulate(val, "ContentFieldsSeparator", &e.ContentFieldsSeparator) + delete(rawMsg, key) + case "filepathField": + err = unpopulate(val, "FilepathField", &e.FilepathField) + delete(rawMsg, key) + case "titleField": + err = unpopulate(val, "TitleField", &e.TitleField) + delete(rawMsg, key) + case "urlField": + err = unpopulate(val, "URLField", &e.URLField) + delete(rawMsg, key) + case "vectorFields": + err = unpopulate(val, "VectorFields", &e.VectorFields) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type EmbeddingItem. +func (e EmbeddingItem) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "embedding", e.Embedding) + populate(objectMap, "index", e.Index) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EmbeddingItem. +func (e *EmbeddingItem) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "embedding": + err = unpopulate(val, "Embedding", &e.Embedding) + delete(rawMsg, key) + case "index": + err = unpopulate(val, "Index", &e.Index) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Embeddings. +func (e Embeddings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "data", e.Data) + populate(objectMap, "usage", e.Usage) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Embeddings. 
+func (e *Embeddings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "data": + err = unpopulate(val, "Data", &e.Data) + delete(rawMsg, key) + case "usage": + err = unpopulate(val, "Usage", &e.Usage) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type EmbeddingsOptions. +func (e EmbeddingsOptions) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "model", e.DeploymentName) + populate(objectMap, "input", e.Input) + populate(objectMap, "user", e.User) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EmbeddingsOptions. +func (e *EmbeddingsOptions) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "model": + err = unpopulate(val, "DeploymentName", &e.DeploymentName) + delete(rawMsg, key) + case "input": + err = unpopulate(val, "Input", &e.Input) + delete(rawMsg, key) + case "user": + err = unpopulate(val, "User", &e.User) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type EmbeddingsUsage. +func (e EmbeddingsUsage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "prompt_tokens", e.PromptTokens) + populate(objectMap, "total_tokens", e.TotalTokens) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EmbeddingsUsage. +func (e *EmbeddingsUsage) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "prompt_tokens": + err = unpopulate(val, "PromptTokens", &e.PromptTokens) + delete(rawMsg, key) + case "total_tokens": + err = unpopulate(val, "TotalTokens", &e.TotalTokens) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Error. +func (e Error) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "code", e.Code) + populate(objectMap, "message", e.message) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Error. 
+func (e *Error) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "code": + err = unpopulate(val, "Code", &e.Code) + delete(rawMsg, key) + case "message": + err = unpopulate(val, "message", &e.message) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FunctionCall. +func (f FunctionCall) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "arguments", f.Arguments) + populate(objectMap, "name", f.Name) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FunctionCall. +func (f *FunctionCall) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "arguments": + err = unpopulate(val, "Arguments", &f.Arguments) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &f.Name) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FunctionDefinition. +func (f FunctionDefinition) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "description", f.Description) + populate(objectMap, "name", f.Name) + populateAny(objectMap, "parameters", f.Parameters) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FunctionDefinition. +func (f *FunctionDefinition) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "description": + err = unpopulate(val, "Description", &f.Description) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &f.Name) + delete(rawMsg, key) + case "parameters": + err = unpopulate(val, "Parameters", &f.Parameters) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageGenerationData. +func (i ImageGenerationData) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "b64_json", i.Base64Data) + populate(objectMap, "revised_prompt", i.RevisedPrompt) + populate(objectMap, "url", i.URL) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageGenerationData. 
+func (i *ImageGenerationData) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "b64_json": + err = unpopulate(val, "Base64Data", &i.Base64Data) + delete(rawMsg, key) + case "revised_prompt": + err = unpopulate(val, "RevisedPrompt", &i.RevisedPrompt) + delete(rawMsg, key) + case "url": + err = unpopulate(val, "URL", &i.URL) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ImageGenerationOptions. +func (i ImageGenerationOptions) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "model", i.DeploymentName) + populate(objectMap, "n", i.N) + populate(objectMap, "prompt", i.Prompt) + populate(objectMap, "quality", i.Quality) + populate(objectMap, "response_format", i.ResponseFormat) + populate(objectMap, "size", i.Size) + populate(objectMap, "style", i.Style) + populate(objectMap, "user", i.User) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageGenerationOptions. +func (i *ImageGenerationOptions) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "model": + err = unpopulate(val, "DeploymentName", &i.DeploymentName) + delete(rawMsg, key) + case "n": + err = unpopulate(val, "N", &i.N) + delete(rawMsg, key) + case "prompt": + err = unpopulate(val, "Prompt", &i.Prompt) + delete(rawMsg, key) + case "quality": + err = unpopulate(val, "Quality", &i.Quality) + delete(rawMsg, key) + case "response_format": + err = unpopulate(val, "ResponseFormat", &i.ResponseFormat) + delete(rawMsg, key) + case "size": + err = unpopulate(val, "Size", &i.Size) + delete(rawMsg, key) + case "style": + err = unpopulate(val, "Style", &i.Style) + delete(rawMsg, key) + case "user": + err = unpopulate(val, "User", &i.User) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", i, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type CompletionsUsage. -func (c CompletionsUsage) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type ImageGenerations. +func (i ImageGenerations) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "completion_tokens", c.CompletionTokens) - populate(objectMap, "prompt_tokens", c.PromptTokens) - populate(objectMap, "total_tokens", c.TotalTokens) + populateTimeUnix(objectMap, "created", i.Created) + populate(objectMap, "data", i.Data) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type CompletionsUsage. -func (c *CompletionsUsage) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type ImageGenerations. 
+func (i *ImageGenerations) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", i, err) } for key, val := range rawMsg { var err error switch key { - case "completion_tokens": - err = unpopulate(val, "CompletionTokens", &c.CompletionTokens) - delete(rawMsg, key) - case "prompt_tokens": - err = unpopulate(val, "PromptTokens", &c.PromptTokens) + case "created": + err = unpopulateTimeUnix(val, "Created", &i.Created) delete(rawMsg, key) - case "total_tokens": - err = unpopulate(val, "TotalTokens", &c.TotalTokens) + case "data": + err = unpopulate(val, "Data", &i.Data) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", i, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ContentFilterResult. -func (c ContentFilterResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type MaxTokensFinishDetails. +func (m MaxTokensFinishDetails) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "filtered", c.Filtered) - populate(objectMap, "severity", c.Severity) + objectMap["type"] = "max_tokens" return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ContentFilterResult. -func (c *ContentFilterResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type MaxTokensFinishDetails. +func (m *MaxTokensFinishDetails) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", m, err) } for key, val := range rawMsg { var err error switch key { - case "filtered": - err = unpopulate(val, "Filtered", &c.Filtered) - delete(rawMsg, key) - case "severity": - err = unpopulate(val, "Severity", &c.Severity) + case "type": + err = unpopulate(val, "Type", &m.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", m, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ContentFilterResults. -func (c ContentFilterResults) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnYourDataAPIKeyAuthenticationOptions. +func (o OnYourDataAPIKeyAuthenticationOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "error", c.Error) - populate(objectMap, "hate", c.Hate) - populate(objectMap, "self_harm", c.SelfHarm) - populate(objectMap, "sexual", c.Sexual) - populate(objectMap, "violence", c.Violence) + objectMap["type"] = OnYourDataAuthenticationTypeAPIKey + populate(objectMap, "key", o.Key) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ContentFilterResults. -func (c *ContentFilterResults) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnYourDataAPIKeyAuthenticationOptions. 
+func (o *OnYourDataAPIKeyAuthenticationOptions) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "error": - err = unpopulate(val, "Error", &c.Error) - delete(rawMsg, key) - case "hate": - err = unpopulate(val, "Hate", &c.Hate) - delete(rawMsg, key) - case "self_harm": - err = unpopulate(val, "SelfHarm", &c.SelfHarm) - delete(rawMsg, key) - case "sexual": - err = unpopulate(val, "Sexual", &c.Sexual) + case "type": + err = unpopulate(val, "configType", &o.configType) delete(rawMsg, key) - case "violence": - err = unpopulate(val, "Violence", &c.Violence) + case "key": + err = unpopulate(val, "Key", &o.Key) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ContentFilterResultsError. -func (c ContentFilterResultsError) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnYourDataAuthenticationOptions. +func (o OnYourDataAuthenticationOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "code", c.Code) - populate(objectMap, "details", c.Details) - populate(objectMap, "innererror", c.InnerError) - populate(objectMap, "message", c.Message) - populate(objectMap, "target", c.Target) + objectMap["type"] = o.configType return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ContentFilterResultsError. -func (c *ContentFilterResultsError) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnYourDataAuthenticationOptions. +func (o *OnYourDataAuthenticationOptions) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "code": - err = unpopulate(val, "Code", &c.Code) - delete(rawMsg, key) - case "details": - err = unpopulate(val, "Details", &c.Details) - delete(rawMsg, key) - case "innererror": - err = unpopulate(val, "Innererror", &c.InnerError) - delete(rawMsg, key) - case "message": - err = unpopulate(val, "Message", &c.Message) - delete(rawMsg, key) - case "target": - err = unpopulate(val, "Target", &c.Target) + case "type": + err = unpopulate(val, "configType", &o.configType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", c, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type EmbeddingItem. -func (e EmbeddingItem) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnYourDataConnectionStringAuthenticationOptions. 
+func (o OnYourDataConnectionStringAuthenticationOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "embedding", e.Embedding) - populate(objectMap, "index", e.Index) + populate(objectMap, "connectionString", o.ConnectionString) + objectMap["type"] = OnYourDataAuthenticationTypeConnectionString return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type EmbeddingItem. -func (e *EmbeddingItem) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnYourDataConnectionStringAuthenticationOptions. +func (o *OnYourDataConnectionStringAuthenticationOptions) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", e, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "embedding": - err = unpopulate(val, "Embedding", &e.Embedding) + case "connectionString": + err = unpopulate(val, "ConnectionString", &o.ConnectionString) delete(rawMsg, key) - case "index": - err = unpopulate(val, "Index", &e.Index) + case "type": + err = unpopulate(val, "configType", &o.configType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", e, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type Embeddings. -func (e Embeddings) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnYourDataDeploymentNameVectorizationSource. +func (o OnYourDataDeploymentNameVectorizationSource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "data", e.Data) - populate(objectMap, "usage", e.Usage) + populate(objectMap, "deploymentName", o.DeploymentName) + objectMap["type"] = OnYourDataVectorizationSourceTypeDeploymentName return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type Embeddings. -func (e *Embeddings) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnYourDataDeploymentNameVectorizationSource. +func (o *OnYourDataDeploymentNameVectorizationSource) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", e, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "data": - err = unpopulate(val, "Data", &e.Data) + case "deploymentName": + err = unpopulate(val, "DeploymentName", &o.DeploymentName) delete(rawMsg, key) - case "usage": - err = unpopulate(val, "Usage", &e.Usage) + case "type": + err = unpopulate(val, "Type", &o.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", e, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type EmbeddingsOptions. -func (e EmbeddingsOptions) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnYourDataEndpointVectorizationSource. 
+func (o OnYourDataEndpointVectorizationSource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "input", e.Input) - populate(objectMap, "model", &e.Deployment) - populate(objectMap, "user", e.User) + populate(objectMap, "authentication", o.Authentication) + populate(objectMap, "endpoint", o.Endpoint) + objectMap["type"] = OnYourDataVectorizationSourceTypeEndpoint return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type EmbeddingsOptions. -func (e *EmbeddingsOptions) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnYourDataEndpointVectorizationSource. +func (o *OnYourDataEndpointVectorizationSource) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", e, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "input": - err = unpopulate(val, "Input", &e.Input) + case "authentication": + o.Authentication, err = unmarshalOnYourDataAuthenticationOptionsClassification(val) delete(rawMsg, key) - case "model": - err = unpopulate(val, "Model", &e.Deployment) + case "endpoint": + err = unpopulate(val, "Endpoint", &o.Endpoint) delete(rawMsg, key) - case "user": - err = unpopulate(val, "User", &e.User) + case "type": + err = unpopulate(val, "Type", &o.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", e, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type EmbeddingsUsage. -func (e EmbeddingsUsage) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnYourDataKeyAndKeyIDAuthenticationOptions. +func (o OnYourDataKeyAndKeyIDAuthenticationOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "prompt_tokens", e.PromptTokens) - populate(objectMap, "total_tokens", e.TotalTokens) + objectMap["type"] = OnYourDataAuthenticationTypeKeyAndKeyID + populate(objectMap, "key", o.Key) + populate(objectMap, "keyId", o.KeyID) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type EmbeddingsUsage. -func (e *EmbeddingsUsage) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnYourDataKeyAndKeyIDAuthenticationOptions. 
+func (o *OnYourDataKeyAndKeyIDAuthenticationOptions) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", e, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "prompt_tokens": - err = unpopulate(val, "PromptTokens", &e.PromptTokens) + case "type": + err = unpopulate(val, "configType", &o.configType) delete(rawMsg, key) - case "total_tokens": - err = unpopulate(val, "TotalTokens", &e.TotalTokens) + case "key": + err = unpopulate(val, "Key", &o.Key) + delete(rawMsg, key) + case "keyId": + err = unpopulate(val, "KeyID", &o.KeyID) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", e, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type EmbeddingsUsageAutoGenerated. -func (e EmbeddingsUsageAutoGenerated) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnYourDataModelIDVectorizationSource. +func (o OnYourDataModelIDVectorizationSource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "prompt_tokens", e.PromptTokens) - populate(objectMap, "total_tokens", e.TotalTokens) + populate(objectMap, "modelId", o.ModelID) + objectMap["type"] = OnYourDataVectorizationSourceTypeModelID return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type EmbeddingsUsageAutoGenerated. -func (e *EmbeddingsUsageAutoGenerated) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnYourDataModelIDVectorizationSource. +func (o *OnYourDataModelIDVectorizationSource) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", e, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "prompt_tokens": - err = unpopulate(val, "PromptTokens", &e.PromptTokens) + case "modelId": + err = unpopulate(val, "ModelID", &o.ModelID) delete(rawMsg, key) - case "total_tokens": - err = unpopulate(val, "TotalTokens", &e.TotalTokens) + case "type": + err = unpopulate(val, "Type", &o.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", e, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type FunctionCall. -func (f FunctionCall) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnYourDataSystemAssignedManagedIdentityAuthenticationOptions. +func (o OnYourDataSystemAssignedManagedIdentityAuthenticationOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "arguments", f.Arguments) - populate(objectMap, "name", f.Name) + objectMap["type"] = OnYourDataAuthenticationTypeSystemAssignedManagedIdentity return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type FunctionCall. -func (f *FunctionCall) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnYourDataSystemAssignedManagedIdentityAuthenticationOptions. 
+func (o *OnYourDataSystemAssignedManagedIdentityAuthenticationOptions) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", f, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "arguments": - err = unpopulate(val, "Arguments", &f.Arguments) - delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &f.Name) + case "type": + err = unpopulate(val, "configType", &o.configType) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", f, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type FunctionDefinition. -func (f FunctionDefinition) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnYourDataUserAssignedManagedIdentityAuthenticationOptions. +func (o OnYourDataUserAssignedManagedIdentityAuthenticationOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "description", f.Description) - populate(objectMap, "name", f.Name) - populateAny(objectMap, "parameters", f.Parameters) + objectMap["type"] = OnYourDataAuthenticationTypeUserAssignedManagedIdentity + populate(objectMap, "managedIdentityResourceId", o.ManagedIdentityResourceID) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type FunctionDefinition. -func (f *FunctionDefinition) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnYourDataUserAssignedManagedIdentityAuthenticationOptions. +func (o *OnYourDataUserAssignedManagedIdentityAuthenticationOptions) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", f, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "description": - err = unpopulate(val, "Description", &f.Description) - delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &f.Name) + case "type": + err = unpopulate(val, "configType", &o.configType) delete(rawMsg, key) - case "parameters": - err = unpopulate(val, "Parameters", &f.Parameters) + case "managedIdentityResourceId": + err = unpopulate(val, "ManagedIdentityResourceID", &o.ManagedIdentityResourceID) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", f, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type FunctionName. -func (f FunctionName) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type OnYourDataVectorizationSource. +func (o OnYourDataVectorizationSource) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "name", f.Name) + objectMap["type"] = o.Type return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type FunctionName. -func (f *FunctionName) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type OnYourDataVectorizationSource. 
+func (o *OnYourDataVectorizationSource) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", f, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } for key, val := range rawMsg { var err error switch key { - case "name": - err = unpopulate(val, "Name", &f.Name) + case "type": + err = unpopulate(val, "Type", &o.Type) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", f, err) + return fmt.Errorf("unmarshalling type %T: %v", o, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageGenerationOptions. -func (i ImageGenerationOptions) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PineconeChatExtensionConfiguration. +func (p PineconeChatExtensionConfiguration) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "n", i.N) - populate(objectMap, "prompt", i.Prompt) - populate(objectMap, "response_format", i.ResponseFormat) - populate(objectMap, "size", i.Size) - populate(objectMap, "user", i.User) + objectMap["type"] = AzureChatExtensionTypePinecone + populate(objectMap, "parameters", p.Parameters) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageGenerationOptions. -func (i *ImageGenerationOptions) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PineconeChatExtensionConfiguration. +func (p *PineconeChatExtensionConfiguration) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "n": - err = unpopulate(val, "N", &i.N) - delete(rawMsg, key) - case "prompt": - err = unpopulate(val, "Prompt", &i.Prompt) - delete(rawMsg, key) - case "response_format": - err = unpopulate(val, "ResponseFormat", &i.ResponseFormat) - delete(rawMsg, key) - case "size": - err = unpopulate(val, "Size", &i.Size) + case "type": + err = unpopulate(val, "configType", &p.configType) delete(rawMsg, key) - case "user": - err = unpopulate(val, "User", &i.User) + case "parameters": + err = unpopulate(val, "Parameters", &p.Parameters) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type ImageGenerations. -func (i ImageGenerations) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PineconeChatExtensionParameters. 
+func (p PineconeChatExtensionParameters) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populateTimeUnix(objectMap, "created", i.Created) - populateAny(objectMap, "data", i.Data) + populate(objectMap, "authentication", p.Authentication) + populate(objectMap, "embeddingDependency", p.EmbeddingDependency) + populate(objectMap, "environment", p.Environment) + populate(objectMap, "fieldsMapping", p.FieldsMapping) + populate(objectMap, "inScope", p.InScope) + populate(objectMap, "indexName", p.IndexName) + populate(objectMap, "roleInformation", p.RoleInformation) + populate(objectMap, "strictness", p.Strictness) + populate(objectMap, "topNDocuments", p.TopNDocuments) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type ImageGenerations. -func (i *ImageGenerations) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PineconeChatExtensionParameters. +func (p *PineconeChatExtensionParameters) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } for key, val := range rawMsg { var err error switch key { - case "created": - err = unpopulateTimeUnix(val, "Created", &i.Created) + case "authentication": + p.Authentication, err = unmarshalOnYourDataAuthenticationOptionsClassification(val) delete(rawMsg, key) - case "data": - err = unpopulate(val, "Data", &i.Data) + case "embeddingDependency": + p.EmbeddingDependency, err = unmarshalOnYourDataVectorizationSourceClassification(val) + delete(rawMsg, key) + case "environment": + err = unpopulate(val, "Environment", &p.Environment) + delete(rawMsg, key) + case "fieldsMapping": + err = unpopulate(val, "FieldsMapping", &p.FieldsMapping) + delete(rawMsg, key) + case "inScope": + err = unpopulate(val, "InScope", &p.InScope) + delete(rawMsg, key) + case "indexName": + err = unpopulate(val, "IndexName", &p.IndexName) + delete(rawMsg, key) + case "roleInformation": + err = unpopulate(val, "RoleInformation", &p.RoleInformation) + delete(rawMsg, key) + case "strictness": + err = unpopulate(val, "Strictness", &p.Strictness) + delete(rawMsg, key) + case "topNDocuments": + err = unpopulate(val, "TopNDocuments", &p.TopNDocuments) delete(rawMsg, key) } if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", i, err) + return fmt.Errorf("unmarshalling type %T: %v", p, err) } } return nil } -// MarshalJSON implements the json.Marshaller interface for type PromptFilterResult. -func (p PromptFilterResult) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type PineconeFieldMappingOptions. 
+func (p PineconeFieldMappingOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "content_filter_results", p.ContentFilterResults) - populate(objectMap, "prompt_index", p.PromptIndex) + populate(objectMap, "contentFields", p.ContentFields) + populate(objectMap, "contentFieldsSeparator", p.ContentFieldsSeparator) + populate(objectMap, "filepathField", p.FilepathField) + populate(objectMap, "imageVectorFields", p.ImageVectorFields) + populate(objectMap, "titleField", p.TitleField) + populate(objectMap, "urlField", p.URLField) + populate(objectMap, "vectorFields", p.VectorFields) return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PromptFilterResult. -func (p *PromptFilterResult) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type PineconeFieldMappingOptions. +func (p *PineconeFieldMappingOptions) UnmarshalJSON(data []byte) error { var rawMsg map[string]json.RawMessage if err := json.Unmarshal(data, &rawMsg); err != nil { return fmt.Errorf("unmarshalling type %T: %v", p, err) @@ -1842,11 +3152,26 @@ func (p *PromptFilterResult) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "content_filter_results": - err = unpopulate(val, "ContentFilterResults", &p.ContentFilterResults) + case "contentFields": + err = unpopulate(val, "ContentFields", &p.ContentFields) delete(rawMsg, key) - case "prompt_index": - err = unpopulate(val, "PromptIndex", &p.PromptIndex) + case "contentFieldsSeparator": + err = unpopulate(val, "ContentFieldsSeparator", &p.ContentFieldsSeparator) + delete(rawMsg, key) + case "filepathField": + err = unpopulate(val, "FilepathField", &p.FilepathField) + delete(rawMsg, key) + case "imageVectorFields": + err = unpopulate(val, "ImageVectorFields", &p.ImageVectorFields) + delete(rawMsg, key) + case "titleField": + err = unpopulate(val, "TitleField", &p.TitleField) + delete(rawMsg, key) + case "urlField": + err = unpopulate(val, "URLField", &p.URLField) + delete(rawMsg, key) + case "vectorFields": + err = unpopulate(val, "VectorFields", &p.VectorFields) delete(rawMsg, key) } if err != nil { @@ -1856,44 +3181,32 @@ func (p *PromptFilterResult) UnmarshalJSON(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaller interface for type PromptFilterResultContentFilterResults. -func (p PromptFilterResultContentFilterResults) MarshalJSON() ([]byte, error) { +// MarshalJSON implements the json.Marshaller interface for type StopFinishDetails. +func (s StopFinishDetails) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "error", p.Error) - populate(objectMap, "hate", p.Hate) - populate(objectMap, "self_harm", p.SelfHarm) - populate(objectMap, "sexual", p.Sexual) - populate(objectMap, "violence", p.Violence) + populate(objectMap, "stop", s.Stop) + objectMap["type"] = "stop" return json.Marshal(objectMap) } -// UnmarshalJSON implements the json.Unmarshaller interface for type PromptFilterResultContentFilterResults. -func (p *PromptFilterResultContentFilterResults) UnmarshalJSON(data []byte) error { +// UnmarshalJSON implements the json.Unmarshaller interface for type StopFinishDetails. 
+func (s *StopFinishDetails) UnmarshalJSON(data []byte) error {
 	var rawMsg map[string]json.RawMessage
 	if err := json.Unmarshal(data, &rawMsg); err != nil {
-		return fmt.Errorf("unmarshalling type %T: %v", p, err)
+		return fmt.Errorf("unmarshalling type %T: %v", s, err)
 	}
 	for key, val := range rawMsg {
 		var err error
 		switch key {
-		case "error":
-			err = unpopulate(val, "Error", &p.Error)
-			delete(rawMsg, key)
-		case "hate":
-			err = unpopulate(val, "Hate", &p.Hate)
-			delete(rawMsg, key)
-		case "self_harm":
-			err = unpopulate(val, "SelfHarm", &p.SelfHarm)
-			delete(rawMsg, key)
-		case "sexual":
-			err = unpopulate(val, "Sexual", &p.Sexual)
+		case "stop":
+			err = unpopulate(val, "Stop", &s.Stop)
 			delete(rawMsg, key)
-		case "violence":
-			err = unpopulate(val, "Violence", &p.Violence)
+		case "type":
+			err = unpopulate(val, "Type", &s.Type)
 			delete(rawMsg, key)
 		}
 		if err != nil {
-			return fmt.Errorf("unmarshalling type %T: %v", p, err)
+			return fmt.Errorf("unmarshalling type %T: %v", s, err)
 		}
 	}
 	return nil
diff --git a/sdk/ai/azopenai/options.go b/sdk/ai/azopenai/options.go
index 10afca889f5e..6ae2902cb8b1 100644
--- a/sdk/ai/azopenai/options.go
+++ b/sdk/ai/azopenai/options.go
@@ -8,16 +8,18 @@
 
 package azopenai
 
-// beginAzureBatchImageGenerationOptions contains the optional parameters for the Client.beginAzureBatchImageGeneration
+// GetAudioTranscriptionAsPlainTextOptions contains the optional parameters for the Client.GetAudioTranscriptionAsPlainText
 // method.
-type beginAzureBatchImageGenerationOptions struct {
-	// Resumes the LRO from the provided token.
-	ResumeToken string
+type GetAudioTranscriptionAsPlainTextOptions struct {
+	// placeholder for future optional parameters
 }
 
 // getAudioTranscriptionInternalOptions contains the optional parameters for the Client.getAudioTranscriptionInternal
 // method.
 type getAudioTranscriptionInternalOptions struct {
+	// The optional filename or descriptive identifier to associate with the audio data.
+	Filename *string
+
 	// The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code such
 	// as 'en' or 'fr'. Providing this known input language is optional but may improve
 	// the accuracy and/or latency of transcription.
@@ -39,8 +41,17 @@ type getAudioTranscriptionInternalOptions struct {
 	Temperature *float32
 }
 
+// GetAudioTranslationAsPlainTextOptions contains the optional parameters for the Client.GetAudioTranslationAsPlainText
+// method.
+type GetAudioTranslationAsPlainTextOptions struct {
+	// placeholder for future optional parameters
+}
+
 // getAudioTranslationInternalOptions contains the optional parameters for the Client.getAudioTranslationInternal method.
 type getAudioTranslationInternalOptions struct {
+	// The optional filename or descriptive identifier to associate with the audio data.
+	Filename *string
+
 	// The model to use for this translation request.
 	Model *string
 
@@ -77,3 +88,8 @@ type GetCompletionsOptions struct {
 type GetEmbeddingsOptions struct {
 	// placeholder for future optional parameters
 }
+
+// GetImageGenerationsOptions contains the optional parameters for the Client.GetImageGenerations method.
+type GetImageGenerationsOptions struct { + // placeholder for future optional parameters +} diff --git a/sdk/ai/azopenai/polymorphic_helpers.go b/sdk/ai/azopenai/polymorphic_helpers.go new file mode 100644 index 000000000000..39a44c5b11e6 --- /dev/null +++ b/sdk/ai/azopenai/polymorphic_helpers.go @@ -0,0 +1,267 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azopenai + +import "encoding/json" + +func unmarshalAzureChatExtensionConfigurationClassification(rawMsg json.RawMessage) (AzureChatExtensionConfigurationClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b AzureChatExtensionConfigurationClassification + switch m["type"] { + case string(AzureChatExtensionTypeAzureCognitiveSearch): + b = &AzureCognitiveSearchChatExtensionConfiguration{} + case string(AzureChatExtensionTypeAzureCosmosDB): + b = &AzureCosmosDBChatExtensionConfiguration{} + case string(AzureChatExtensionTypeAzureMachineLearningIndex): + b = &AzureMachineLearningIndexChatExtensionConfiguration{} + case string(AzureChatExtensionTypeElasticsearch): + b = &ElasticsearchChatExtensionConfiguration{} + case string(AzureChatExtensionTypePinecone): + b = &PineconeChatExtensionConfiguration{} + default: + b = &AzureChatExtensionConfiguration{} + } + if err := json.Unmarshal(rawMsg, b); err != nil { + return nil, err + } + return b, nil +} + +func unmarshalAzureChatExtensionConfigurationClassificationArray(rawMsg json.RawMessage) ([]AzureChatExtensionConfigurationClassification, error) { + if rawMsg == nil { + return nil, nil + } + var rawMessages []json.RawMessage + if err := json.Unmarshal(rawMsg, &rawMessages); err != nil { + return nil, err + } + fArray := make([]AzureChatExtensionConfigurationClassification, len(rawMessages)) + for index, rawMessage := range rawMessages { + f, err := unmarshalAzureChatExtensionConfigurationClassification(rawMessage) + if err != nil { + return nil, err + } + fArray[index] = f + } + return fArray, nil +} + +func unmarshalChatCompletionsToolCallClassification(rawMsg json.RawMessage) (ChatCompletionsToolCallClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b ChatCompletionsToolCallClassification + + if m["type"] == nil && m["function"] != nil { + // WORKAROUND: the streaming results don't contain the proper role for functions, so we need to add these in. 
+ m["type"] = string(ChatRoleFunction) + } + switch m["type"] { + case "function": + b = &ChatCompletionsFunctionToolCall{} + default: + b = &ChatCompletionsToolCall{} + } + if err := json.Unmarshal(rawMsg, b); err != nil { + return nil, err + } + return b, nil +} + +func unmarshalChatCompletionsToolCallClassificationArray(rawMsg json.RawMessage) ([]ChatCompletionsToolCallClassification, error) { + if rawMsg == nil { + return nil, nil + } + var rawMessages []json.RawMessage + if err := json.Unmarshal(rawMsg, &rawMessages); err != nil { + return nil, err + } + fArray := make([]ChatCompletionsToolCallClassification, len(rawMessages)) + for index, rawMessage := range rawMessages { + f, err := unmarshalChatCompletionsToolCallClassification(rawMessage) + if err != nil { + return nil, err + } + fArray[index] = f + } + return fArray, nil +} + +func unmarshalChatCompletionsToolDefinitionClassification(rawMsg json.RawMessage) (ChatCompletionsToolDefinitionClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b ChatCompletionsToolDefinitionClassification + switch m["type"] { + case "function": + b = &ChatCompletionsFunctionToolDefinition{} + default: + b = &ChatCompletionsToolDefinition{} + } + if err := json.Unmarshal(rawMsg, b); err != nil { + return nil, err + } + return b, nil +} + +func unmarshalChatCompletionsToolDefinitionClassificationArray(rawMsg json.RawMessage) ([]ChatCompletionsToolDefinitionClassification, error) { + if rawMsg == nil { + return nil, nil + } + var rawMessages []json.RawMessage + if err := json.Unmarshal(rawMsg, &rawMessages); err != nil { + return nil, err + } + fArray := make([]ChatCompletionsToolDefinitionClassification, len(rawMessages)) + for index, rawMessage := range rawMessages { + f, err := unmarshalChatCompletionsToolDefinitionClassification(rawMessage) + if err != nil { + return nil, err + } + fArray[index] = f + } + return fArray, nil +} + +func unmarshalChatFinishDetailsClassification(rawMsg json.RawMessage) (ChatFinishDetailsClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b ChatFinishDetailsClassification + switch m["type"] { + case "max_tokens": + b = &MaxTokensFinishDetails{} + case "stop": + b = &StopFinishDetails{} + default: + b = &ChatFinishDetails{} + } + if err := json.Unmarshal(rawMsg, b); err != nil { + return nil, err + } + return b, nil +} + +func unmarshalChatRequestMessageClassification(rawMsg json.RawMessage) (ChatRequestMessageClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b ChatRequestMessageClassification + switch m["role"] { + case string(ChatRoleAssistant): + b = &ChatRequestAssistantMessage{} + case string(ChatRoleSystem): + b = &ChatRequestSystemMessage{} + case string(ChatRoleTool): + b = &ChatRequestToolMessage{} + case string(ChatRoleUser): + b = &ChatRequestUserMessage{} + default: + b = &ChatRequestMessage{} + } + if err := json.Unmarshal(rawMsg, b); err != nil { + return nil, err + } + return b, nil +} + +func unmarshalChatRequestMessageClassificationArray(rawMsg json.RawMessage) ([]ChatRequestMessageClassification, error) { + if rawMsg == nil { + return nil, nil + } + var rawMessages []json.RawMessage + if err := json.Unmarshal(rawMsg, &rawMessages); err != nil { + 
return nil, err + } + fArray := make([]ChatRequestMessageClassification, len(rawMessages)) + for index, rawMessage := range rawMessages { + f, err := unmarshalChatRequestMessageClassification(rawMessage) + if err != nil { + return nil, err + } + fArray[index] = f + } + return fArray, nil +} + +func unmarshalOnYourDataAuthenticationOptionsClassification(rawMsg json.RawMessage) (OnYourDataAuthenticationOptionsClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b OnYourDataAuthenticationOptionsClassification + switch m["type"] { + case string(OnYourDataAuthenticationTypeAPIKey): + b = &OnYourDataAPIKeyAuthenticationOptions{} + case string(OnYourDataAuthenticationTypeConnectionString): + b = &OnYourDataConnectionStringAuthenticationOptions{} + case string(OnYourDataAuthenticationTypeKeyAndKeyID): + b = &OnYourDataKeyAndKeyIDAuthenticationOptions{} + case string(OnYourDataAuthenticationTypeSystemAssignedManagedIdentity): + b = &OnYourDataSystemAssignedManagedIdentityAuthenticationOptions{} + case string(OnYourDataAuthenticationTypeUserAssignedManagedIdentity): + b = &OnYourDataUserAssignedManagedIdentityAuthenticationOptions{} + default: + b = &OnYourDataAuthenticationOptions{} + } + if err := json.Unmarshal(rawMsg, b); err != nil { + return nil, err + } + return b, nil +} + +func unmarshalOnYourDataVectorizationSourceClassification(rawMsg json.RawMessage) (OnYourDataVectorizationSourceClassification, error) { + if rawMsg == nil { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b OnYourDataVectorizationSourceClassification + switch m["type"] { + case string(OnYourDataVectorizationSourceTypeDeploymentName): + b = &OnYourDataDeploymentNameVectorizationSource{} + case string(OnYourDataVectorizationSourceTypeEndpoint): + b = &OnYourDataEndpointVectorizationSource{} + case string(OnYourDataVectorizationSourceTypeModelID): + b = &OnYourDataModelIDVectorizationSource{} + default: + b = &OnYourDataVectorizationSource{} + } + if err := json.Unmarshal(rawMsg, b); err != nil { + return nil, err + } + return b, nil +} diff --git a/sdk/ai/azopenai/response_types.go b/sdk/ai/azopenai/response_types.go index 6c504a261576..b79526aad61e 100644 --- a/sdk/ai/azopenai/response_types.go +++ b/sdk/ai/azopenai/response_types.go @@ -8,10 +8,9 @@ package azopenai -// azureBatchImageGenerationInternalResponse contains the response from method Client.beginAzureBatchImageGeneration. -type azureBatchImageGenerationInternalResponse struct { - // A polling status update or final response payload for an image operation. - batchImageGenerationOperationResponse +// GetAudioTranscriptionAsPlainTextResponse contains the response from method Client.GetAudioTranscriptionAsPlainText. +type GetAudioTranscriptionAsPlainTextResponse struct { + Value *string } // getAudioTranscriptionInternalResponse contains the response from method Client.getAudioTranscriptionInternal. @@ -20,10 +19,15 @@ type getAudioTranscriptionInternalResponse struct { AudioTranscription } +// GetAudioTranslationAsPlainTextResponse contains the response from method Client.GetAudioTranslationAsPlainText. +type GetAudioTranslationAsPlainTextResponse struct { + Value *string +} + // getAudioTranslationInternalResponse contains the response from method Client.getAudioTranslationInternal. 
 type getAudioTranslationInternalResponse struct {
-	// Result information for an operation that transcribed spoken audio into written text.
-	AudioTranscription
+	// Result information for an operation that translated spoken audio into written text.
+	AudioTranslation
 }
 
 // GetChatCompletionsResponse contains the response from method Client.GetChatCompletions.
@@ -57,3 +61,9 @@ type GetEmbeddingsResponse struct {
 	// recommendations, and other similar scenarios.
 	Embeddings
 }
+
+// GetImageGenerationsResponse contains the response from method Client.GetImageGenerations.
+type GetImageGenerationsResponse struct {
+	// The result of a successful image generation operation.
+	ImageGenerations
+}
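
As a rough usage sketch (not part of the generated code in this diff): the per-role chat request messages routed by `unmarshalChatRequestMessageClassification` and the new `GetImageGenerations` operation come together at the call site roughly as shown below. The endpoint, key, and deployment names are placeholders, and the response field access is assumed from the definitions in this diff rather than verified against a build.

``` go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func main() {
	// Placeholder endpoint and key; azcore.KeyCredential replaces the old azopenai.KeyCredential.
	client, err := azopenai.NewClientWithKeyCredential(
		"https://<resource>.openai.azure.com/",
		azcore.NewKeyCredential("<api-key>"),
		nil)
	if err != nil {
		log.Fatal(err)
	}

	// Per-role chat request messages (the polymorphic types handled by
	// unmarshalChatRequestMessageClassification above). Deployment name is a placeholder.
	chatResp, err := client.GetChatCompletions(context.TODO(), azopenai.ChatCompletionsOptions{
		DeploymentName: to.Ptr("gpt-4-1106-preview"),
		Messages: []azopenai.ChatRequestMessageClassification{
			&azopenai.ChatRequestSystemMessage{Content: to.Ptr("You are a helpful assistant.")},
			&azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("Say hello.")},
		},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	if len(chatResp.Choices) > 0 && chatResp.Choices[0].Message != nil && chatResp.Choices[0].Message.Content != nil {
		fmt.Println(*chatResp.Choices[0].Message.Content)
	}

	// The new image generation operation (GetImageGenerationsOptions/Response above).
	imgResp, err := client.GetImageGenerations(context.TODO(), azopenai.ImageGenerationOptions{
		DeploymentName: to.Ptr("dall-e-3"),
		Prompt:         to.Ptr("a watercolor painting of a lighthouse"),
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range imgResp.Data {
		// URL is populated for the default URL response format; field name assumed from the 0.4.0 surface.
		if img.URL != nil {
			fmt.Println(*img.URL)
		}
	}
}
```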